aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/ambassador.c1
-rw-r--r--drivers/atm/fore200e.c4
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/solos-pci.c32
-rw-r--r--drivers/ieee802154/fakehard.c62
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c12
-rw-r--r--drivers/isdn/gigaset/Kconfig25
-rw-r--r--drivers/isdn/gigaset/Makefile5
-rw-r--r--drivers/isdn/gigaset/asyncdata.c662
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c76
-rw-r--r--drivers/isdn/gigaset/capi.c2292
-rw-r--r--drivers/isdn/gigaset/common.c139
-rw-r--r--drivers/isdn/gigaset/dummyll.c68
-rw-r--r--drivers/isdn/gigaset/ev-layer.c578
-rw-r--r--drivers/isdn/gigaset/gigaset.h176
-rw-r--r--drivers/isdn/gigaset/i4l.c563
-rw-r--r--drivers/isdn/gigaset/interface.c41
-rw-r--r--drivers/isdn/gigaset/isocdata.c186
-rw-r--r--drivers/isdn/gigaset/proc.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c56
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c71
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c1
-rw-r--r--drivers/isdn/mISDN/socket.c5
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c133
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c355
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h209
-rw-r--r--drivers/misc/iwmc3200top/log.c347
-rw-r--r--drivers/misc/iwmc3200top/log.h158
-rw-r--r--drivers/misc/iwmc3200top/main.c678
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c505.c2
-rw-r--r--drivers/net/3c507.c2
-rw-r--r--drivers/net/3c509.c2
-rw-r--r--drivers/net/3c515.c4
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/3c527.c2
-rw-r--r--drivers/net/3c59x.c3
-rw-r--r--drivers/net/8139cp.c8
-rw-r--r--drivers/net/8139too.c3
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arcnet/arc-rimi.c2
-rw-r--r--drivers/net/arcnet/com20020.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c2
-rw-r--r--drivers/net/arm/ks8695net.c131
-rw-r--r--drivers/net/arm/w90p910_ether.c4
-rw-r--r--drivers/net/at1700.c2
-rw-r--r--drivers/net/atl1c/atl1c.h22
-rw-r--r--drivers/net/atl1c/atl1c_main.c88
-rw-r--r--drivers/net/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/atlx/atl1.c13
-rw-r--r--drivers/net/atlx/atl2.c5
-rw-r--r--drivers/net/atp.c2
-rw-r--r--drivers/net/au1000_eth.c2
-rw-r--r--drivers/net/bcm63xx_enet.c5
-rw-r--r--drivers/net/benet/be.h17
-rw-r--r--drivers/net/benet/be_cmds.c288
-rw-r--r--drivers/net/benet/be_cmds.h93
-rw-r--r--drivers/net/benet/be_ethtool.c79
-rw-r--r--drivers/net/benet/be_hw.h5
-rw-r--r--drivers/net/benet/be_main.c158
-rw-r--r--drivers/net/bnx2.c4
-rw-r--r--drivers/net/bnx2x.h95
-rw-r--r--drivers/net/bnx2x_hsi.h21
-rw-r--r--drivers/net/bnx2x_link.c317
-rw-r--r--drivers/net/bnx2x_link.h3
-rw-r--r--drivers/net/bnx2x_main.c1443
-rw-r--r--drivers/net/bnx2x_reg.h23
-rw-r--r--drivers/net/bonding/bond_3ad.c99
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_ipv6.c7
-rw-r--r--drivers/net/bonding/bond_main.c326
-rw-r--r--drivers/net/bonding/bond_sysfs.c92
-rw-r--r--drivers/net/bonding/bonding.h31
-rw-r--r--drivers/net/can/Kconfig15
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c34
-rw-r--r--drivers/net/can/dev.c76
-rw-r--r--drivers/net/can/mcp251x.c1166
-rw-r--r--drivers/net/can/mscan/Kconfig23
-rw-r--r--drivers/net/can/mscan/Makefile5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c259
-rw-r--r--drivers/net/can/mscan/mscan.c668
-rw-r--r--drivers/net/can/mscan/mscan.h296
-rw-r--r--drivers/net/can/sja1000/sja1000.c19
-rw-r--r--drivers/net/can/sja1000/sja1000.h2
-rw-r--r--drivers/net/can/ti_hecc.c993
-rw-r--r--drivers/net/can/usb/ems_usb.c20
-rw-r--r--drivers/net/cnic.c1822
-rw-r--r--drivers/net/cnic.h64
-rw-r--r--drivers/net/cnic_defs.h1917
-rw-r--r--drivers/net/cnic_if.h14
-rw-r--r--drivers/net/cpmac.c6
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/adapter.h16
-rw-r--r--drivers/net/cxgb3/common.h8
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c47
-rw-r--r--drivers/net/cxgb3/sge.c31
-rw-r--r--drivers/net/davinci_emac.c30
-rw-r--r--drivers/net/declance.c4
-rw-r--r--drivers/net/depca.c2
-rw-r--r--drivers/net/dl2k.c20
-rw-r--r--drivers/net/dm9000.c145
-rw-r--r--drivers/net/dm9000.h7
-rw-r--r--drivers/net/e100.c25
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_ethtool.c39
-rw-r--r--drivers/net/e1000/e1000_main.c70
-rw-r--r--drivers/net/e1000e/82571.c209
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h26
-rw-r--r--drivers/net/e1000e/es2lan.c170
-rw-r--r--drivers/net/e1000e/ethtool.c78
-rw-r--r--drivers/net/e1000e/hw.h45
-rw-r--r--drivers/net/e1000e/ich8lan.c291
-rw-r--r--drivers/net/e1000e/lib.c235
-rw-r--r--drivers/net/e1000e/netdev.c331
-rw-r--r--drivers/net/e1000e/param.c2
-rw-r--r--drivers/net/e1000e/phy.c208
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/eexpress.c2
-rw-r--r--drivers/net/ehea/ehea_main.c9
-rw-r--r--drivers/net/enic/enic_main.c15
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/ethoc.c6
-rw-r--r--drivers/net/fealnx.c2
-rw-r--r--drivers/net/fec_mpc52xx.c6
-rw-r--r--drivers/net/forcedeth.c6
-rw-r--r--drivers/net/fsl_pq_mdio.c67
-rw-r--r--drivers/net/fsl_pq_mdio.h11
-rw-r--r--drivers/net/gianfar.c1826
-rw-r--r--drivers/net/gianfar.h412
-rw-r--r--drivers/net/gianfar_ethtool.c376
-rw-r--r--drivers/net/gianfar_sysfs.c77
-rw-r--r--drivers/net/hamachi.c14
-rw-r--r--drivers/net/hamradio/6pack.c21
-rw-r--r--drivers/net/hamradio/baycom_epp.c6
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hamradio/mkiss.c21
-rw-r--r--drivers/net/ibm_newemac/core.c10
-rw-r--r--drivers/net/ibmveth.c2
-rw-r--r--drivers/net/ifb.c10
-rw-r--r--drivers/net/igb/e1000_82575.c572
-rw-r--r--drivers/net/igb/e1000_82575.h32
-rw-r--r--drivers/net/igb/e1000_defines.h50
-rw-r--r--drivers/net/igb/e1000_hw.h22
-rw-r--r--drivers/net/igb/e1000_mac.c100
-rw-r--r--drivers/net/igb/e1000_mbx.c82
-rw-r--r--drivers/net/igb/e1000_mbx.h10
-rw-r--r--drivers/net/igb/e1000_nvm.c36
-rw-r--r--drivers/net/igb/e1000_phy.c453
-rw-r--r--drivers/net/igb/e1000_phy.h37
-rw-r--r--drivers/net/igb/e1000_regs.h80
-rw-r--r--drivers/net/igb/igb.h144
-rw-r--r--drivers/net/igb/igb_ethtool.c747
-rw-r--r--drivers/net/igb/igb_main.c3386
-rw-r--r--drivers/net/igbvf/ethtool.c25
-rw-r--r--drivers/net/igbvf/netdev.c14
-rw-r--r--drivers/net/ipg.c9
-rw-r--r--drivers/net/irda/au1k_ir.c4
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/isa-skeleton.c4
-rw-r--r--drivers/net/ixgb/ixgb.h1
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c69
-rw-r--r--drivers/net/ixgb/ixgb_main.c58
-rw-r--r--drivers/net/ixgbe/ixgbe.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c178
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c37
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c135
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c46
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c105
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h26
-rw-r--r--drivers/net/ixp2000/ixpdev.c3
-rw-r--r--drivers/net/jazzsonic.c2
-rw-r--r--drivers/net/jme.c10
-rw-r--r--drivers/net/korina.c13
-rw-r--r--drivers/net/ks8842.c5
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lib82596.c13
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/mac89x0.c2
-rw-r--r--drivers/net/macsonic.c4
-rw-r--r--drivers/net/macvlan.c254
-rw-r--r--drivers/net/mdio.c12
-rw-r--r--drivers/net/mipsnet.c2
-rw-r--r--drivers/net/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/netx-eth.c5
-rw-r--r--drivers/net/netxen/netxen_nic.h78
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c6
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h76
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c916
-rw-r--r--drivers/net/netxen/netxen_nic_init.c363
-rw-r--r--drivers/net/netxen/netxen_nic_main.c308
-rw-r--r--drivers/net/ni5010.c2
-rw-r--r--drivers/net/ni52.c2
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/niu.c15
-rw-r--r--drivers/net/pasemi_mac.c4
-rw-r--r--drivers/net/pasemi_mac_ethtool.c14
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c2
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c7
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/phy/broadcom.c208
-rw-r--r--drivers/net/ppp_async.c7
-rw-r--r--drivers/net/ppp_generic.c2
-rw-r--r--drivers/net/ppp_synctty.c5
-rw-r--r--drivers/net/pppoe.c21
-rw-r--r--drivers/net/pppol2tp.c26
-rw-r--r--drivers/net/pppox.c5
-rw-r--r--drivers/net/qlge/qlge.h229
-rw-r--r--drivers/net/qlge/qlge_dbg.c180
-rw-r--r--drivers/net/qlge/qlge_ethtool.c290
-rw-r--r--drivers/net/qlge/qlge_main.c486
-rw-r--r--drivers/net/qlge/qlge_mpi.c210
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c67
-rw-r--r--drivers/net/s6gmac.c2
-rw-r--r--drivers/net/sb1000.c2
-rw-r--r--drivers/net/sb1250-mac.c3
-rw-r--r--drivers/net/sc92031.c4
-rw-r--r--drivers/net/seeq8005.c4
-rw-r--r--drivers/net/sfc/Makefile4
-rw-r--r--drivers/net/sfc/bitfield.h13
-rw-r--r--drivers/net/sfc/boards.c328
-rw-r--r--drivers/net/sfc/boards.h28
-rw-r--r--drivers/net/sfc/efx.c371
-rw-r--r--drivers/net/sfc/efx.h48
-rw-r--r--drivers/net/sfc/enum.h6
-rw-r--r--drivers/net/sfc/ethtool.c47
-rw-r--r--drivers/net/sfc/ethtool.h27
-rw-r--r--drivers/net/sfc/falcon.c1639
-rw-r--r--drivers/net/sfc/falcon.h96
-rw-r--r--drivers/net/sfc/falcon_boards.c751
-rw-r--r--drivers/net/sfc/falcon_gmac.c115
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h1333
-rw-r--r--drivers/net/sfc/falcon_io.h258
-rw-r--r--drivers/net/sfc/falcon_xmac.c261
-rw-r--r--drivers/net/sfc/gmii.h60
-rw-r--r--drivers/net/sfc/io.h256
-rw-r--r--drivers/net/sfc/mdio_10g.c121
-rw-r--r--drivers/net/sfc/mdio_10g.h1
-rw-r--r--drivers/net/sfc/net_driver.h192
-rw-r--r--drivers/net/sfc/phy.h8
-rw-r--r--drivers/net/sfc/qt202x_phy.c (renamed from drivers/net/sfc/xfp_phy.c)118
-rw-r--r--drivers/net/sfc/regs.h3180
-rw-r--r--drivers/net/sfc/rx.c68
-rw-r--r--drivers/net/sfc/rx.h26
-rw-r--r--drivers/net/sfc/selftest.c30
-rw-r--r--drivers/net/sfc/sfe4001.c435
-rw-r--r--drivers/net/sfc/tenxpress.c122
-rw-r--r--drivers/net/sfc/tx.c116
-rw-r--r--drivers/net/sfc/tx.h25
-rw-r--r--drivers/net/sfc/workarounds.h13
-rw-r--r--drivers/net/sgiseeq.c7
-rw-r--r--drivers/net/sh_eth.c56
-rw-r--r--drivers/net/sh_eth.h1
-rw-r--r--drivers/net/sis190.c3
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/skge.c9
-rw-r--r--drivers/net/sky2.c129
-rw-r--r--drivers/net/sky2.h185
-rw-r--r--drivers/net/slip.c25
-rw-r--r--drivers/net/smc911x.c2
-rw-r--r--drivers/net/smc9194.c2
-rw-r--r--drivers/net/smc91x.c20
-rw-r--r--drivers/net/smsc911x.c3
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/spider_net.c1
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/sun3_82586.c2
-rw-r--r--drivers/net/sunbmac.c2
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sungem.c4
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/sunlance.c2
-rw-r--r--drivers/net/sunqe.c2
-rw-r--r--drivers/net/tc35815.c292
-rw-r--r--drivers/net/tehuti.c31
-rw-r--r--drivers/net/tehuti.h2
-rw-r--r--drivers/net/tg3.c887
-rw-r--r--drivers/net/tg3.h73
-rw-r--r--drivers/net/tlan.c7
-rw-r--r--drivers/net/tokenring/3c359.c3
-rw-r--r--drivers/net/tokenring/ibmtr.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c4
-rw-r--r--drivers/net/tokenring/tms380tr.c2
-rw-r--r--drivers/net/tsi108_eth.c10
-rw-r--r--drivers/net/tulip/dmfe.c2
-rw-r--r--drivers/net/tulip/tulip_core.c4
-rw-r--r--drivers/net/tulip/uli526x.c2
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/tun.c58
-rw-r--r--drivers/net/typhoon.c2
-rw-r--r--drivers/net/usb/asix.c12
-rw-r--r--drivers/net/usb/cdc_ether.c38
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/kaweth.c11
-rw-r--r--drivers/net/usb/usbnet.c21
-rw-r--r--drivers/net/veth.c38
-rw-r--r--drivers/net/via-rhine.c10
-rw-r--r--drivers/net/via-velocity.c391
-rw-r--r--drivers/net/via-velocity.h15
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h246
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c359
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h17
-rw-r--r--drivers/net/vxge/vxge-config.c300
-rw-r--r--drivers/net/vxge/vxge-config.h2
-rw-r--r--drivers/net/vxge/vxge-main.c109
-rw-r--r--drivers/net/vxge/vxge-main.h1
-rw-r--r--drivers/net/vxge/vxge-reg.h4
-rw-r--r--drivers/net/vxge/vxge-traffic.c4
-rw-r--r--drivers/net/vxge/vxge-traffic.h2
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cosa.c20
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hdlc.c4
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/n2.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wan/x25_asy.c19
-rw-r--r--drivers/net/wimax/i2400m/Kconfig8
-rw-r--r--drivers/net/wimax/i2400m/control.c16
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c500
-rw-r--r--drivers/net/wimax/i2400m/fw.c886
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h16
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h16
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h209
-rw-r--r--drivers/net/wimax/i2400m/netdev.c127
-rw-r--r--drivers/net/wimax/i2400m/rx.c170
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c11
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c42
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c5
-rw-r--r--drivers/net/wimax/i2400m/sdio.c205
-rw-r--r--drivers/net/wimax/i2400m/tx.c20
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c37
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c35
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c60
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c61
-rw-r--r--drivers/net/wimax/i2400m/usb.c189
-rw-r--r--drivers/net/wireless/Kconfig212
-rw-r--r--drivers/net/wireless/Makefile10
-rw-r--r--drivers/net/wireless/adm8211.c2
-rw-r--r--drivers/net/wireless/airo.c3
-rw-r--r--drivers/net/wireless/at76c50x-usb.c55
-rw-r--r--drivers/net/wireless/ath/Kconfig9
-rw-r--r--drivers/net/wireless/ath/Makefile9
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h6
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c3
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h6
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c15
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c50
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c99
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c16
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath.h69
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h53
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c33
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c140
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h14
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c193
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c191
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h19
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig13
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile30
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c141
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h205
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c112
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c383
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h64
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c421
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c295
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h124
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c66
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h36
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c94
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c97
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c183
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1310
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h93
-rw-r--r--drivers/net/wireless/ath/ath9k/initvals.h101
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c162
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1247
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c1034
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h42
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c106
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c366
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c110
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c214
-rw-r--r--drivers/net/wireless/ath/debug.c32
-rw-r--r--drivers/net/wireless/ath/debug.h77
-rw-r--r--drivers/net/wireless/ath/hw.c126
-rw-r--r--drivers/net/wireless/ath/reg.h27
-rw-r--r--drivers/net/wireless/ath/regd.c5
-rw-r--r--drivers/net/wireless/ath/regd.h8
-rw-r--r--drivers/net/wireless/ath/regd_common.h32
-rw-r--r--drivers/net/wireless/atmel.c16
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/b43.h18
-rw-r--r--drivers/net/wireless/b43/dma.c312
-rw-r--r--drivers/net/wireless/b43/dma.h13
-rw-r--r--drivers/net/wireless/b43/main.c6
-rw-r--r--drivers/net/wireless/b43/phy_lp.c777
-rw-r--r--drivers/net/wireless/b43/phy_lp.h11
-rw-r--r--drivers/net/wireless/b43/pio.c85
-rw-r--r--drivers/net/wireless/b43/xmit.c8
-rw-r--r--drivers/net/wireless/b43/xmit.h19
-rw-r--r--drivers/net/wireless/b43legacy/Kconfig2
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c17
-rw-r--r--drivers/net/wireless/b43legacy/main.c6
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/Kconfig3
-rw-r--r--drivers/net/wireless/i82593.h229
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig11
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c136
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c159
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h8
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c38
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c371
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c287
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c255
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c415
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c305
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c584
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c768
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h297
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c691
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h189
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c860
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h150
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h197
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c133
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c324
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c232
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c218
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c160
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c155
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c435
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c47
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h70
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h8
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c48
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c84
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c13
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h5
-rw-r--r--drivers/net/wireless/libertas/11d.c696
-rw-r--r--drivers/net/wireless/libertas/11d.h105
-rw-r--r--drivers/net/wireless/libertas/Kconfig39
-rw-r--r--drivers/net/wireless/libertas/Makefile14
-rw-r--r--drivers/net/wireless/libertas/README26
-rw-r--r--drivers/net/wireless/libertas/assoc.c445
-rw-r--r--drivers/net/wireless/libertas/assoc.h141
-rw-r--r--drivers/net/wireless/libertas/cfg.c198
-rw-r--r--drivers/net/wireless/libertas/cfg.h16
-rw-r--r--drivers/net/wireless/libertas/cmd.c537
-rw-r--r--drivers/net/wireless/libertas/cmd.h127
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c116
-rw-r--r--drivers/net/wireless/libertas/debugfs.c27
-rw-r--r--drivers/net/wireless/libertas/decl.h59
-rw-r--r--drivers/net/wireless/libertas/defs.h3
-rw-r--r--drivers/net/wireless/libertas/dev.h419
-rw-r--r--drivers/net/wireless/libertas/host.h959
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h800
-rw-r--r--drivers/net/wireless/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c62
-rw-r--r--drivers/net/wireless/libertas/if_sdio.h3
-rw-r--r--drivers/net/wireless/libertas/if_spi.c143
-rw-r--r--drivers/net/wireless/libertas/if_usb.c5
-rw-r--r--drivers/net/wireless/libertas/main.c373
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c8
-rw-r--r--drivers/net/wireless/libertas/rx.c2
-rw-r--r--drivers/net/wireless/libertas/scan.c250
-rw-r--r--drivers/net/wireless/libertas/scan.h30
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/types.h4
-rw-r--r--drivers/net/wireless/libertas/wext.c196
-rw-r--r--drivers/net/wireless/libertas/wext.h8
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c85
-rw-r--r--drivers/net/wireless/mwl8k.c1265
-rw-r--r--drivers/net/wireless/orinoco/Kconfig6
-rw-r--r--drivers/net/wireless/orinoco/fw.c6
-rw-r--r--drivers/net/wireless/orinoco/hw.c33
-rw-r--r--drivers/net/wireless/orinoco/hw.h3
-rw-r--r--drivers/net/wireless/orinoco/main.c34
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h1
-rw-r--r--drivers/net/wireless/p54/Kconfig2
-rw-r--r--drivers/net/wireless/p54/eeprom.c31
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c3
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c13
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig46
-rw-r--r--drivers/net/wireless/rt2x00/Makefile3
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c137
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h1852
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2289
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h151
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c1321
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h159
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c2265
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h1864
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h72
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h20
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h7
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c165
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h21
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c13
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h36
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c55
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_event.c15
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c171
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_netlink.h30
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c44
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.h1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h95
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c369
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h586
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c218
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c396
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h118
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h919
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c117
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c162
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.h53
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c977
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c88
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c311
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h65
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c76
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h18
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h4
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zd1201.c3
-rw-r--r--drivers/net/wireless/zd1211rw/Kconfig2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h18
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c202
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h25
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c11
-rw-r--r--drivers/net/xilinx_emaclite.c2
-rw-r--r--drivers/net/xtsonic.c2
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/net/znet.c5
-rw-r--r--drivers/parisc/led.c7
-rw-r--r--drivers/s390/net/Makefile6
-rw-r--r--drivers/s390/net/claw.c82
-rw-r--r--drivers/s390/net/claw.h12
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/ctcm_fsms.h1
-rw-r--r--drivers/s390/net/ctcm_main.c168
-rw-r--r--drivers/s390/net/ctcm_main.h20
-rw-r--r--drivers/s390/net/ctcm_mpc.c1
-rw-r--r--drivers/s390/net/ctcm_sysfs.c11
-rw-r--r--drivers/s390/net/cu3088.c148
-rw-r--r--drivers/s390/net/cu3088.h41
-rw-r--r--drivers/s390/net/fsm.c1
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/net/lcs.c115
-rw-r--r--drivers/s390/net/lcs.h18
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_core.h8
-rw-r--r--drivers/s390/net/qeth_core_main.c225
-rw-r--r--drivers/s390/net/qeth_core_mpc.h45
-rw-r--r--drivers/s390/net/qeth_core_sys.c83
-rw-r--r--drivers/s390/net/qeth_l2_main.c33
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c144
-rw-r--r--drivers/s390/net/qeth_l3_sys.c67
-rw-r--r--drivers/ssb/driver_pcicore.c4
-rw-r--r--drivers/ssb/main.c126
-rw-r--r--drivers/ssb/scan.c2
-rw-r--r--drivers/ssb/sprom.c30
-rw-r--r--drivers/ssb/ssb_private.h12
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/arlan/Kconfig15
-rw-r--r--drivers/staging/arlan/Makefile3
-rw-r--r--drivers/staging/arlan/TODO7
-rw-r--r--drivers/staging/arlan/arlan-main.c (renamed from drivers/net/wireless/arlan-main.c)0
-rw-r--r--drivers/staging/arlan/arlan-proc.c (renamed from drivers/net/wireless/arlan-proc.c)0
-rw-r--r--drivers/staging/arlan/arlan.h (renamed from drivers/net/wireless/arlan.h)0
-rw-r--r--drivers/staging/netwave/Kconfig11
-rw-r--r--drivers/staging/netwave/Makefile1
-rw-r--r--drivers/staging/netwave/TODO7
-rw-r--r--drivers/staging/netwave/netwave_cs.c (renamed from drivers/net/wireless/netwave_cs.c)0
-rw-r--r--drivers/staging/rtl8187se/Kconfig3
-rw-r--r--drivers/staging/rtl8192e/Kconfig3
-rw-r--r--drivers/staging/strip/Kconfig22
-rw-r--r--drivers/staging/strip/Makefile1
-rw-r--r--drivers/staging/strip/TODO7
-rw-r--r--drivers/staging/strip/strip.c (renamed from drivers/net/wireless/strip.c)17
-rw-r--r--drivers/staging/vt6655/Kconfig4
-rw-r--r--drivers/staging/vt6656/Kconfig4
-rw-r--r--drivers/staging/wavelan/Kconfig38
-rw-r--r--drivers/staging/wavelan/Makefile2
-rw-r--r--drivers/staging/wavelan/TODO7
-rw-r--r--drivers/staging/wavelan/i82586.h (renamed from drivers/net/wireless/i82586.h)0
-rw-r--r--drivers/staging/wavelan/wavelan.c (renamed from drivers/net/wireless/wavelan.c)0
-rw-r--r--drivers/staging/wavelan/wavelan.h (renamed from drivers/net/wireless/wavelan.h)0
-rw-r--r--drivers/staging/wavelan/wavelan.p.h (renamed from drivers/net/wireless/wavelan.p.h)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c (renamed from drivers/net/wireless/wavelan_cs.c)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.h (renamed from drivers/net/wireless/wavelan_cs.h)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.p.h (renamed from drivers/net/wireless/wavelan_cs.p.h)2
712 files changed, 62014 insertions, 31466 deletions
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 66e181345b3a..8af23411743c 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2351,6 +2351,7 @@ static void __init amb_check_args (void) {
2351MODULE_AUTHOR(maintainer_string); 2351MODULE_AUTHOR(maintainer_string);
2352MODULE_DESCRIPTION(description_string); 2352MODULE_DESCRIPTION(description_string);
2353MODULE_LICENSE("GPL"); 2353MODULE_LICENSE("GPL");
2354MODULE_FIRMWARE("atmsar11.fw");
2354module_param(debug, ushort, 0644); 2355module_param(debug, ushort, 0644);
2355module_param(cmds, uint, 0); 2356module_param(cmds, uint, 0);
2356module_param(txs, uint, 0); 2357module_param(txs, uint, 0);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f766cc46b4c4..bc53fed89b1e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2906,8 +2906,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2906 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type)); 2906 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2907 u32 oc3_index; 2907 u32 oc3_index;
2908 2908
2909 if ((media_index < 0) || (media_index > 4)) 2909 if (media_index > 4)
2910 media_index = 5; 2910 media_index = 5;
2911 2911
2912 switch (fore200e->loop_mode) { 2912 switch (fore200e->loop_mode) {
2913 case ATM_LM_NONE: oc3_index = 0; 2913 case ATM_LM_NONE: oc3_index = 0;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 70667033a568..e90665876c47 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2739,7 +2739,7 @@ he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2739 spin_lock_irqsave(&he_dev->global_lock, flags); 2739 spin_lock_irqsave(&he_dev->global_lock, flags);
2740 switch (reg.type) { 2740 switch (reg.type) {
2741 case HE_REGTYPE_PCI: 2741 case HE_REGTYPE_PCI:
2742 if (reg.addr < 0 || reg.addr >= HE_REGMAP_SIZE) { 2742 if (reg.addr >= HE_REGMAP_SIZE) {
2743 err = -EINVAL; 2743 err = -EINVAL;
2744 break; 2744 break;
2745 } 2745 }
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c5f5186d62a3..51eed679a059 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -142,6 +142,9 @@ MODULE_AUTHOR("Traverse Technologies <support@traverse.com.au>");
142MODULE_DESCRIPTION("Solos PCI driver"); 142MODULE_DESCRIPTION("Solos PCI driver");
143MODULE_VERSION(VERSION); 143MODULE_VERSION(VERSION);
144MODULE_LICENSE("GPL"); 144MODULE_LICENSE("GPL");
145MODULE_FIRMWARE("solos-FPGA.bin");
146MODULE_FIRMWARE("solos-Firmware.bin");
147MODULE_FIRMWARE("solos-db-FPGA.bin");
145MODULE_PARM_DESC(reset, "Reset Solos chips on startup"); 148MODULE_PARM_DESC(reset, "Reset Solos chips on startup");
146MODULE_PARM_DESC(atmdebug, "Print ATM data"); 149MODULE_PARM_DESC(atmdebug, "Print ATM data");
147MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade"); 150MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade");
@@ -528,34 +531,37 @@ static int flash_upgrade(struct solos_card *card, int chip)
528 int numblocks = 0; 531 int numblocks = 0;
529 int offset; 532 int offset;
530 533
531 if (chip == 0) { 534 switch (chip) {
535 case 0:
532 fw_name = "solos-FPGA.bin"; 536 fw_name = "solos-FPGA.bin";
533 blocksize = FPGA_BLOCK; 537 blocksize = FPGA_BLOCK;
534 } 538 break;
535 539 case 1:
536 if (chip == 1) {
537 fw_name = "solos-Firmware.bin"; 540 fw_name = "solos-Firmware.bin";
538 blocksize = SOLOS_BLOCK; 541 blocksize = SOLOS_BLOCK;
539 } 542 break;
540 543 case 2:
541 if (chip == 2){
542 if (card->fpga_version > LEGACY_BUFFERS){ 544 if (card->fpga_version > LEGACY_BUFFERS){
543 fw_name = "solos-db-FPGA.bin"; 545 fw_name = "solos-db-FPGA.bin";
544 blocksize = FPGA_BLOCK; 546 blocksize = FPGA_BLOCK;
545 } else { 547 } else {
546 dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n"); 548 dev_info(&card->dev->dev, "FPGA version doesn't support"
549 " daughter board upgrades\n");
547 return -EPERM; 550 return -EPERM;
548 } 551 }
549 } 552 break;
550 553 case 3:
551 if (chip == 3){
552 if (card->fpga_version > LEGACY_BUFFERS){ 554 if (card->fpga_version > LEGACY_BUFFERS){
553 fw_name = "solos-Firmware.bin"; 555 fw_name = "solos-Firmware.bin";
554 blocksize = SOLOS_BLOCK; 556 blocksize = SOLOS_BLOCK;
555 } else { 557 } else {
556 dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n"); 558 dev_info(&card->dev->dev, "FPGA version doesn't support"
557 return -EPERM; 559 " daughter board upgrades\n");
560 return -EPERM;
558 } 561 }
562 break;
563 default:
564 return -ENODEV;
559 } 565 }
560 566
561 if (request_firmware(&fw, fw_name, &card->dev->dev)) 567 if (request_firmware(&fw, fw_name, &card->dev->dev))
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index 7c544f7c74c4..5f67540e669c 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -32,9 +32,29 @@
32#include <net/nl802154.h> 32#include <net/nl802154.h>
33#include <net/wpan-phy.h> 33#include <net/wpan-phy.h>
34 34
35struct wpan_phy *net_to_phy(struct net_device *dev) 35struct fakehard_priv {
36 struct wpan_phy *phy;
37};
38
39static struct wpan_phy *fake_to_phy(const struct net_device *dev)
40{
41 struct fakehard_priv *priv = netdev_priv(dev);
42 return priv->phy;
43}
44
45/**
46 * fake_get_phy - Return a phy corresponding to this device.
47 * @dev: The network device for which to return the wan-phy object
48 *
49 * This function returns a wpan-phy object corresponding to the passed
50 * network device. Reference counter for wpan-phy object is incremented,
51 * so when the wpan-phy isn't necessary, you should drop the reference
52 * via @wpan_phy_put() call.
53 */
54static struct wpan_phy *fake_get_phy(const struct net_device *dev)
36{ 55{
37 return container_of(dev->dev.parent, struct wpan_phy, dev); 56 struct wpan_phy *phy = fake_to_phy(dev);
57 return to_phy(get_device(&phy->dev));
38} 58}
39 59
40/** 60/**
@@ -43,7 +63,7 @@ struct wpan_phy *net_to_phy(struct net_device *dev)
43 * 63 *
44 * Return the ID of the PAN from the PIB. 64 * Return the ID of the PAN from the PIB.
45 */ 65 */
46static u16 fake_get_pan_id(struct net_device *dev) 66static u16 fake_get_pan_id(const struct net_device *dev)
47{ 67{
48 BUG_ON(dev->type != ARPHRD_IEEE802154); 68 BUG_ON(dev->type != ARPHRD_IEEE802154);
49 69
@@ -58,7 +78,7 @@ static u16 fake_get_pan_id(struct net_device *dev)
58 * device. If the device has not yet had a short address assigned 78 * device. If the device has not yet had a short address assigned
59 * then this should return 0xFFFF to indicate a lack of association. 79 * then this should return 0xFFFF to indicate a lack of association.
60 */ 80 */
61static u16 fake_get_short_addr(struct net_device *dev) 81static u16 fake_get_short_addr(const struct net_device *dev)
62{ 82{
63 BUG_ON(dev->type != ARPHRD_IEEE802154); 83 BUG_ON(dev->type != ARPHRD_IEEE802154);
64 84
@@ -78,7 +98,7 @@ static u16 fake_get_short_addr(struct net_device *dev)
78 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 98 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
79 * document. 99 * document.
80 */ 100 */
81static u8 fake_get_dsn(struct net_device *dev) 101static u8 fake_get_dsn(const struct net_device *dev)
82{ 102{
83 BUG_ON(dev->type != ARPHRD_IEEE802154); 103 BUG_ON(dev->type != ARPHRD_IEEE802154);
84 104
@@ -98,7 +118,7 @@ static u8 fake_get_dsn(struct net_device *dev)
98 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 118 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
99 * document. 119 * document.
100 */ 120 */
101static u8 fake_get_bsn(struct net_device *dev) 121static u8 fake_get_bsn(const struct net_device *dev)
102{ 122{
103 BUG_ON(dev->type != ARPHRD_IEEE802154); 123 BUG_ON(dev->type != ARPHRD_IEEE802154);
104 124
@@ -121,7 +141,7 @@ static u8 fake_get_bsn(struct net_device *dev)
121static int fake_assoc_req(struct net_device *dev, 141static int fake_assoc_req(struct net_device *dev,
122 struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap) 142 struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
123{ 143{
124 struct wpan_phy *phy = net_to_phy(dev); 144 struct wpan_phy *phy = fake_to_phy(dev);
125 145
126 mutex_lock(&phy->pib_lock); 146 mutex_lock(&phy->pib_lock);
127 phy->current_channel = channel; 147 phy->current_channel = channel;
@@ -196,7 +216,7 @@ static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
196 u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, 216 u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
197 u8 coord_realign) 217 u8 coord_realign)
198{ 218{
199 struct wpan_phy *phy = net_to_phy(dev); 219 struct wpan_phy *phy = fake_to_phy(dev);
200 220
201 mutex_lock(&phy->pib_lock); 221 mutex_lock(&phy->pib_lock);
202 phy->current_channel = channel; 222 phy->current_channel = channel;
@@ -239,6 +259,8 @@ static struct ieee802154_mlme_ops fake_mlme = {
239 .start_req = fake_start_req, 259 .start_req = fake_start_req,
240 .scan_req = fake_scan_req, 260 .scan_req = fake_scan_req,
241 261
262 .get_phy = fake_get_phy,
263
242 .get_pan_id = fake_get_pan_id, 264 .get_pan_id = fake_get_pan_id,
243 .get_short_addr = fake_get_short_addr, 265 .get_short_addr = fake_get_short_addr,
244 .get_dsn = fake_get_dsn, 266 .get_dsn = fake_get_dsn,
@@ -260,6 +282,9 @@ static int ieee802154_fake_close(struct net_device *dev)
260static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb, 282static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
261 struct net_device *dev) 283 struct net_device *dev)
262{ 284{
285 skb->skb_iif = dev->ifindex;
286 skb->dev = dev;
287
263 dev->stats.tx_packets++; 288 dev->stats.tx_packets++;
264 dev->stats.tx_bytes += skb->len; 289 dev->stats.tx_bytes += skb->len;
265 290
@@ -310,7 +335,7 @@ static const struct net_device_ops fake_ops = {
310 335
311static void ieee802154_fake_destruct(struct net_device *dev) 336static void ieee802154_fake_destruct(struct net_device *dev)
312{ 337{
313 struct wpan_phy *phy = net_to_phy(dev); 338 struct wpan_phy *phy = fake_to_phy(dev);
314 339
315 wpan_phy_unregister(phy); 340 wpan_phy_unregister(phy);
316 free_netdev(dev); 341 free_netdev(dev);
@@ -335,13 +360,14 @@ static void ieee802154_fake_setup(struct net_device *dev)
335static int __devinit ieee802154fake_probe(struct platform_device *pdev) 360static int __devinit ieee802154fake_probe(struct platform_device *pdev)
336{ 361{
337 struct net_device *dev; 362 struct net_device *dev;
363 struct fakehard_priv *priv;
338 struct wpan_phy *phy = wpan_phy_alloc(0); 364 struct wpan_phy *phy = wpan_phy_alloc(0);
339 int err; 365 int err;
340 366
341 if (!phy) 367 if (!phy)
342 return -ENOMEM; 368 return -ENOMEM;
343 369
344 dev = alloc_netdev(0, "hardwpan%d", ieee802154_fake_setup); 370 dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
345 if (!dev) { 371 if (!dev) {
346 wpan_phy_free(phy); 372 wpan_phy_free(phy);
347 return -ENOMEM; 373 return -ENOMEM;
@@ -353,12 +379,23 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
353 dev->addr_len); 379 dev->addr_len);
354 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 380 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
355 381
356 phy->channels_supported = (1 << 27) - 1; 382 /*
383 * For now we'd like to emulate 2.4 GHz-only device,
384 * both O-QPSK and CSS
385 */
386 /* 2.4 GHz O-QPSK 802.15.4-2003 */
387 phy->channels_supported[0] |= 0x7FFF800;
388 /* 2.4 GHz CSS 802.15.4a-2007 */
389 phy->channels_supported[3] |= 0x3fff;
390
357 phy->transmit_power = 0xbf; 391 phy->transmit_power = 0xbf;
358 392
359 dev->netdev_ops = &fake_ops; 393 dev->netdev_ops = &fake_ops;
360 dev->ml_priv = &fake_mlme; 394 dev->ml_priv = &fake_mlme;
361 395
396 priv = netdev_priv(dev);
397 priv->phy = phy;
398
362 /* 399 /*
363 * If the name is a format string the caller wants us to do a 400 * If the name is a format string the caller wants us to do a
364 * name allocation. 401 * name allocation.
@@ -369,11 +406,12 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
369 goto out; 406 goto out;
370 } 407 }
371 408
409 wpan_phy_set_dev(phy, &pdev->dev);
372 SET_NETDEV_DEV(dev, &phy->dev); 410 SET_NETDEV_DEV(dev, &phy->dev);
373 411
374 platform_set_drvdata(pdev, dev); 412 platform_set_drvdata(pdev, dev);
375 413
376 err = wpan_phy_register(&pdev->dev, phy); 414 err = wpan_phy_register(phy);
377 if (err) 415 if (err)
378 goto out; 416 goto out;
379 417
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index e593af3354b8..de18fdfdadf2 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1080,11 +1080,14 @@ static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
1080 1080
1081 1081
1082/** 1082/**
1083 * nes_netdev_get_stats_count 1083 * nes_netdev_get_sset_count
1084 */ 1084 */
1085static int nes_netdev_get_stats_count(struct net_device *netdev) 1085static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
1086{ 1086{
1087 return NES_ETHTOOL_STAT_COUNT; 1087 if (stringset == ETH_SS_STATS)
1088 return NES_ETHTOOL_STAT_COUNT;
1089 else
1090 return -EINVAL;
1088} 1091}
1089 1092
1090 1093
@@ -1264,7 +1267,6 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
1264 sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16, 1267 sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
1265 nesadapter->firmware_version & 0x000000ff); 1268 nesadapter->firmware_version & 0x000000ff);
1266 strcpy(drvinfo->version, DRV_VERSION); 1269 strcpy(drvinfo->version, DRV_VERSION);
1267 drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
1268 drvinfo->testinfo_len = 0; 1270 drvinfo->testinfo_len = 0;
1269 drvinfo->eedump_len = 0; 1271 drvinfo->eedump_len = 0;
1270 drvinfo->regdump_len = 0; 1272 drvinfo->regdump_len = 0;
@@ -1516,7 +1518,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
1516 .get_rx_csum = nes_netdev_get_rx_csum, 1518 .get_rx_csum = nes_netdev_get_rx_csum,
1517 .get_sg = ethtool_op_get_sg, 1519 .get_sg = ethtool_op_get_sg,
1518 .get_strings = nes_netdev_get_strings, 1520 .get_strings = nes_netdev_get_strings,
1519 .get_stats_count = nes_netdev_get_stats_count, 1521 .get_sset_count = nes_netdev_get_sset_count,
1520 .get_ethtool_stats = nes_netdev_get_ethtool_stats, 1522 .get_ethtool_stats = nes_netdev_get_ethtool_stats,
1521 .get_drvinfo = nes_netdev_get_drvinfo, 1523 .get_drvinfo = nes_netdev_get_drvinfo,
1522 .get_coalesce = nes_netdev_get_coalesce, 1524 .get_coalesce = nes_netdev_get_coalesce,
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index 18ab8652aa57..dcefedc7044a 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,6 +1,5 @@
1menuconfig ISDN_DRV_GIGASET 1menuconfig ISDN_DRV_GIGASET
2 tristate "Siemens Gigaset support" 2 tristate "Siemens Gigaset support"
3 depends on ISDN_I4L
4 select CRC_CCITT 3 select CRC_CCITT
5 select BITREVERSE 4 select BITREVERSE
6 help 5 help
@@ -11,9 +10,33 @@ menuconfig ISDN_DRV_GIGASET
11 If you have one of these devices, say M here and for at least 10 If you have one of these devices, say M here and for at least
12 one of the connection specific parts that follow. 11 one of the connection specific parts that follow.
13 This will build a module called "gigaset". 12 This will build a module called "gigaset".
13 Note: If you build your ISDN subsystem (ISDN_CAPI or ISDN_I4L)
14 as a module, you have to build this driver as a module too,
15 otherwise the Gigaset device won't show up as an ISDN device.
14 16
15if ISDN_DRV_GIGASET 17if ISDN_DRV_GIGASET
16 18
19config GIGASET_CAPI
20 bool "Gigaset CAPI support (EXPERIMENTAL)"
21 depends on EXPERIMENTAL
22 depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
23 default ISDN_I4L='n'
24 help
25 Build the Gigaset driver as a CAPI 2.0 driver interfacing with
26 the Kernel CAPI subsystem. To use it with the old ISDN4Linux
27 subsystem you'll have to enable the capidrv glue driver.
28 (select ISDN_CAPI_CAPIDRV.)
29 Say N to build the old native ISDN4Linux variant.
30
31config GIGASET_I4L
32 bool
33 depends on ISDN_I4L='y'||(ISDN_I4L='m'&&ISDN_DRV_GIGASET='m')
34 default !GIGASET_CAPI
35
36config GIGASET_DUMMYLL
37 bool
38 default !GIGASET_CAPI&&!GIGASET_I4L
39
17config GIGASET_BASE 40config GIGASET_BASE
18 tristate "Gigaset base station support" 41 tristate "Gigaset base station support"
19 depends on USB 42 depends on USB
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
index e9d3189f56b7..c453b72272a0 100644
--- a/drivers/isdn/gigaset/Makefile
+++ b/drivers/isdn/gigaset/Makefile
@@ -1,4 +1,7 @@
1gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o asyncdata.o 1gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
2gigaset-$(CONFIG_GIGASET_CAPI) += capi.o
3gigaset-$(CONFIG_GIGASET_I4L) += i4l.o
4gigaset-$(CONFIG_GIGASET_DUMMYLL) += dummyll.o
2usb_gigaset-y := usb-gigaset.o 5usb_gigaset-y := usb-gigaset.o
3ser_gigaset-y := ser-gigaset.o 6ser_gigaset-y := ser-gigaset.o
4bas_gigaset-y := bas-gigaset.o isocdata.o 7bas_gigaset-y := bas-gigaset.o isocdata.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 44a58e6f8f65..ccb2a7b7c41d 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -19,7 +19,7 @@
19 19
20/* check if byte must be stuffed/escaped 20/* check if byte must be stuffed/escaped
21 * I'm not sure which data should be encoded. 21 * I'm not sure which data should be encoded.
22 * Therefore I will go the hard way and decode every value 22 * Therefore I will go the hard way and encode every value
23 * less than 0x20, the flag sequence and the control escape char. 23 * less than 0x20, the flag sequence and the control escape char.
24 */ 24 */
25static inline int muststuff(unsigned char c) 25static inline int muststuff(unsigned char c)
@@ -35,303 +35,383 @@ static inline int muststuff(unsigned char c)
35 35
36/* == data input =========================================================== */ 36/* == data input =========================================================== */
37 37
38/* process a block of received bytes in command mode (modem response) 38/* process a block of received bytes in command mode
39 * (mstate != MS_LOCKED && (inputstate & INS_command))
40 * Append received bytes to the command response buffer and forward them
41 * line by line to the response handler. Exit whenever a mode/state change
42 * might have occurred.
39 * Return value: 43 * Return value:
40 * number of processed bytes 44 * number of processed bytes
41 */ 45 */
42static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes, 46static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
43 struct inbuf_t *inbuf)
44{ 47{
48 unsigned char *src = inbuf->data + inbuf->head;
45 struct cardstate *cs = inbuf->cs; 49 struct cardstate *cs = inbuf->cs;
46 unsigned cbytes = cs->cbytes; 50 unsigned cbytes = cs->cbytes;
47 int inputstate = inbuf->inputstate; 51 unsigned procbytes = 0;
48 int startbytes = numbytes; 52 unsigned char c;
49 53
50 for (;;) { 54 while (procbytes < numbytes) {
51 cs->respdata[cbytes] = c; 55 c = *src++;
52 if (c == 10 || c == 13) { 56 procbytes++;
53 gig_dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)", 57
58 switch (c) {
59 case '\n':
60 if (cbytes == 0 && cs->respdata[0] == '\r') {
61 /* collapse LF with preceding CR */
62 cs->respdata[0] = 0;
63 break;
64 }
65 /* --v-- fall through --v-- */
66 case '\r':
67 /* end of message line, pass to response handler */
68 gig_dbg(DEBUG_TRANSCMD, "%s: End of Message (%d Bytes)",
54 __func__, cbytes); 69 __func__, cbytes);
70 if (cbytes >= MAX_RESP_SIZE) {
71 dev_warn(cs->dev, "response too large (%d)\n",
72 cbytes);
73 cbytes = MAX_RESP_SIZE;
74 }
55 cs->cbytes = cbytes; 75 cs->cbytes = cbytes;
56 gigaset_handle_modem_response(cs); /* can change 76 gigaset_handle_modem_response(cs);
57 cs->dle */
58 cbytes = 0; 77 cbytes = 0;
59 78
60 if (cs->dle && 79 /* store EOL byte for CRLF collapsing */
61 !(inputstate & INS_DLE_command)) { 80 cs->respdata[0] = c;
62 inputstate &= ~INS_command;
63 break;
64 }
65 } else {
66 /* advance in line buffer, checking for overflow */
67 if (cbytes < MAX_RESP_SIZE - 1)
68 cbytes++;
69 else
70 dev_warn(cs->dev, "response too large\n");
71 }
72 81
73 if (!numbytes) 82 /* cs->dle may have changed */
74 break; 83 if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
75 c = *src++; 84 inbuf->inputstate &= ~INS_command;
76 --numbytes; 85
77 if (c == DLE_FLAG && 86 /* return for reevaluating state */
78 (cs->dle || inputstate & INS_DLE_command)) { 87 goto exit;
79 inputstate |= INS_DLE_char; 88
80 break; 89 case DLE_FLAG:
90 if (inbuf->inputstate & INS_DLE_char) {
91 /* quoted DLE: clear quote flag */
92 inbuf->inputstate &= ~INS_DLE_char;
93 } else if (cs->dle ||
94 (inbuf->inputstate & INS_DLE_command)) {
95 /* DLE escape, pass up for handling */
96 inbuf->inputstate |= INS_DLE_char;
97 goto exit;
98 }
99 /* quoted or not in DLE mode: treat as regular data */
100 /* --v-- fall through --v-- */
101 default:
102 /* append to line buffer if possible */
103 if (cbytes < MAX_RESP_SIZE)
104 cs->respdata[cbytes] = c;
105 cbytes++;
81 } 106 }
82 } 107 }
83 108exit:
84 cs->cbytes = cbytes; 109 cs->cbytes = cbytes;
85 inbuf->inputstate = inputstate; 110 return procbytes;
86
87 return startbytes - numbytes;
88} 111}
89 112
90/* process a block of received bytes in lock mode (tty i/f) 113/* process a block of received bytes in lock mode
114 * All received bytes are passed unmodified to the tty i/f.
91 * Return value: 115 * Return value:
92 * number of processed bytes 116 * number of processed bytes
93 */ 117 */
94static inline int lock_loop(unsigned char *src, int numbytes, 118static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
95 struct inbuf_t *inbuf)
96{ 119{
97 struct cardstate *cs = inbuf->cs; 120 unsigned char *src = inbuf->data + inbuf->head;
98
99 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
100 numbytes, src);
101 gigaset_if_receive(cs, src, numbytes);
102 121
122 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
123 gigaset_if_receive(inbuf->cs, src, numbytes);
103 return numbytes; 124 return numbytes;
104} 125}
105 126
127/* set up next receive skb for data mode
128 */
129static void new_rcv_skb(struct bc_state *bcs)
130{
131 struct cardstate *cs = bcs->cs;
132 unsigned short hw_hdr_len = cs->hw_hdr_len;
133
134 if (bcs->ignore) {
135 bcs->skb = NULL;
136 return;
137 }
138
139 bcs->skb = dev_alloc_skb(SBUFSIZE + hw_hdr_len);
140 if (bcs->skb == NULL) {
141 dev_warn(cs->dev, "could not allocate new skb\n");
142 return;
143 }
144 skb_reserve(bcs->skb, hw_hdr_len);
145}
146
106/* process a block of received bytes in HDLC data mode 147/* process a block of received bytes in HDLC data mode
148 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
107 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes. 149 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
108 * When a frame is complete, check the FCS and pass valid frames to the LL. 150 * When a frame is complete, check the FCS and pass valid frames to the LL.
109 * If DLE is encountered, return immediately to let the caller handle it. 151 * If DLE is encountered, return immediately to let the caller handle it.
110 * Return value: 152 * Return value:
111 * number of processed bytes 153 * number of processed bytes
112 * numbytes (all bytes processed) on error --FIXME
113 */ 154 */
114static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes, 155static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
115 struct inbuf_t *inbuf)
116{ 156{
117 struct cardstate *cs = inbuf->cs; 157 struct cardstate *cs = inbuf->cs;
118 struct bc_state *bcs = inbuf->bcs; 158 struct bc_state *bcs = cs->bcs;
119 int inputstate = bcs->inputstate; 159 int inputstate = bcs->inputstate;
120 __u16 fcs = bcs->fcs; 160 __u16 fcs = bcs->fcs;
121 struct sk_buff *skb = bcs->skb; 161 struct sk_buff *skb = bcs->skb;
122 unsigned char error; 162 unsigned char *src = inbuf->data + inbuf->head;
123 struct sk_buff *compskb; 163 unsigned procbytes = 0;
124 int startbytes = numbytes; 164 unsigned char c;
125 int l;
126 165
127 if (unlikely(inputstate & INS_byte_stuff)) { 166 if (inputstate & INS_byte_stuff) {
167 if (!numbytes)
168 return 0;
128 inputstate &= ~INS_byte_stuff; 169 inputstate &= ~INS_byte_stuff;
129 goto byte_stuff; 170 goto byte_stuff;
130 } 171 }
131 for (;;) { 172
132 if (unlikely(c == PPP_ESCAPE)) { 173 while (procbytes < numbytes) {
133 if (unlikely(!numbytes)) { 174 c = *src++;
134 inputstate |= INS_byte_stuff; 175 procbytes++;
176 if (c == DLE_FLAG) {
177 if (inputstate & INS_DLE_char) {
178 /* quoted DLE: clear quote flag */
179 inputstate &= ~INS_DLE_char;
180 } else if (cs->dle || (inputstate & INS_DLE_command)) {
181 /* DLE escape, pass up for handling */
182 inputstate |= INS_DLE_char;
135 break; 183 break;
136 } 184 }
137 c = *src++; 185 }
138 --numbytes; 186
139 if (unlikely(c == DLE_FLAG && 187 if (c == PPP_ESCAPE) {
140 (cs->dle || 188 /* byte stuffing indicator: pull in next byte */
141 inbuf->inputstate & INS_DLE_command))) { 189 if (procbytes >= numbytes) {
142 inbuf->inputstate |= INS_DLE_char; 190 /* end of buffer, save for later processing */
143 inputstate |= INS_byte_stuff; 191 inputstate |= INS_byte_stuff;
144 break; 192 break;
145 } 193 }
146byte_stuff: 194byte_stuff:
195 c = *src++;
196 procbytes++;
197 if (c == DLE_FLAG) {
198 if (inputstate & INS_DLE_char) {
199 /* quoted DLE: clear quote flag */
200 inputstate &= ~INS_DLE_char;
201 } else if (cs->dle ||
202 (inputstate & INS_DLE_command)) {
203 /* DLE escape, pass up for handling */
204 inputstate |=
205 INS_DLE_char | INS_byte_stuff;
206 break;
207 }
208 }
147 c ^= PPP_TRANS; 209 c ^= PPP_TRANS;
148 if (unlikely(!muststuff(c)))
149 gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
150 } else if (unlikely(c == PPP_FLAG)) {
151 if (unlikely(inputstate & INS_skip_frame)) {
152#ifdef CONFIG_GIGASET_DEBUG
153 if (!(inputstate & INS_have_data)) { /* 7E 7E */
154 ++bcs->emptycount;
155 } else
156 gig_dbg(DEBUG_HDLC,
157 "7e----------------------------");
158#endif
159
160 /* end of frame */
161 error = 1;
162 gigaset_rcv_error(NULL, cs, bcs);
163 } else if (!(inputstate & INS_have_data)) { /* 7E 7E */
164#ifdef CONFIG_GIGASET_DEBUG 210#ifdef CONFIG_GIGASET_DEBUG
165 ++bcs->emptycount; 211 if (!muststuff(c))
212 gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
166#endif 213#endif
167 break; 214 } else if (c == PPP_FLAG) {
168 } else { 215 /* end of frame: process content if any */
216 if (inputstate & INS_have_data) {
169 gig_dbg(DEBUG_HDLC, 217 gig_dbg(DEBUG_HDLC,
170 "7e----------------------------"); 218 "7e----------------------------");
171 219
172 /* end of frame */ 220 /* check and pass received frame */
173 error = 0; 221 if (!skb) {
174 222 /* skipped frame */
175 if (unlikely(fcs != PPP_GOODFCS)) { 223 gigaset_isdn_rcv_err(bcs);
224 } else if (skb->len < 2) {
225 /* frame too short for FCS */
226 dev_warn(cs->dev,
227 "short frame (%d)\n",
228 skb->len);
229 gigaset_isdn_rcv_err(bcs);
230 dev_kfree_skb_any(skb);
231 } else if (fcs != PPP_GOODFCS) {
232 /* frame check error */
176 dev_err(cs->dev, 233 dev_err(cs->dev,
177 "Checksum failed, %u bytes corrupted!\n", 234 "Checksum failed, %u bytes corrupted!\n",
178 skb->len); 235 skb->len);
179 compskb = NULL; 236 gigaset_isdn_rcv_err(bcs);
180 gigaset_rcv_error(compskb, cs, bcs); 237 dev_kfree_skb_any(skb);
181 error = 1;
182 } else { 238 } else {
183 if (likely((l = skb->len) > 2)) { 239 /* good frame */
184 skb->tail -= 2; 240 __skb_trim(skb, skb->len - 2);
185 skb->len -= 2; 241 gigaset_skb_rcvd(bcs, skb);
186 } else {
187 dev_kfree_skb(skb);
188 skb = NULL;
189 inputstate |= INS_skip_frame;
190 if (l == 1) {
191 dev_err(cs->dev,
192 "invalid packet size (1)!\n");
193 error = 1;
194 gigaset_rcv_error(NULL,
195 cs, bcs);
196 }
197 }
198 if (likely(!(error ||
199 (inputstate &
200 INS_skip_frame)))) {
201 gigaset_rcv_skb(skb, cs, bcs);
202 }
203 } 242 }
204 }
205 243
206 if (unlikely(error)) 244 /* prepare reception of next frame */
207 if (skb) 245 inputstate &= ~INS_have_data;
208 dev_kfree_skb(skb); 246 new_rcv_skb(bcs);
209 247 skb = bcs->skb;
210 fcs = PPP_INITFCS;
211 inputstate &= ~(INS_have_data | INS_skip_frame);
212 if (unlikely(bcs->ignore)) {
213 inputstate |= INS_skip_frame;
214 skb = NULL;
215 } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) {
216 skb_reserve(skb, HW_HDR_LEN);
217 } else { 248 } else {
218 dev_warn(cs->dev, 249 /* empty frame (7E 7E) */
219 "could not allocate new skb\n"); 250#ifdef CONFIG_GIGASET_DEBUG
220 inputstate |= INS_skip_frame; 251 ++bcs->emptycount;
252#endif
253 if (!skb) {
254 /* skipped (?) */
255 gigaset_isdn_rcv_err(bcs);
256 new_rcv_skb(bcs);
257 skb = bcs->skb;
258 }
221 } 259 }
222 260
223 break; 261 fcs = PPP_INITFCS;
224 } else if (unlikely(muststuff(c))) { 262 continue;
263#ifdef CONFIG_GIGASET_DEBUG
264 } else if (muststuff(c)) {
225 /* Should not happen. Possible after ZDLE=1<CR><LF>. */ 265 /* Should not happen. Possible after ZDLE=1<CR><LF>. */
226 gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c); 266 gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
267#endif
227 } 268 }
228 269
229 /* add character */ 270 /* regular data byte, append to skb */
230
231#ifdef CONFIG_GIGASET_DEBUG 271#ifdef CONFIG_GIGASET_DEBUG
232 if (unlikely(!(inputstate & INS_have_data))) { 272 if (!(inputstate & INS_have_data)) {
233 gig_dbg(DEBUG_HDLC, "7e (%d x) ================", 273 gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
234 bcs->emptycount); 274 bcs->emptycount);
235 bcs->emptycount = 0; 275 bcs->emptycount = 0;
236 } 276 }
237#endif 277#endif
238
239 inputstate |= INS_have_data; 278 inputstate |= INS_have_data;
240 279 if (skb) {
241 if (likely(!(inputstate & INS_skip_frame))) { 280 if (skb->len == SBUFSIZE) {
242 if (unlikely(skb->len == SBUFSIZE)) {
243 dev_warn(cs->dev, "received packet too long\n"); 281 dev_warn(cs->dev, "received packet too long\n");
244 dev_kfree_skb_any(skb); 282 dev_kfree_skb_any(skb);
245 skb = NULL; 283 /* skip remainder of packet */
246 inputstate |= INS_skip_frame; 284 bcs->skb = skb = NULL;
247 break; 285 } else {
286 *__skb_put(skb, 1) = c;
287 fcs = crc_ccitt_byte(fcs, c);
248 } 288 }
249 *__skb_put(skb, 1) = c;
250 fcs = crc_ccitt_byte(fcs, c);
251 }
252
253 if (unlikely(!numbytes))
254 break;
255 c = *src++;
256 --numbytes;
257 if (unlikely(c == DLE_FLAG &&
258 (cs->dle ||
259 inbuf->inputstate & INS_DLE_command))) {
260 inbuf->inputstate |= INS_DLE_char;
261 break;
262 } 289 }
263 } 290 }
291
264 bcs->inputstate = inputstate; 292 bcs->inputstate = inputstate;
265 bcs->fcs = fcs; 293 bcs->fcs = fcs;
266 bcs->skb = skb; 294 return procbytes;
267 return startbytes - numbytes;
268} 295}
269 296
270/* process a block of received bytes in transparent data mode 297/* process a block of received bytes in transparent data mode
298 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
271 * Invert bytes, undoing byte stuffing and watching for DLE escapes. 299 * Invert bytes, undoing byte stuffing and watching for DLE escapes.
272 * If DLE is encountered, return immediately to let the caller handle it. 300 * If DLE is encountered, return immediately to let the caller handle it.
273 * Return value: 301 * Return value:
274 * number of processed bytes 302 * number of processed bytes
275 * numbytes (all bytes processed) on error --FIXME
276 */ 303 */
277static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, 304static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
278 struct inbuf_t *inbuf)
279{ 305{
280 struct cardstate *cs = inbuf->cs; 306 struct cardstate *cs = inbuf->cs;
281 struct bc_state *bcs = inbuf->bcs; 307 struct bc_state *bcs = cs->bcs;
282 int inputstate = bcs->inputstate; 308 int inputstate = bcs->inputstate;
283 struct sk_buff *skb = bcs->skb; 309 struct sk_buff *skb = bcs->skb;
284 int startbytes = numbytes; 310 unsigned char *src = inbuf->data + inbuf->head;
311 unsigned procbytes = 0;
312 unsigned char c;
285 313
286 for (;;) { 314 if (!skb) {
287 /* add character */ 315 /* skip this block */
288 inputstate |= INS_have_data; 316 new_rcv_skb(bcs);
317 return numbytes;
318 }
289 319
290 if (likely(!(inputstate & INS_skip_frame))) { 320 while (procbytes < numbytes && skb->len < SBUFSIZE) {
291 if (unlikely(skb->len == SBUFSIZE)) { 321 c = *src++;
292 //FIXME just pass skb up and allocate a new one 322 procbytes++;
293 dev_warn(cs->dev, "received packet too long\n"); 323
294 dev_kfree_skb_any(skb); 324 if (c == DLE_FLAG) {
295 skb = NULL; 325 if (inputstate & INS_DLE_char) {
296 inputstate |= INS_skip_frame; 326 /* quoted DLE: clear quote flag */
327 inputstate &= ~INS_DLE_char;
328 } else if (cs->dle || (inputstate & INS_DLE_command)) {
329 /* DLE escape, pass up for handling */
330 inputstate |= INS_DLE_char;
297 break; 331 break;
298 } 332 }
299 *__skb_put(skb, 1) = bitrev8(c);
300 } 333 }
301 334
302 if (unlikely(!numbytes)) 335 /* regular data byte: append to current skb */
303 break; 336 inputstate |= INS_have_data;
304 c = *src++; 337 *__skb_put(skb, 1) = bitrev8(c);
305 --numbytes;
306 if (unlikely(c == DLE_FLAG &&
307 (cs->dle ||
308 inbuf->inputstate & INS_DLE_command))) {
309 inbuf->inputstate |= INS_DLE_char;
310 break;
311 }
312 } 338 }
313 339
314 /* pass data up */ 340 /* pass data up */
315 if (likely(inputstate & INS_have_data)) { 341 if (inputstate & INS_have_data) {
316 if (likely(!(inputstate & INS_skip_frame))) { 342 gigaset_skb_rcvd(bcs, skb);
317 gigaset_rcv_skb(skb, cs, bcs); 343 inputstate &= ~INS_have_data;
318 } 344 new_rcv_skb(bcs);
319 inputstate &= ~(INS_have_data | INS_skip_frame); 345 }
320 if (unlikely(bcs->ignore)) { 346
321 inputstate |= INS_skip_frame; 347 bcs->inputstate = inputstate;
322 skb = NULL; 348 return procbytes;
323 } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) 349}
324 != NULL)) { 350
325 skb_reserve(skb, HW_HDR_LEN); 351/* process DLE escapes
352 * Called whenever a DLE sequence might be encountered in the input stream.
353 * Either processes the entire DLE sequence or, if that isn't possible,
354 * notes the fact that an initial DLE has been received in the INS_DLE_char
355 * inputstate flag and resumes processing of the sequence on the next call.
356 */
357static void handle_dle(struct inbuf_t *inbuf)
358{
359 struct cardstate *cs = inbuf->cs;
360
361 if (cs->mstate == MS_LOCKED)
362 return; /* no DLE processing in lock mode */
363
364 if (!(inbuf->inputstate & INS_DLE_char)) {
365 /* no DLE pending */
366 if (inbuf->data[inbuf->head] == DLE_FLAG &&
367 (cs->dle || inbuf->inputstate & INS_DLE_command)) {
368 /* start of DLE sequence */
369 inbuf->head++;
370 if (inbuf->head == inbuf->tail ||
371 inbuf->head == RBUFSIZE) {
372 /* end of buffer, save for later processing */
373 inbuf->inputstate |= INS_DLE_char;
374 return;
375 }
326 } else { 376 } else {
327 dev_warn(cs->dev, "could not allocate new skb\n"); 377 /* regular data byte */
328 inputstate |= INS_skip_frame; 378 return;
329 } 379 }
330 } 380 }
331 381
332 bcs->inputstate = inputstate; 382 /* consume pending DLE */
333 bcs->skb = skb; 383 inbuf->inputstate &= ~INS_DLE_char;
334 return startbytes - numbytes; 384
385 switch (inbuf->data[inbuf->head]) {
386 case 'X': /* begin of event message */
387 if (inbuf->inputstate & INS_command)
388 dev_notice(cs->dev,
389 "received <DLE>X in command mode\n");
390 inbuf->inputstate |= INS_command | INS_DLE_command;
391 inbuf->head++; /* byte consumed */
392 break;
393 case '.': /* end of event message */
394 if (!(inbuf->inputstate & INS_DLE_command))
395 dev_notice(cs->dev,
396 "received <DLE>. without <DLE>X\n");
397 inbuf->inputstate &= ~INS_DLE_command;
398 /* return to data mode if in DLE mode */
399 if (cs->dle)
400 inbuf->inputstate &= ~INS_command;
401 inbuf->head++; /* byte consumed */
402 break;
403 case DLE_FLAG: /* DLE in data stream */
404 /* mark as quoted */
405 inbuf->inputstate |= INS_DLE_char;
406 if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
407 dev_notice(cs->dev,
408 "received <DLE><DLE> not in DLE mode\n");
409 break; /* quoted byte left in buffer */
410 default:
411 dev_notice(cs->dev, "received <DLE><%02x>\n",
412 inbuf->data[inbuf->head]);
413 /* quoted byte left in buffer */
414 }
335} 415}
336 416
337/** 417/**
@@ -345,94 +425,39 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
345 */ 425 */
346void gigaset_m10x_input(struct inbuf_t *inbuf) 426void gigaset_m10x_input(struct inbuf_t *inbuf)
347{ 427{
348 struct cardstate *cs; 428 struct cardstate *cs = inbuf->cs;
349 unsigned tail, head, numbytes; 429 unsigned numbytes, procbytes;
350 unsigned char *src, c;
351 int procbytes;
352
353 head = inbuf->head;
354 tail = inbuf->tail;
355 gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
356
357 if (head != tail) {
358 cs = inbuf->cs;
359 src = inbuf->data + head;
360 numbytes = (head > tail ? RBUFSIZE : tail) - head;
361 gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
362 430
363 while (numbytes) { 431 gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
364 if (cs->mstate == MS_LOCKED) {
365 procbytes = lock_loop(src, numbytes, inbuf);
366 src += procbytes;
367 numbytes -= procbytes;
368 } else {
369 c = *src++;
370 --numbytes;
371 if (c == DLE_FLAG && (cs->dle ||
372 inbuf->inputstate & INS_DLE_command)) {
373 if (!(inbuf->inputstate & INS_DLE_char)) {
374 inbuf->inputstate |= INS_DLE_char;
375 goto nextbyte;
376 }
377 /* <DLE> <DLE> => <DLE> in data stream */
378 inbuf->inputstate &= ~INS_DLE_char;
379 }
380 432
381 if (!(inbuf->inputstate & INS_DLE_char)) { 433 while (inbuf->head != inbuf->tail) {
382 434 /* check for DLE escape */
383 /* FIXME use function pointers? */ 435 handle_dle(inbuf);
384 if (inbuf->inputstate & INS_command)
385 procbytes = cmd_loop(c, src, numbytes, inbuf);
386 else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC)
387 procbytes = hdlc_loop(c, src, numbytes, inbuf);
388 else
389 procbytes = iraw_loop(c, src, numbytes, inbuf);
390
391 src += procbytes;
392 numbytes -= procbytes;
393 } else { /* DLE char */
394 inbuf->inputstate &= ~INS_DLE_char;
395 switch (c) {
396 case 'X': /*begin of command*/
397 if (inbuf->inputstate & INS_command)
398 dev_warn(cs->dev,
399 "received <DLE> 'X' in command mode\n");
400 inbuf->inputstate |=
401 INS_command | INS_DLE_command;
402 break;
403 case '.': /*end of command*/
404 if (!(inbuf->inputstate & INS_command))
405 dev_warn(cs->dev,
406 "received <DLE> '.' in hdlc mode\n");
407 inbuf->inputstate &= cs->dle ?
408 ~(INS_DLE_command|INS_command)
409 : ~INS_DLE_command;
410 break;
411 //case DLE_FLAG: /*DLE_FLAG in data stream*/ /* schon oben behandelt! */
412 default:
413 dev_err(cs->dev,
414 "received 0x10 0x%02x!\n",
415 (int) c);
416 /* FIXME: reset driver?? */
417 }
418 }
419 }
420nextbyte:
421 if (!numbytes) {
422 /* end of buffer, check for wrap */
423 if (head > tail) {
424 head = 0;
425 src = inbuf->data;
426 numbytes = tail;
427 } else {
428 head = tail;
429 break;
430 }
431 }
432 }
433 436
434 gig_dbg(DEBUG_INTR, "setting head to %u", head); 437 /* process a contiguous block of bytes */
435 inbuf->head = head; 438 numbytes = (inbuf->head > inbuf->tail ?
439 RBUFSIZE : inbuf->tail) - inbuf->head;
440 gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
441 /*
442 * numbytes may be 0 if handle_dle() ate the last byte.
443 * This does no harm, *_loop() will just return 0 immediately.
444 */
445
446 if (cs->mstate == MS_LOCKED)
447 procbytes = lock_loop(numbytes, inbuf);
448 else if (inbuf->inputstate & INS_command)
449 procbytes = cmd_loop(numbytes, inbuf);
450 else if (cs->bcs->proto2 == L2_HDLC)
451 procbytes = hdlc_loop(numbytes, inbuf);
452 else
453 procbytes = iraw_loop(numbytes, inbuf);
454 inbuf->head += procbytes;
455
456 /* check for buffer wraparound */
457 if (inbuf->head >= RBUFSIZE)
458 inbuf->head = 0;
459
460 gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
436 } 461 }
437} 462}
438EXPORT_SYMBOL_GPL(gigaset_m10x_input); 463EXPORT_SYMBOL_GPL(gigaset_m10x_input);
@@ -440,16 +465,16 @@ EXPORT_SYMBOL_GPL(gigaset_m10x_input);
440 465
441/* == data output ========================================================== */ 466/* == data output ========================================================== */
442 467
443/* Encoding of a PPP packet into an octet stuffed HDLC frame 468/*
444 * with FCS, opening and closing flags. 469 * Encode a data packet into an octet stuffed HDLC frame with FCS,
470 * opening and closing flags, preserving headroom data.
445 * parameters: 471 * parameters:
446 * skb skb containing original packet (freed upon return) 472 * skb skb containing original packet (freed upon return)
447 * head number of headroom bytes to allocate in result skb
448 * tail number of tailroom bytes to allocate in result skb
449 * Return value: 473 * Return value:
450 * pointer to newly allocated skb containing the result frame 474 * pointer to newly allocated skb containing the result frame
475 * and the original link layer header, NULL on error
451 */ 476 */
452static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail) 477static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
453{ 478{
454 struct sk_buff *hdlc_skb; 479 struct sk_buff *hdlc_skb;
455 __u16 fcs; 480 __u16 fcs;
@@ -471,16 +496,19 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
471 496
472 /* size of new buffer: original size + number of stuffing bytes 497 /* size of new buffer: original size + number of stuffing bytes
473 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes 498 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
499 * + room for link layer header
474 */ 500 */
475 hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head); 501 hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
476 if (!hdlc_skb) { 502 if (!hdlc_skb) {
477 dev_kfree_skb(skb); 503 dev_kfree_skb_any(skb);
478 return NULL; 504 return NULL;
479 } 505 }
480 skb_reserve(hdlc_skb, head);
481 506
482 /* Copy acknowledge request into new skb */ 507 /* Copy link layer header into new skb */
483 memcpy(hdlc_skb->head, skb->head, 2); 508 skb_reset_mac_header(hdlc_skb);
509 skb_reserve(hdlc_skb, skb->mac_len);
510 memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
511 hdlc_skb->mac_len = skb->mac_len;
484 512
485 /* Add flag sequence in front of everything.. */ 513 /* Add flag sequence in front of everything.. */
486 *(skb_put(hdlc_skb, 1)) = PPP_FLAG; 514 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
@@ -511,33 +539,42 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
511 539
512 *(skb_put(hdlc_skb, 1)) = PPP_FLAG; 540 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
513 541
514 dev_kfree_skb(skb); 542 dev_kfree_skb_any(skb);
515 return hdlc_skb; 543 return hdlc_skb;
516} 544}
517 545
518/* Encoding of a raw packet into an octet stuffed bit inverted frame 546/*
547 * Encode a data packet into an octet stuffed raw bit inverted frame,
548 * preserving headroom data.
519 * parameters: 549 * parameters:
520 * skb skb containing original packet (freed upon return) 550 * skb skb containing original packet (freed upon return)
521 * head number of headroom bytes to allocate in result skb
522 * tail number of tailroom bytes to allocate in result skb
523 * Return value: 551 * Return value:
524 * pointer to newly allocated skb containing the result frame 552 * pointer to newly allocated skb containing the result frame
553 * and the original link layer header, NULL on error
525 */ 554 */
526static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) 555static struct sk_buff *iraw_encode(struct sk_buff *skb)
527{ 556{
528 struct sk_buff *iraw_skb; 557 struct sk_buff *iraw_skb;
529 unsigned char c; 558 unsigned char c;
530 unsigned char *cp; 559 unsigned char *cp;
531 int len; 560 int len;
532 561
533 /* worst case: every byte must be stuffed */ 562 /* size of new buffer (worst case = every byte must be stuffed):
534 iraw_skb = dev_alloc_skb(2*skb->len + tail + head); 563 * 2 * original size + room for link layer header
564 */
565 iraw_skb = dev_alloc_skb(2*skb->len + skb->mac_len);
535 if (!iraw_skb) { 566 if (!iraw_skb) {
536 dev_kfree_skb(skb); 567 dev_kfree_skb_any(skb);
537 return NULL; 568 return NULL;
538 } 569 }
539 skb_reserve(iraw_skb, head);
540 570
571 /* copy link layer header into new skb */
572 skb_reset_mac_header(iraw_skb);
573 skb_reserve(iraw_skb, skb->mac_len);
574 memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
575 iraw_skb->mac_len = skb->mac_len;
576
577 /* copy and stuff data */
541 cp = skb->data; 578 cp = skb->data;
542 len = skb->len; 579 len = skb->len;
543 while (len--) { 580 while (len--) {
@@ -546,7 +583,7 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
546 *(skb_put(iraw_skb, 1)) = c; 583 *(skb_put(iraw_skb, 1)) = c;
547 *(skb_put(iraw_skb, 1)) = c; 584 *(skb_put(iraw_skb, 1)) = c;
548 } 585 }
549 dev_kfree_skb(skb); 586 dev_kfree_skb_any(skb);
550 return iraw_skb; 587 return iraw_skb;
551} 588}
552 589
@@ -555,8 +592,10 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
555 * @bcs: B channel descriptor structure. 592 * @bcs: B channel descriptor structure.
556 * @skb: data to send. 593 * @skb: data to send.
557 * 594 *
558 * Called by i4l.c to encode and queue an skb for sending, and start 595 * Called by LL to encode and queue an skb for sending, and start
559 * transmission if necessary. 596 * transmission if necessary.
597 * Once the payload data has been transmitted completely, gigaset_skb_sent()
598 * will be called with the skb's link layer header preserved.
560 * 599 *
561 * Return value: 600 * Return value:
562 * number of bytes accepted for sending (skb->len) if ok, 601 * number of bytes accepted for sending (skb->len) if ok,
@@ -564,24 +603,25 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
564 */ 603 */
565int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) 604int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
566{ 605{
606 struct cardstate *cs = bcs->cs;
567 unsigned len = skb->len; 607 unsigned len = skb->len;
568 unsigned long flags; 608 unsigned long flags;
569 609
570 if (bcs->proto2 == ISDN_PROTO_L2_HDLC) 610 if (bcs->proto2 == L2_HDLC)
571 skb = HDLC_Encode(skb, HW_HDR_LEN, 0); 611 skb = HDLC_Encode(skb);
572 else 612 else
573 skb = iraw_encode(skb, HW_HDR_LEN, 0); 613 skb = iraw_encode(skb);
574 if (!skb) { 614 if (!skb) {
575 dev_err(bcs->cs->dev, 615 dev_err(cs->dev,
576 "unable to allocate memory for encoding!\n"); 616 "unable to allocate memory for encoding!\n");
577 return -ENOMEM; 617 return -ENOMEM;
578 } 618 }
579 619
580 skb_queue_tail(&bcs->squeue, skb); 620 skb_queue_tail(&bcs->squeue, skb);
581 spin_lock_irqsave(&bcs->cs->lock, flags); 621 spin_lock_irqsave(&cs->lock, flags);
582 if (bcs->cs->connected) 622 if (cs->connected)
583 tasklet_schedule(&bcs->cs->write_tasklet); 623 tasklet_schedule(&cs->write_tasklet);
584 spin_unlock_irqrestore(&bcs->cs->lock, flags); 624 spin_unlock_irqrestore(&cs->lock, flags);
585 625
586 return len; /* ok so far */ 626 return len; /* ok so far */
587} 627}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 5ed1d99eb9f3..95ebc5129895 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
57#define USB_SX353_PRODUCT_ID 0x0022 57#define USB_SX353_PRODUCT_ID 0x0022
58 58
59/* table of devices that work with this driver */ 59/* table of devices that work with this driver */
60static const struct usb_device_id gigaset_table [] = { 60static const struct usb_device_id gigaset_table[] = {
61 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) }, 61 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
62 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) }, 62 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
63 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) }, 63 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
@@ -137,7 +137,7 @@ struct bas_cardstate {
137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */ 137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
138 138
139 139
140static struct gigaset_driver *driver = NULL; 140static struct gigaset_driver *driver;
141 141
142/* usb specific object needed to register this driver with the usb subsystem */ 142/* usb specific object needed to register this driver with the usb subsystem */
143static struct usb_driver gigaset_usb_driver = { 143static struct usb_driver gigaset_usb_driver = {
@@ -601,11 +601,12 @@ static int atread_submit(struct cardstate *cs, int timeout)
601 ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size); 601 ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
602 usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev, 602 usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
603 usb_rcvctrlpipe(ucs->udev, 0), 603 usb_rcvctrlpipe(ucs->udev, 0),
604 (unsigned char*) & ucs->dr_cmd_in, 604 (unsigned char *) &ucs->dr_cmd_in,
605 ucs->rcvbuf, ucs->rcvbuf_size, 605 ucs->rcvbuf, ucs->rcvbuf_size,
606 read_ctrl_callback, cs->inbuf); 606 read_ctrl_callback, cs->inbuf);
607 607
608 if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) { 608 ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
609 if (ret != 0) {
609 update_basstate(ucs, 0, BS_ATRDPEND); 610 update_basstate(ucs, 0, BS_ATRDPEND);
610 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n", 611 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
611 get_usb_rcmsg(ret)); 612 get_usb_rcmsg(ret));
@@ -652,13 +653,11 @@ static void read_int_callback(struct urb *urb)
652 return; 653 return;
653 case -ENODEV: /* device removed */ 654 case -ENODEV: /* device removed */
654 case -ESHUTDOWN: /* device shut down */ 655 case -ESHUTDOWN: /* device shut down */
655 //FIXME use this as disconnect indicator?
656 gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__); 656 gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__);
657 return; 657 return;
658 default: /* severe trouble */ 658 default: /* severe trouble */
659 dev_warn(cs->dev, "interrupt read: %s\n", 659 dev_warn(cs->dev, "interrupt read: %s\n",
660 get_usb_statmsg(status)); 660 get_usb_statmsg(status));
661 //FIXME corrective action? resubmission always ok?
662 goto resubmit; 661 goto resubmit;
663 } 662 }
664 663
@@ -742,7 +741,8 @@ static void read_int_callback(struct urb *urb)
742 kfree(ucs->rcvbuf); 741 kfree(ucs->rcvbuf);
743 ucs->rcvbuf_size = 0; 742 ucs->rcvbuf_size = 0;
744 } 743 }
745 if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) { 744 ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
745 if (ucs->rcvbuf == NULL) {
746 spin_unlock_irqrestore(&cs->lock, flags); 746 spin_unlock_irqrestore(&cs->lock, flags);
747 dev_err(cs->dev, "out of memory receiving AT data\n"); 747 dev_err(cs->dev, "out of memory receiving AT data\n");
748 error_reset(cs); 748 error_reset(cs);
@@ -750,12 +750,12 @@ static void read_int_callback(struct urb *urb)
750 } 750 }
751 ucs->rcvbuf_size = l; 751 ucs->rcvbuf_size = l;
752 ucs->retry_cmd_in = 0; 752 ucs->retry_cmd_in = 0;
753 if ((rc = atread_submit(cs, BAS_TIMEOUT)) < 0) { 753 rc = atread_submit(cs, BAS_TIMEOUT);
754 if (rc < 0) {
754 kfree(ucs->rcvbuf); 755 kfree(ucs->rcvbuf);
755 ucs->rcvbuf = NULL; 756 ucs->rcvbuf = NULL;
756 ucs->rcvbuf_size = 0; 757 ucs->rcvbuf_size = 0;
757 if (rc != -ENODEV) { 758 if (rc != -ENODEV) {
758 //FIXME corrective action?
759 spin_unlock_irqrestore(&cs->lock, flags); 759 spin_unlock_irqrestore(&cs->lock, flags);
760 error_reset(cs); 760 error_reset(cs);
761 break; 761 break;
@@ -911,7 +911,7 @@ static int starturbs(struct bc_state *bcs)
911 int rc; 911 int rc;
912 912
913 /* initialize L2 reception */ 913 /* initialize L2 reception */
914 if (bcs->proto2 == ISDN_PROTO_L2_HDLC) 914 if (bcs->proto2 == L2_HDLC)
915 bcs->inputstate |= INS_flag_hunt; 915 bcs->inputstate |= INS_flag_hunt;
916 916
917 /* submit all isochronous input URBs */ 917 /* submit all isochronous input URBs */
@@ -940,7 +940,8 @@ static int starturbs(struct bc_state *bcs)
940 } 940 }
941 941
942 dump_urb(DEBUG_ISO, "Initial isoc read", urb); 942 dump_urb(DEBUG_ISO, "Initial isoc read", urb);
943 if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0) 943 rc = usb_submit_urb(urb, GFP_ATOMIC);
944 if (rc != 0)
944 goto error; 945 goto error;
945 } 946 }
946 947
@@ -1045,7 +1046,8 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1045 1046
1046 /* compute frame length according to flow control */ 1047 /* compute frame length according to flow control */
1047 ifd->length = BAS_NORMFRAME; 1048 ifd->length = BAS_NORMFRAME;
1048 if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) { 1049 corrbytes = atomic_read(&ubc->corrbytes);
1050 if (corrbytes != 0) {
1049 gig_dbg(DEBUG_ISO, "%s: corrbytes=%d", 1051 gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
1050 __func__, corrbytes); 1052 __func__, corrbytes);
1051 if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME) 1053 if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
@@ -1064,7 +1066,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1064 "%s: buffer busy at frame %d", 1066 "%s: buffer busy at frame %d",
1065 __func__, nframe); 1067 __func__, nframe);
1066 /* tasklet will be restarted from 1068 /* tasklet will be restarted from
1067 gigaset_send_skb() */ 1069 gigaset_isoc_send_skb() */
1068 } else { 1070 } else {
1069 dev_err(ucx->bcs->cs->dev, 1071 dev_err(ucx->bcs->cs->dev,
1070 "%s: buffer error %d at frame %d\n", 1072 "%s: buffer error %d at frame %d\n",
@@ -1284,7 +1286,8 @@ static void read_iso_tasklet(unsigned long data)
1284 for (;;) { 1286 for (;;) {
1285 /* retrieve URB */ 1287 /* retrieve URB */
1286 spin_lock_irqsave(&ubc->isoinlock, flags); 1288 spin_lock_irqsave(&ubc->isoinlock, flags);
1287 if (!(urb = ubc->isoindone)) { 1289 urb = ubc->isoindone;
1290 if (!urb) {
1288 spin_unlock_irqrestore(&ubc->isoinlock, flags); 1291 spin_unlock_irqrestore(&ubc->isoinlock, flags);
1289 return; 1292 return;
1290 } 1293 }
@@ -1371,7 +1374,7 @@ static void read_iso_tasklet(unsigned long data)
1371 "isochronous read: %d data bytes missing\n", 1374 "isochronous read: %d data bytes missing\n",
1372 totleft); 1375 totleft);
1373 1376
1374 error: 1377error:
1375 /* URB processed, resubmit */ 1378 /* URB processed, resubmit */
1376 for (frame = 0; frame < BAS_NUMFRAMES; frame++) { 1379 for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
1377 urb->iso_frame_desc[frame].status = 0; 1380 urb->iso_frame_desc[frame].status = 0;
@@ -1568,7 +1571,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
1568 ucs->dr_ctrl.wLength = 0; 1571 ucs->dr_ctrl.wLength = 0;
1569 usb_fill_control_urb(ucs->urb_ctrl, ucs->udev, 1572 usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
1570 usb_sndctrlpipe(ucs->udev, 0), 1573 usb_sndctrlpipe(ucs->udev, 0),
1571 (unsigned char*) &ucs->dr_ctrl, NULL, 0, 1574 (unsigned char *) &ucs->dr_ctrl, NULL, 0,
1572 write_ctrl_callback, ucs); 1575 write_ctrl_callback, ucs);
1573 ucs->retry_ctrl = 0; 1576 ucs->retry_ctrl = 0;
1574 ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC); 1577 ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
@@ -1621,7 +1624,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1621 return -EHOSTUNREACH; 1624 return -EHOSTUNREACH;
1622 } 1625 }
1623 1626
1624 if ((ret = starturbs(bcs)) < 0) { 1627 ret = starturbs(bcs);
1628 if (ret < 0) {
1625 dev_err(cs->dev, 1629 dev_err(cs->dev,
1626 "could not start isochronous I/O for channel B%d: %s\n", 1630 "could not start isochronous I/O for channel B%d: %s\n",
1627 bcs->channel + 1, 1631 bcs->channel + 1,
@@ -1633,7 +1637,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1633 } 1637 }
1634 1638
1635 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL; 1639 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
1636 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) { 1640 ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
1641 if (ret < 0) {
1637 dev_err(cs->dev, "could not open channel B%d\n", 1642 dev_err(cs->dev, "could not open channel B%d\n",
1638 bcs->channel + 1); 1643 bcs->channel + 1);
1639 stopurbs(bcs->hw.bas); 1644 stopurbs(bcs->hw.bas);
@@ -1677,7 +1682,8 @@ static int gigaset_close_bchannel(struct bc_state *bcs)
1677 1682
1678 /* channel running: tell device to close it */ 1683 /* channel running: tell device to close it */
1679 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL; 1684 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
1680 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) 1685 ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
1686 if (ret < 0)
1681 dev_err(cs->dev, "closing channel B%d failed\n", 1687 dev_err(cs->dev, "closing channel B%d failed\n",
1682 bcs->channel + 1); 1688 bcs->channel + 1);
1683 1689
@@ -1703,10 +1709,12 @@ static void complete_cb(struct cardstate *cs)
1703 gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, 1709 gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD,
1704 "write_command: sent %u bytes, %u left", 1710 "write_command: sent %u bytes, %u left",
1705 cs->curlen, cs->cmdbytes); 1711 cs->curlen, cs->cmdbytes);
1706 if ((cs->cmdbuf = cb->next) != NULL) { 1712 if (cb->next != NULL) {
1713 cs->cmdbuf = cb->next;
1707 cs->cmdbuf->prev = NULL; 1714 cs->cmdbuf->prev = NULL;
1708 cs->curlen = cs->cmdbuf->len; 1715 cs->curlen = cs->cmdbuf->len;
1709 } else { 1716 } else {
1717 cs->cmdbuf = NULL;
1710 cs->lastcmdbuf = NULL; 1718 cs->lastcmdbuf = NULL;
1711 cs->curlen = 0; 1719 cs->curlen = 0;
1712 } 1720 }
@@ -1833,7 +1841,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
1833 ucs->dr_cmd_out.wLength = cpu_to_le16(len); 1841 ucs->dr_cmd_out.wLength = cpu_to_le16(len);
1834 usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev, 1842 usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
1835 usb_sndctrlpipe(ucs->udev, 0), 1843 usb_sndctrlpipe(ucs->udev, 0),
1836 (unsigned char*) &ucs->dr_cmd_out, buf, len, 1844 (unsigned char *) &ucs->dr_cmd_out, buf, len,
1837 write_command_callback, cs); 1845 write_command_callback, cs);
1838 rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC); 1846 rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
1839 if (unlikely(rc)) { 1847 if (unlikely(rc)) {
@@ -1953,7 +1961,8 @@ static int gigaset_write_cmd(struct cardstate *cs,
1953 1961
1954 if (len > IF_WRITEBUF) 1962 if (len > IF_WRITEBUF)
1955 len = IF_WRITEBUF; 1963 len = IF_WRITEBUF;
1956 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 1964 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
1965 if (!cb) {
1957 dev_err(cs->dev, "%s: out of memory\n", __func__); 1966 dev_err(cs->dev, "%s: out of memory\n", __func__);
1958 rc = -ENOMEM; 1967 rc = -ENOMEM;
1959 goto notqueued; 1968 goto notqueued;
@@ -2100,14 +2109,15 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2100 } 2109 }
2101 ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL; 2110 ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
2102 ubc->numsub = 0; 2111 ubc->numsub = 0;
2103 if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) { 2112 ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
2113 if (!ubc->isooutbuf) {
2104 pr_err("out of memory\n"); 2114 pr_err("out of memory\n");
2105 kfree(ubc); 2115 kfree(ubc);
2106 bcs->hw.bas = NULL; 2116 bcs->hw.bas = NULL;
2107 return 0; 2117 return 0;
2108 } 2118 }
2109 tasklet_init(&ubc->sent_tasklet, 2119 tasklet_init(&ubc->sent_tasklet,
2110 &write_iso_tasklet, (unsigned long) bcs); 2120 write_iso_tasklet, (unsigned long) bcs);
2111 2121
2112 spin_lock_init(&ubc->isoinlock); 2122 spin_lock_init(&ubc->isoinlock);
2113 for (i = 0; i < BAS_INURBS; ++i) 2123 for (i = 0; i < BAS_INURBS; ++i)
@@ -2128,7 +2138,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2128 ubc->shared0s = 0; 2138 ubc->shared0s = 0;
2129 ubc->stolen0s = 0; 2139 ubc->stolen0s = 0;
2130 tasklet_init(&ubc->rcvd_tasklet, 2140 tasklet_init(&ubc->rcvd_tasklet,
2131 &read_iso_tasklet, (unsigned long) bcs); 2141 read_iso_tasklet, (unsigned long) bcs);
2132 return 1; 2142 return 1;
2133} 2143}
2134 2144
@@ -2252,7 +2262,8 @@ static int gigaset_probe(struct usb_interface *interface,
2252 gig_dbg(DEBUG_ANY, 2262 gig_dbg(DEBUG_ANY,
2253 "%s: wrong alternate setting %d - trying to switch", 2263 "%s: wrong alternate setting %d - trying to switch",
2254 __func__, hostif->desc.bAlternateSetting); 2264 __func__, hostif->desc.bAlternateSetting);
2255 if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) { 2265 if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
2266 < 0) {
2256 dev_warn(&udev->dev, "usb_set_interface failed, " 2267 dev_warn(&udev->dev, "usb_set_interface failed, "
2257 "device %d interface %d altsetting %d\n", 2268 "device %d interface %d altsetting %d\n",
2258 udev->devnum, hostif->desc.bInterfaceNumber, 2269 udev->devnum, hostif->desc.bInterfaceNumber,
@@ -2321,14 +2332,16 @@ static int gigaset_probe(struct usb_interface *interface,
2321 (endpoint->bEndpointAddress) & 0x0f), 2332 (endpoint->bEndpointAddress) & 0x0f),
2322 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs, 2333 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
2323 endpoint->bInterval); 2334 endpoint->bInterval);
2324 if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) { 2335 rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
2336 if (rc != 0) {
2325 dev_err(cs->dev, "could not submit interrupt URB: %s\n", 2337 dev_err(cs->dev, "could not submit interrupt URB: %s\n",
2326 get_usb_rcmsg(rc)); 2338 get_usb_rcmsg(rc));
2327 goto error; 2339 goto error;
2328 } 2340 }
2329 2341
2330 /* tell the device that the driver is ready */ 2342 /* tell the device that the driver is ready */
2331 if ((rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0) 2343 rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
2344 if (rc != 0)
2332 goto error; 2345 goto error;
2333 2346
2334 /* tell common part that the device is ready */ 2347 /* tell common part that the device is ready */
@@ -2524,9 +2537,10 @@ static int __init bas_gigaset_init(void)
2524 int result; 2537 int result;
2525 2538
2526 /* allocate memory for our driver state and intialize it */ 2539 /* allocate memory for our driver state and intialize it */
2527 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 2540 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
2528 GIGASET_MODULENAME, GIGASET_DEVNAME, 2541 GIGASET_MODULENAME, GIGASET_DEVNAME,
2529 &gigops, THIS_MODULE)) == NULL) 2542 &gigops, THIS_MODULE);
2543 if (driver == NULL)
2530 goto error; 2544 goto error;
2531 2545
2532 /* register this driver with the USB subsystem */ 2546 /* register this driver with the USB subsystem */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
new file mode 100644
index 000000000000..3f5cd06af104
--- /dev/null
+++ b/drivers/isdn/gigaset/capi.c
@@ -0,0 +1,2292 @@
1/*
2 * Kernel CAPI interface for the Gigaset driver
3 *
4 * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
5 *
6 * =====================================================================
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 * =====================================================================
12 */
13
14#include "gigaset.h"
15#include <linux/ctype.h>
16#include <linux/isdn/capilli.h>
17#include <linux/isdn/capicmd.h>
18#include <linux/isdn/capiutil.h>
19
20/* missing from kernelcapi.h */
21#define CapiNcpiNotSupportedByProtocol 0x0001
22#define CapiFlagsNotSupportedByProtocol 0x0002
23#define CapiAlertAlreadySent 0x0003
24#define CapiFacilitySpecificFunctionNotSupported 0x3011
25
26/* missing from capicmd.h */
27#define CAPI_CONNECT_IND_BASELEN (CAPI_MSG_BASELEN+4+2+8*1)
28#define CAPI_CONNECT_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN+4+3*1)
29#define CAPI_CONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN+4+1)
30#define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN+4+1)
31#define CAPI_DATA_B3_REQ_LEN64 (CAPI_MSG_BASELEN+4+4+2+2+2+8)
32#define CAPI_DATA_B3_CONF_LEN (CAPI_MSG_BASELEN+4+2+2)
33#define CAPI_DISCONNECT_IND_LEN (CAPI_MSG_BASELEN+4+2)
34#define CAPI_DISCONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN+4+2+1)
35#define CAPI_FACILITY_CONF_BASELEN (CAPI_MSG_BASELEN+4+2+2+1)
36/* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */
37#define CAPI_STDCONF_LEN (CAPI_MSG_BASELEN+4+2)
38
39#define CAPI_FACILITY_HANDSET 0x0000
40#define CAPI_FACILITY_DTMF 0x0001
41#define CAPI_FACILITY_V42BIS 0x0002
42#define CAPI_FACILITY_SUPPSVC 0x0003
43#define CAPI_FACILITY_WAKEUP 0x0004
44#define CAPI_FACILITY_LI 0x0005
45
46#define CAPI_SUPPSVC_GETSUPPORTED 0x0000
47
48/* missing from capiutil.h */
49#define CAPIMSG_PLCI_PART(m) CAPIMSG_U8(m, 9)
50#define CAPIMSG_NCCI_PART(m) CAPIMSG_U16(m, 10)
51#define CAPIMSG_HANDLE_REQ(m) CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */
52#define CAPIMSG_FLAGS(m) CAPIMSG_U16(m, 20)
53#define CAPIMSG_SETCONTROLLER(m, contr) capimsg_setu8(m, 8, contr)
54#define CAPIMSG_SETPLCI_PART(m, plci) capimsg_setu8(m, 9, plci)
55#define CAPIMSG_SETNCCI_PART(m, ncci) capimsg_setu16(m, 10, ncci)
56#define CAPIMSG_SETFLAGS(m, flags) capimsg_setu16(m, 20, flags)
57
58/* parameters with differing location in DATA_B3_CONF/_RESP: */
59#define CAPIMSG_SETHANDLE_CONF(m, handle) capimsg_setu16(m, 12, handle)
60#define CAPIMSG_SETINFO_CONF(m, info) capimsg_setu16(m, 14, info)
61
62/* Flags (DATA_B3_REQ/_IND) */
63#define CAPI_FLAGS_DELIVERY_CONFIRMATION 0x04
64#define CAPI_FLAGS_RESERVED (~0x1f)
65
66/* buffer sizes */
67#define MAX_BC_OCTETS 11
68#define MAX_HLC_OCTETS 3
69#define MAX_NUMBER_DIGITS 20
70#define MAX_FMT_IE_LEN 20
71
72/* values for gigaset_capi_appl.connected */
73#define APCONN_NONE 0 /* inactive/listening */
74#define APCONN_SETUP 1 /* connecting */
75#define APCONN_ACTIVE 2 /* B channel up */
76
77/* registered application data structure */
78struct gigaset_capi_appl {
79 struct list_head ctrlist;
80 struct gigaset_capi_appl *bcnext;
81 u16 id;
82 u16 nextMessageNumber;
83 u32 listenInfoMask;
84 u32 listenCIPmask;
85 int connected;
86};
87
88/* CAPI specific controller data structure */
89struct gigaset_capi_ctr {
90 struct capi_ctr ctr;
91 struct list_head appls;
92 struct sk_buff_head sendqueue;
93 atomic_t sendqlen;
94 /* two _cmsg structures possibly used concurrently: */
95 _cmsg hcmsg; /* for message composition triggered from hardware */
96 _cmsg acmsg; /* for dissection of messages sent from application */
97 u8 bc_buf[MAX_BC_OCTETS+1];
98 u8 hlc_buf[MAX_HLC_OCTETS+1];
99 u8 cgpty_buf[MAX_NUMBER_DIGITS+3];
100 u8 cdpty_buf[MAX_NUMBER_DIGITS+2];
101};
102
103/* CIP Value table (from CAPI 2.0 standard, ch. 6.1) */
104static struct {
105 u8 *bc;
106 u8 *hlc;
107} cip2bchlc[] = {
108 [1] = { "8090A3", NULL },
109 /* Speech (A-law) */
110 [2] = { "8890", NULL },
111 /* Unrestricted digital information */
112 [3] = { "8990", NULL },
113 /* Restricted digital information */
114 [4] = { "9090A3", NULL },
115 /* 3,1 kHz audio (A-law) */
116 [5] = { "9190", NULL },
117 /* 7 kHz audio */
118 [6] = { "9890", NULL },
119 /* Video */
120 [7] = { "88C0C6E6", NULL },
121 /* Packet mode */
122 [8] = { "8890218F", NULL },
123 /* 56 kbit/s rate adaptation */
124 [9] = { "9190A5", NULL },
125 /* Unrestricted digital information with tones/announcements */
126 [16] = { "8090A3", "9181" },
127 /* Telephony */
128 [17] = { "9090A3", "9184" },
129 /* Group 2/3 facsimile */
130 [18] = { "8890", "91A1" },
131 /* Group 4 facsimile Class 1 */
132 [19] = { "8890", "91A4" },
133 /* Teletex service basic and mixed mode
134 and Group 4 facsimile service Classes II and III */
135 [20] = { "8890", "91A8" },
136 /* Teletex service basic and processable mode */
137 [21] = { "8890", "91B1" },
138 /* Teletex service basic mode */
139 [22] = { "8890", "91B2" },
140 /* International interworking for Videotex */
141 [23] = { "8890", "91B5" },
142 /* Telex */
143 [24] = { "8890", "91B8" },
144 /* Message Handling Systems in accordance with X.400 */
145 [25] = { "8890", "91C1" },
146 /* OSI application in accordance with X.200 */
147 [26] = { "9190A5", "9181" },
148 /* 7 kHz telephony */
149 [27] = { "9190A5", "916001" },
150 /* Video telephony, first connection */
151 [28] = { "8890", "916002" },
152 /* Video telephony, second connection */
153};
154
155/*
156 * helper functions
157 * ================
158 */
159
160/*
161 * emit unsupported parameter warning
162 */
163static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
164 char *msgname, char *paramname)
165{
166 if (param && *param)
167 dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
168 msgname, paramname);
169}
170
171/*
172 * check for legal hex digit
173 */
174static inline int ishexdigit(char c)
175{
176 if (c >= '0' && c <= '9')
177 return 1;
178 if (c >= 'A' && c <= 'F')
179 return 1;
180 if (c >= 'a' && c <= 'f')
181 return 1;
182 return 0;
183}
184
185/*
186 * convert hex to binary
187 */
188static inline u8 hex2bin(char c)
189{
190 int result = c & 0x0f;
191 if (c & 0x40)
192 result += 9;
193 return result;
194}
195
196/*
197 * convert an IE from Gigaset hex string to ETSI binary representation
198 * including length byte
199 * return value: result length, -1 on error
200 */
201static int encode_ie(char *in, u8 *out, int maxlen)
202{
203 int l = 0;
204 while (*in) {
205 if (!ishexdigit(in[0]) || !ishexdigit(in[1]) || l >= maxlen)
206 return -1;
207 out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
208 in += 2;
209 }
210 out[0] = l;
211 return l;
212}
213
214/*
215 * convert an IE from ETSI binary representation including length byte
216 * to Gigaset hex string
217 */
218static void decode_ie(u8 *in, char *out)
219{
220 int i = *in;
221 while (i-- > 0) {
222 /* ToDo: conversion to upper case necessary? */
223 *out++ = toupper(hex_asc_hi(*++in));
224 *out++ = toupper(hex_asc_lo(*in));
225 }
226}
227
228/*
229 * retrieve application data structure for an application ID
230 */
231static inline struct gigaset_capi_appl *
232get_appl(struct gigaset_capi_ctr *iif, u16 appl)
233{
234 struct gigaset_capi_appl *ap;
235
236 list_for_each_entry(ap, &iif->appls, ctrlist)
237 if (ap->id == appl)
238 return ap;
239 return NULL;
240}
241
242/*
243 * dump CAPI message to kernel messages for debugging
244 */
245static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
246{
247#ifdef CONFIG_GIGASET_DEBUG
248 _cdebbuf *cdb;
249
250 if (!(gigaset_debuglevel & level))
251 return;
252
253 cdb = capi_cmsg2str(p);
254 if (cdb) {
255 gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
256 cdebbuf_free(cdb);
257 } else {
258 gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
259 capi_cmd2str(p->Command, p->Subcommand));
260 }
261#endif
262}
263
264static inline void dump_rawmsg(enum debuglevel level, const char *tag,
265 unsigned char *data)
266{
267#ifdef CONFIG_GIGASET_DEBUG
268 char *dbgline;
269 int i, l;
270
271 if (!(gigaset_debuglevel & level))
272 return;
273
274 l = CAPIMSG_LEN(data);
275 if (l < 12) {
276 gig_dbg(level, "%s: ??? LEN=%04d", tag, l);
277 return;
278 }
279 gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x",
280 tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data),
281 CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
282 CAPIMSG_CONTROL(data));
283 l -= 12;
284 dbgline = kmalloc(3*l, GFP_ATOMIC);
285 if (!dbgline)
286 return;
287 for (i = 0; i < l; i++) {
288 dbgline[3*i] = hex_asc_hi(data[12+i]);
289 dbgline[3*i+1] = hex_asc_lo(data[12+i]);
290 dbgline[3*i+2] = ' ';
291 }
292 dbgline[3*l-1] = '\0';
293 gig_dbg(level, " %s", dbgline);
294 kfree(dbgline);
295 if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 &&
296 (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ ||
297 CAPIMSG_SUBCOMMAND(data) == CAPI_IND) &&
298 CAPIMSG_DATALEN(data) > 0) {
299 l = CAPIMSG_DATALEN(data);
300 dbgline = kmalloc(3*l, GFP_ATOMIC);
301 if (!dbgline)
302 return;
303 data += CAPIMSG_LEN(data);
304 for (i = 0; i < l; i++) {
305 dbgline[3*i] = hex_asc_hi(data[i]);
306 dbgline[3*i+1] = hex_asc_lo(data[i]);
307 dbgline[3*i+2] = ' ';
308 }
309 dbgline[3*l-1] = '\0';
310 gig_dbg(level, " %s", dbgline);
311 kfree(dbgline);
312 }
313#endif
314}
315
316/*
317 * format CAPI IE as string
318 */
319
320static const char *format_ie(const char *ie)
321{
322 static char result[3*MAX_FMT_IE_LEN];
323 int len, count;
324 char *pout = result;
325
326 if (!ie)
327 return "NULL";
328
329 count = len = ie[0];
330 if (count > MAX_FMT_IE_LEN)
331 count = MAX_FMT_IE_LEN-1;
332 while (count--) {
333 *pout++ = hex_asc_hi(*++ie);
334 *pout++ = hex_asc_lo(*ie);
335 *pout++ = ' ';
336 }
337 if (len > MAX_FMT_IE_LEN) {
338 *pout++ = '.';
339 *pout++ = '.';
340 *pout++ = '.';
341 }
342 *--pout = 0;
343 return result;
344}
345
346
347/*
348 * driver interface functions
349 * ==========================
350 */
351
352/**
353 * gigaset_skb_sent() - acknowledge transmission of outgoing skb
354 * @bcs: B channel descriptor structure.
355 * @skb: sent data.
356 *
357 * Called by hardware module {bas,ser,usb}_gigaset when the data in a
358 * skb has been successfully sent, for signalling completion to the LL.
359 */
360void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
361{
362 struct cardstate *cs = bcs->cs;
363 struct gigaset_capi_ctr *iif = cs->iif;
364 struct gigaset_capi_appl *ap = bcs->ap;
365 unsigned char *req = skb_mac_header(dskb);
366 struct sk_buff *cskb;
367 u16 flags;
368
369 /* update statistics */
370 ++bcs->trans_up;
371
372 if (!ap) {
373 dev_err(cs->dev, "%s: no application\n", __func__);
374 return;
375 }
376
377 /* don't send further B3 messages if disconnected */
378 if (ap->connected < APCONN_ACTIVE) {
379 gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
380 return;
381 }
382
383 /* ToDo: honor unset "delivery confirmation" bit */
384 flags = CAPIMSG_FLAGS(req);
385
386 /* build DATA_B3_CONF message */
387 cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
388 if (!cskb) {
389 dev_err(cs->dev, "%s: out of memory\n", __func__);
390 return;
391 }
392 /* frequent message, avoid _cmsg overhead */
393 CAPIMSG_SETLEN(cskb->data, CAPI_DATA_B3_CONF_LEN);
394 CAPIMSG_SETAPPID(cskb->data, ap->id);
395 CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3);
396 CAPIMSG_SETSUBCOMMAND(cskb->data, CAPI_CONF);
397 CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(req));
398 CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr);
399 CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1);
400 CAPIMSG_SETNCCI_PART(cskb->data, 1);
401 CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(req));
402 if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION)
403 CAPIMSG_SETINFO_CONF(cskb->data,
404 CapiFlagsNotSupportedByProtocol);
405 else
406 CAPIMSG_SETINFO_CONF(cskb->data, CAPI_NOERROR);
407
408 /* emit message */
409 dump_rawmsg(DEBUG_LLDATA, "DATA_B3_CONF", cskb->data);
410 capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
411}
412EXPORT_SYMBOL_GPL(gigaset_skb_sent);
413
414/**
415 * gigaset_skb_rcvd() - pass received skb to LL
416 * @bcs: B channel descriptor structure.
417 * @skb: received data.
418 *
419 * Called by hardware module {bas,ser,usb}_gigaset when user data has
420 * been successfully received, for passing to the LL.
421 * Warning: skb must not be accessed anymore!
422 */
423void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
424{
425 struct cardstate *cs = bcs->cs;
426 struct gigaset_capi_ctr *iif = cs->iif;
427 struct gigaset_capi_appl *ap = bcs->ap;
428 int len = skb->len;
429
430 /* update statistics */
431 bcs->trans_down++;
432
433 if (!ap) {
434 dev_err(cs->dev, "%s: no application\n", __func__);
435 return;
436 }
437
438 /* don't send further B3 messages if disconnected */
439 if (ap->connected < APCONN_ACTIVE) {
440 gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
441 dev_kfree_skb_any(skb);
442 return;
443 }
444
445 /*
446 * prepend DATA_B3_IND message to payload
447 * Parameters: NCCI = 1, all others 0/unused
448 * frequent message, avoid _cmsg overhead
449 */
450 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
451 CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
452 CAPIMSG_SETAPPID(skb->data, ap->id);
453 CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
454 CAPIMSG_SETSUBCOMMAND(skb->data, CAPI_IND);
455 CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
456 CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
457 CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
458 CAPIMSG_SETNCCI_PART(skb->data, 1);
459 /* Data parameter not used */
460 CAPIMSG_SETDATALEN(skb->data, len);
461 /* Data handle parameter not used */
462 CAPIMSG_SETFLAGS(skb->data, 0);
463 /* Data64 parameter not present */
464
465 /* emit message */
466 dump_rawmsg(DEBUG_LLDATA, "DATA_B3_IND", skb->data);
467 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
468}
469EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
470
471/**
472 * gigaset_isdn_rcv_err() - signal receive error
473 * @bcs: B channel descriptor structure.
474 *
475 * Called by hardware module {bas,ser,usb}_gigaset when a receive error
476 * has occurred, for signalling to the LL.
477 */
478void gigaset_isdn_rcv_err(struct bc_state *bcs)
479{
480 /* if currently ignoring packets, just count down */
481 if (bcs->ignore) {
482 bcs->ignore--;
483 return;
484 }
485
486 /* update statistics */
487 bcs->corrupted++;
488
489 /* ToDo: signal error -> LL */
490}
491EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
492
493/**
494 * gigaset_isdn_icall() - signal incoming call
495 * @at_state: connection state structure.
496 *
497 * Called by main module at tasklet level to notify the LL that an incoming
498 * call has been received. @at_state contains the parameters of the call.
499 *
500 * Return value: call disposition (ICALL_*)
501 */
502int gigaset_isdn_icall(struct at_state_t *at_state)
503{
504 struct cardstate *cs = at_state->cs;
505 struct bc_state *bcs = at_state->bcs;
506 struct gigaset_capi_ctr *iif = cs->iif;
507 struct gigaset_capi_appl *ap;
508 u32 actCIPmask;
509 struct sk_buff *skb;
510 unsigned int msgsize;
511 int i;
512
513 /*
514 * ToDo: signal calls without a free B channel, too
515 * (requires a u8 handle for the at_state structure that can
516 * be stored in the PLCI and used in the CONNECT_RESP message
517 * handler to retrieve it)
518 */
519 if (!bcs)
520 return ICALL_IGNORE;
521
522 /* prepare CONNECT_IND message, using B channel number as PLCI */
523 capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0,
524 iif->ctr.cnr | ((bcs->channel + 1) << 8));
525
526 /* minimum size, all structs empty */
527 msgsize = CAPI_CONNECT_IND_BASELEN;
528
529 /* Bearer Capability (mandatory) */
530 if (at_state->str_var[STR_ZBC]) {
531 /* pass on BC from Gigaset */
532 if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf,
533 MAX_BC_OCTETS) < 0) {
534 dev_warn(cs->dev, "RING ignored - bad BC %s\n",
535 at_state->str_var[STR_ZBC]);
536 return ICALL_IGNORE;
537 }
538
539 /* look up corresponding CIP value */
540 iif->hcmsg.CIPValue = 0; /* default if nothing found */
541 for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
542 if (cip2bchlc[i].bc != NULL &&
543 cip2bchlc[i].hlc == NULL &&
544 !strcmp(cip2bchlc[i].bc,
545 at_state->str_var[STR_ZBC])) {
546 iif->hcmsg.CIPValue = i;
547 break;
548 }
549 } else {
550 /* no BC (internal call): assume CIP 1 (speech, A-law) */
551 iif->hcmsg.CIPValue = 1;
552 encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS);
553 }
554 iif->hcmsg.BC = iif->bc_buf;
555 msgsize += iif->hcmsg.BC[0];
556
557 /* High Layer Compatibility (optional) */
558 if (at_state->str_var[STR_ZHLC]) {
559 /* pass on HLC from Gigaset */
560 if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf,
561 MAX_HLC_OCTETS) < 0) {
562 dev_warn(cs->dev, "RING ignored - bad HLC %s\n",
563 at_state->str_var[STR_ZHLC]);
564 return ICALL_IGNORE;
565 }
566 iif->hcmsg.HLC = iif->hlc_buf;
567 msgsize += iif->hcmsg.HLC[0];
568
569 /* look up corresponding CIP value */
570 /* keep BC based CIP value if none found */
571 if (at_state->str_var[STR_ZBC])
572 for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
573 if (cip2bchlc[i].hlc != NULL &&
574 !strcmp(cip2bchlc[i].hlc,
575 at_state->str_var[STR_ZHLC]) &&
576 !strcmp(cip2bchlc[i].bc,
577 at_state->str_var[STR_ZBC])) {
578 iif->hcmsg.CIPValue = i;
579 break;
580 }
581 }
582
583 /* Called Party Number (optional) */
584 if (at_state->str_var[STR_ZCPN]) {
585 i = strlen(at_state->str_var[STR_ZCPN]);
586 if (i > MAX_NUMBER_DIGITS) {
587 dev_warn(cs->dev, "RING ignored - bad number %s\n",
588 at_state->str_var[STR_ZBC]);
589 return ICALL_IGNORE;
590 }
591 iif->cdpty_buf[0] = i + 1;
592 iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */
593 memcpy(iif->cdpty_buf+2, at_state->str_var[STR_ZCPN], i);
594 iif->hcmsg.CalledPartyNumber = iif->cdpty_buf;
595 msgsize += iif->hcmsg.CalledPartyNumber[0];
596 }
597
598 /* Calling Party Number (optional) */
599 if (at_state->str_var[STR_NMBR]) {
600 i = strlen(at_state->str_var[STR_NMBR]);
601 if (i > MAX_NUMBER_DIGITS) {
602 dev_warn(cs->dev, "RING ignored - bad number %s\n",
603 at_state->str_var[STR_ZBC]);
604 return ICALL_IGNORE;
605 }
606 iif->cgpty_buf[0] = i + 2;
607 iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */
608 iif->cgpty_buf[2] = 0x80; /* pres. allowed, not screened */
609 memcpy(iif->cgpty_buf+3, at_state->str_var[STR_NMBR], i);
610 iif->hcmsg.CallingPartyNumber = iif->cgpty_buf;
611 msgsize += iif->hcmsg.CallingPartyNumber[0];
612 }
613
614 /* remaining parameters (not supported, always left NULL):
615 * - CalledPartySubaddress
616 * - CallingPartySubaddress
617 * - AdditionalInfo
618 * - BChannelinformation
619 * - Keypadfacility
620 * - Useruserdata
621 * - Facilitydataarray
622 */
623
624 gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s",
625 iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue,
626 format_ie(iif->hcmsg.BC));
627 gig_dbg(DEBUG_CMD, "icall: HLC %s",
628 format_ie(iif->hcmsg.HLC));
629 gig_dbg(DEBUG_CMD, "icall: CgPty %s",
630 format_ie(iif->hcmsg.CallingPartyNumber));
631 gig_dbg(DEBUG_CMD, "icall: CdPty %s",
632 format_ie(iif->hcmsg.CalledPartyNumber));
633
634 /* scan application list for matching listeners */
635 bcs->ap = NULL;
636 actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
637 list_for_each_entry(ap, &iif->appls, ctrlist)
638 if (actCIPmask & ap->listenCIPmask) {
639 /* build CONNECT_IND message for this application */
640 iif->hcmsg.ApplId = ap->id;
641 iif->hcmsg.Messagenumber = ap->nextMessageNumber++;
642
643 skb = alloc_skb(msgsize, GFP_ATOMIC);
644 if (!skb) {
645 dev_err(cs->dev, "%s: out of memory\n",
646 __func__);
647 break;
648 }
649 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
650 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
651
652 /* add to listeners on this B channel, update state */
653 ap->bcnext = bcs->ap;
654 bcs->ap = ap;
655 bcs->chstate |= CHS_NOTIFY_LL;
656 ap->connected = APCONN_SETUP;
657
658 /* emit message */
659 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
660 }
661
662 /*
663 * Return "accept" if any listeners.
664 * Gigaset will send ALERTING.
665 * There doesn't seem to be a way to avoid this.
666 */
667 return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE;
668}
669
670/*
671 * send a DISCONNECT_IND message to an application
672 * does not sleep, clobbers the controller's hcmsg structure
673 */
674static void send_disconnect_ind(struct bc_state *bcs,
675 struct gigaset_capi_appl *ap, u16 reason)
676{
677 struct cardstate *cs = bcs->cs;
678 struct gigaset_capi_ctr *iif = cs->iif;
679 struct sk_buff *skb;
680
681 if (ap->connected == APCONN_NONE)
682 return;
683
684 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
685 ap->nextMessageNumber++,
686 iif->ctr.cnr | ((bcs->channel + 1) << 8));
687 iif->hcmsg.Reason = reason;
688 skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
689 if (!skb) {
690 dev_err(cs->dev, "%s: out of memory\n", __func__);
691 return;
692 }
693 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN));
694 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
695 ap->connected = APCONN_NONE;
696 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
697}
698
699/*
700 * send a DISCONNECT_B3_IND message to an application
701 * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0
702 * does not sleep, clobbers the controller's hcmsg structure
703 */
704static void send_disconnect_b3_ind(struct bc_state *bcs,
705 struct gigaset_capi_appl *ap)
706{
707 struct cardstate *cs = bcs->cs;
708 struct gigaset_capi_ctr *iif = cs->iif;
709 struct sk_buff *skb;
710
711 /* nothing to do if no logical connection active */
712 if (ap->connected < APCONN_ACTIVE)
713 return;
714 ap->connected = APCONN_SETUP;
715
716 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
717 ap->nextMessageNumber++,
718 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
719 skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
720 if (!skb) {
721 dev_err(cs->dev, "%s: out of memory\n", __func__);
722 return;
723 }
724 capi_cmsg2message(&iif->hcmsg,
725 __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN));
726 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
727 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
728}
729
730/**
731 * gigaset_isdn_connD() - signal D channel connect
732 * @bcs: B channel descriptor structure.
733 *
734 * Called by main module at tasklet level to notify the LL that the D channel
735 * connection has been established.
736 */
737void gigaset_isdn_connD(struct bc_state *bcs)
738{
739 struct cardstate *cs = bcs->cs;
740 struct gigaset_capi_ctr *iif = cs->iif;
741 struct gigaset_capi_appl *ap = bcs->ap;
742 struct sk_buff *skb;
743 unsigned int msgsize;
744
745 if (!ap) {
746 dev_err(cs->dev, "%s: no application\n", __func__);
747 return;
748 }
749 while (ap->bcnext) {
750 /* this should never happen */
751 dev_warn(cs->dev, "%s: dropping extra application %u\n",
752 __func__, ap->bcnext->id);
753 send_disconnect_ind(bcs, ap->bcnext,
754 CapiCallGivenToOtherApplication);
755 ap->bcnext = ap->bcnext->bcnext;
756 }
757 if (ap->connected == APCONN_NONE) {
758 dev_warn(cs->dev, "%s: application %u not connected\n",
759 __func__, ap->id);
760 return;
761 }
762
763 /* prepare CONNECT_ACTIVE_IND message
764 * Note: LLC not supported by device
765 */
766 capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND,
767 ap->nextMessageNumber++,
768 iif->ctr.cnr | ((bcs->channel + 1) << 8));
769
770 /* minimum size, all structs empty */
771 msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN;
772
773 /* ToDo: set parameter: Connected number
774 * (requires ev-layer state machine extension to collect
775 * ZCON device reply)
776 */
777
778 /* build and emit CONNECT_ACTIVE_IND message */
779 skb = alloc_skb(msgsize, GFP_ATOMIC);
780 if (!skb) {
781 dev_err(cs->dev, "%s: out of memory\n", __func__);
782 return;
783 }
784 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
785 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
786 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
787}
788
789/**
790 * gigaset_isdn_hupD() - signal D channel hangup
791 * @bcs: B channel descriptor structure.
792 *
793 * Called by main module at tasklet level to notify the LL that the D channel
794 * connection has been shut down.
795 */
796void gigaset_isdn_hupD(struct bc_state *bcs)
797{
798 struct gigaset_capi_appl *ap;
799
800 /*
801 * ToDo: pass on reason code reported by device
802 * (requires ev-layer state machine extension to collect
803 * ZCAU device reply)
804 */
805 for (ap = bcs->ap; ap != NULL; ap = ap->bcnext) {
806 send_disconnect_b3_ind(bcs, ap);
807 send_disconnect_ind(bcs, ap, 0);
808 }
809 bcs->ap = NULL;
810}
811
812/**
813 * gigaset_isdn_connB() - signal B channel connect
814 * @bcs: B channel descriptor structure.
815 *
816 * Called by main module at tasklet level to notify the LL that the B channel
817 * connection has been established.
818 */
819void gigaset_isdn_connB(struct bc_state *bcs)
820{
821 struct cardstate *cs = bcs->cs;
822 struct gigaset_capi_ctr *iif = cs->iif;
823 struct gigaset_capi_appl *ap = bcs->ap;
824 struct sk_buff *skb;
825 unsigned int msgsize;
826 u8 command;
827
828 if (!ap) {
829 dev_err(cs->dev, "%s: no application\n", __func__);
830 return;
831 }
832 while (ap->bcnext) {
833 /* this should never happen */
834 dev_warn(cs->dev, "%s: dropping extra application %u\n",
835 __func__, ap->bcnext->id);
836 send_disconnect_ind(bcs, ap->bcnext,
837 CapiCallGivenToOtherApplication);
838 ap->bcnext = ap->bcnext->bcnext;
839 }
840 if (!ap->connected) {
841 dev_warn(cs->dev, "%s: application %u not connected\n",
842 __func__, ap->id);
843 return;
844 }
845
846 /*
847 * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ;
848 * otherwise we have to emit CONNECT_B3_IND first, and follow up with
849 * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
850 * Parameters in both cases always: NCCI = 1, NCPI empty
851 */
852 if (ap->connected >= APCONN_ACTIVE) {
853 command = CAPI_CONNECT_B3_ACTIVE;
854 msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
855 } else {
856 command = CAPI_CONNECT_B3;
857 msgsize = CAPI_CONNECT_B3_IND_BASELEN;
858 }
859 capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
860 ap->nextMessageNumber++,
861 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
862 skb = alloc_skb(msgsize, GFP_ATOMIC);
863 if (!skb) {
864 dev_err(cs->dev, "%s: out of memory\n", __func__);
865 return;
866 }
867 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
868 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
869 ap->connected = APCONN_ACTIVE;
870 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
871}
872
873/**
874 * gigaset_isdn_hupB() - signal B channel hangup
875 * @bcs: B channel descriptor structure.
876 *
877 * Called by main module to notify the LL that the B channel connection has
878 * been shut down.
879 */
880void gigaset_isdn_hupB(struct bc_state *bcs)
881{
882 struct cardstate *cs = bcs->cs;
883 struct gigaset_capi_appl *ap = bcs->ap;
884
885 /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
886
887 if (!ap) {
888 dev_err(cs->dev, "%s: no application\n", __func__);
889 return;
890 }
891
892 send_disconnect_b3_ind(bcs, ap);
893}
894
895/**
896 * gigaset_isdn_start() - signal device availability
897 * @cs: device descriptor structure.
898 *
899 * Called by main module to notify the LL that the device is available for
900 * use.
901 */
902void gigaset_isdn_start(struct cardstate *cs)
903{
904 struct gigaset_capi_ctr *iif = cs->iif;
905
906 /* fill profile data: manufacturer name */
907 strcpy(iif->ctr.manu, "Siemens");
908 /* CAPI and device version */
909 iif->ctr.version.majorversion = 2; /* CAPI 2.0 */
910 iif->ctr.version.minorversion = 0;
911 /* ToDo: check/assert cs->gotfwver? */
912 iif->ctr.version.majormanuversion = cs->fwver[0];
913 iif->ctr.version.minormanuversion = cs->fwver[1];
914 /* number of B channels supported */
915 iif->ctr.profile.nbchannel = cs->channels;
916 /* global options: internal controller, supplementary services */
917 iif->ctr.profile.goptions = 0x11;
918 /* B1 protocols: 64 kbit/s HDLC or transparent */
919 iif->ctr.profile.support1 = 0x03;
920 /* B2 protocols: transparent only */
921 /* ToDo: X.75 SLP ? */
922 iif->ctr.profile.support2 = 0x02;
923 /* B3 protocols: transparent only */
924 iif->ctr.profile.support3 = 0x01;
925 /* no serial number */
926 strcpy(iif->ctr.serial, "0");
927 capi_ctr_ready(&iif->ctr);
928}
929
930/**
931 * gigaset_isdn_stop() - signal device unavailability
932 * @cs: device descriptor structure.
933 *
934 * Called by main module to notify the LL that the device is no longer
935 * available for use.
936 */
937void gigaset_isdn_stop(struct cardstate *cs)
938{
939 struct gigaset_capi_ctr *iif = cs->iif;
940 capi_ctr_down(&iif->ctr);
941}
942
943/*
944 * kernel CAPI callback methods
945 * ============================
946 */
947
948/*
949 * load firmware
950 */
951static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
952{
953 struct cardstate *cs = ctr->driverdata;
954
955 /* AVM specific operation, not needed for Gigaset -- ignore */
956 dev_notice(cs->dev, "load_firmware ignored\n");
957
958 return 0;
959}
960
961/*
962 * reset (deactivate) controller
963 */
964static void gigaset_reset_ctr(struct capi_ctr *ctr)
965{
966 struct cardstate *cs = ctr->driverdata;
967
968 /* AVM specific operation, not needed for Gigaset -- ignore */
969 dev_notice(cs->dev, "reset_ctr ignored\n");
970}
971
972/*
973 * register CAPI application
974 */
975static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
976 capi_register_params *rp)
977{
978 struct gigaset_capi_ctr *iif
979 = container_of(ctr, struct gigaset_capi_ctr, ctr);
980 struct cardstate *cs = ctr->driverdata;
981 struct gigaset_capi_appl *ap;
982
983 list_for_each_entry(ap, &iif->appls, ctrlist)
984 if (ap->id == appl) {
985 dev_notice(cs->dev,
986 "application %u already registered\n", appl);
987 return;
988 }
989
990 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
991 if (!ap) {
992 dev_err(cs->dev, "%s: out of memory\n", __func__);
993 return;
994 }
995 ap->id = appl;
996
997 list_add(&ap->ctrlist, &iif->appls);
998}
999
1000/*
1001 * release CAPI application
1002 */
1003static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
1004{
1005 struct gigaset_capi_ctr *iif
1006 = container_of(ctr, struct gigaset_capi_ctr, ctr);
1007 struct cardstate *cs = iif->ctr.driverdata;
1008 struct gigaset_capi_appl *ap, *tmp;
1009
1010 list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
1011 if (ap->id == appl) {
1012 if (ap->connected != APCONN_NONE) {
1013 dev_err(cs->dev,
1014 "%s: application %u still connected\n",
1015 __func__, ap->id);
1016 /* ToDo: clear active connection */
1017 }
1018 list_del(&ap->ctrlist);
1019 kfree(ap);
1020 }
1021
1022}
1023
1024/*
1025 * =====================================================================
1026 * outgoing CAPI message handler
1027 * =====================================================================
1028 */
1029
1030/*
1031 * helper function: emit reply message with given Info value
1032 */
1033static void send_conf(struct gigaset_capi_ctr *iif,
1034 struct gigaset_capi_appl *ap,
1035 struct sk_buff *skb,
1036 u16 info)
1037{
1038 /*
1039 * _CONF replies always only have NCCI and Info parameters
1040 * so they'll fit into the _REQ message skb
1041 */
1042 capi_cmsg_answer(&iif->acmsg);
1043 iif->acmsg.Info = info;
1044 capi_cmsg2message(&iif->acmsg, skb->data);
1045 __skb_trim(skb, CAPI_STDCONF_LEN);
1046 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1047 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
1048}
1049
1050/*
1051 * process FACILITY_REQ message
1052 */
1053static void do_facility_req(struct gigaset_capi_ctr *iif,
1054 struct gigaset_capi_appl *ap,
1055 struct sk_buff *skb)
1056{
1057 struct cardstate *cs = iif->ctr.driverdata;
1058 _cmsg *cmsg = &iif->acmsg;
1059 struct sk_buff *cskb;
1060 u8 *pparam;
1061 unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
1062 u16 function, info;
1063 static u8 confparam[10]; /* max. 9 octets + length byte */
1064
1065 /* decode message */
1066 capi_message2cmsg(cmsg, skb->data);
1067 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1068
1069 /*
1070 * Facility Request Parameter is not decoded by capi_message2cmsg()
1071 * encoding depends on Facility Selector
1072 */
1073 switch (cmsg->FacilitySelector) {
1074 case CAPI_FACILITY_DTMF: /* ToDo */
1075 info = CapiFacilityNotSupported;
1076 confparam[0] = 2; /* length */
1077 /* DTMF information: Unknown DTMF request */
1078 capimsg_setu16(confparam, 1, 2);
1079 break;
1080
1081 case CAPI_FACILITY_V42BIS: /* not supported */
1082 info = CapiFacilityNotSupported;
1083 confparam[0] = 2; /* length */
1084 /* V.42 bis information: not available */
1085 capimsg_setu16(confparam, 1, 1);
1086 break;
1087
1088 case CAPI_FACILITY_SUPPSVC:
1089 /* decode Function parameter */
1090 pparam = cmsg->FacilityRequestParameter;
1091 if (pparam == NULL || *pparam < 2) {
1092 dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
1093 "Facility Request Parameter");
1094 send_conf(iif, ap, skb, CapiIllMessageParmCoding);
1095 return;
1096 }
1097 function = CAPIMSG_U16(pparam, 1);
1098 switch (function) {
1099 case CAPI_SUPPSVC_GETSUPPORTED:
1100 info = CapiSuccess;
1101 /* Supplementary Service specific parameter */
1102 confparam[3] = 6; /* length */
1103 /* Supplementary services info: Success */
1104 capimsg_setu16(confparam, 4, CapiSuccess);
1105 /* Supported Services: none */
1106 capimsg_setu32(confparam, 6, 0);
1107 break;
1108 /* ToDo: add supported services */
1109 default:
1110 info = CapiFacilitySpecificFunctionNotSupported;
1111 /* Supplementary Service specific parameter */
1112 confparam[3] = 2; /* length */
1113 /* Supplementary services info: not supported */
1114 capimsg_setu16(confparam, 4,
1115 CapiSupplementaryServiceNotSupported);
1116 }
1117
1118 /* Facility confirmation parameter */
1119 confparam[0] = confparam[3] + 3; /* total length */
1120 /* Function: copy from _REQ message */
1121 capimsg_setu16(confparam, 1, function);
1122 /* Supplementary Service specific parameter already set above */
1123 break;
1124
1125 case CAPI_FACILITY_WAKEUP: /* ToDo */
1126 info = CapiFacilityNotSupported;
1127 confparam[0] = 2; /* length */
1128 /* Number of accepted awake request parameters: 0 */
1129 capimsg_setu16(confparam, 1, 0);
1130 break;
1131
1132 default:
1133 info = CapiFacilityNotSupported;
1134 confparam[0] = 0; /* empty struct */
1135 }
1136
1137 /* send FACILITY_CONF with given Info and confirmation parameter */
1138 capi_cmsg_answer(cmsg);
1139 cmsg->Info = info;
1140 cmsg->FacilityConfirmationParameter = confparam;
1141 msgsize += confparam[0]; /* length */
1142 cskb = alloc_skb(msgsize, GFP_ATOMIC);
1143 if (!cskb) {
1144 dev_err(cs->dev, "%s: out of memory\n", __func__);
1145 return;
1146 }
1147 capi_cmsg2message(cmsg, __skb_put(cskb, msgsize));
1148 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1149 capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
1150}
1151
1152
1153/*
1154 * process LISTEN_REQ message
1155 * just store the masks in the application data structure
1156 */
1157static void do_listen_req(struct gigaset_capi_ctr *iif,
1158 struct gigaset_capi_appl *ap,
1159 struct sk_buff *skb)
1160{
1161 /* decode message */
1162 capi_message2cmsg(&iif->acmsg, skb->data);
1163 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1164
1165 /* store listening parameters */
1166 ap->listenInfoMask = iif->acmsg.InfoMask;
1167 ap->listenCIPmask = iif->acmsg.CIPmask;
1168 send_conf(iif, ap, skb, CapiSuccess);
1169}
1170
1171/*
1172 * process ALERT_REQ message
1173 * nothing to do, Gigaset always alerts anyway
1174 */
1175static void do_alert_req(struct gigaset_capi_ctr *iif,
1176 struct gigaset_capi_appl *ap,
1177 struct sk_buff *skb)
1178{
1179 /* decode message */
1180 capi_message2cmsg(&iif->acmsg, skb->data);
1181 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1182 send_conf(iif, ap, skb, CapiAlertAlreadySent);
1183}
1184
1185/*
1186 * process CONNECT_REQ message
1187 * allocate a B channel, prepare dial commands, queue a DIAL event,
1188 * emit CONNECT_CONF reply
1189 */
1190static void do_connect_req(struct gigaset_capi_ctr *iif,
1191 struct gigaset_capi_appl *ap,
1192 struct sk_buff *skb)
1193{
1194 struct cardstate *cs = iif->ctr.driverdata;
1195 _cmsg *cmsg = &iif->acmsg;
1196 struct bc_state *bcs;
1197 char **commands;
1198 char *s;
1199 u8 *pp;
1200 int i, l;
1201 u16 info;
1202
1203 /* decode message */
1204 capi_message2cmsg(cmsg, skb->data);
1205 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1206
1207 /* get free B channel & construct PLCI */
1208 bcs = gigaset_get_free_channel(cs);
1209 if (!bcs) {
1210 dev_notice(cs->dev, "%s: no B channel available\n",
1211 "CONNECT_REQ");
1212 send_conf(iif, ap, skb, CapiNoPlciAvailable);
1213 return;
1214 }
1215 ap->bcnext = NULL;
1216 bcs->ap = ap;
1217 cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
1218
1219 /* build command table */
1220 commands = kzalloc(AT_NUM*(sizeof *commands), GFP_KERNEL);
1221 if (!commands)
1222 goto oom;
1223
1224 /* encode parameter: Called party number */
1225 pp = cmsg->CalledPartyNumber;
1226 if (pp == NULL || *pp == 0) {
1227 dev_notice(cs->dev, "%s: %s missing\n",
1228 "CONNECT_REQ", "Called party number");
1229 info = CapiIllMessageParmCoding;
1230 goto error;
1231 }
1232 l = *pp++;
1233 /* check type of number/numbering plan byte */
1234 switch (*pp) {
1235 case 0x80: /* unknown type / unknown numbering plan */
1236 case 0x81: /* unknown type / ISDN/Telephony numbering plan */
1237 break;
1238 default: /* others: warn about potential misinterpretation */
1239 dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n",
1240 "CONNECT_REQ", "Called party number", *pp);
1241 }
1242 pp++;
1243 l--;
1244 /* translate "**" internal call prefix to CTP value */
1245 if (l >= 2 && pp[0] == '*' && pp[1] == '*') {
1246 s = "^SCTP=0\r";
1247 pp += 2;
1248 l -= 2;
1249 } else {
1250 s = "^SCTP=1\r";
1251 }
1252 commands[AT_TYPE] = kstrdup(s, GFP_KERNEL);
1253 if (!commands[AT_TYPE])
1254 goto oom;
1255 commands[AT_DIAL] = kmalloc(l+3, GFP_KERNEL);
1256 if (!commands[AT_DIAL])
1257 goto oom;
1258 snprintf(commands[AT_DIAL], l+3, "D%.*s\r", l, pp);
1259
1260 /* encode parameter: Calling party number */
1261 pp = cmsg->CallingPartyNumber;
1262 if (pp != NULL && *pp > 0) {
1263 l = *pp++;
1264
1265 /* check type of number/numbering plan byte */
1266 /* ToDo: allow for/handle Ext=1? */
1267 switch (*pp) {
1268 case 0x00: /* unknown type / unknown numbering plan */
1269 case 0x01: /* unknown type / ISDN/Telephony num. plan */
1270 break;
1271 default:
1272 dev_notice(cs->dev,
1273 "%s: %s type/plan 0x%02x unsupported\n",
1274 "CONNECT_REQ", "Calling party number", *pp);
1275 }
1276 pp++;
1277 l--;
1278
1279 /* check presentation indicator */
1280 if (!l) {
1281 dev_notice(cs->dev, "%s: %s IE truncated\n",
1282 "CONNECT_REQ", "Calling party number");
1283 info = CapiIllMessageParmCoding;
1284 goto error;
1285 }
1286 switch (*pp & 0xfc) { /* ignore Screening indicator */
1287 case 0x80: /* Presentation allowed */
1288 s = "^SCLIP=1\r";
1289 break;
1290 case 0xa0: /* Presentation restricted */
1291 s = "^SCLIP=0\r";
1292 break;
1293 default:
1294 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1295 "CONNECT_REQ",
1296 "Presentation/Screening indicator",
1297 *pp);
1298 s = "^SCLIP=1\r";
1299 }
1300 commands[AT_CLIP] = kstrdup(s, GFP_KERNEL);
1301 if (!commands[AT_CLIP])
1302 goto oom;
1303 pp++;
1304 l--;
1305
1306 if (l) {
1307 /* number */
1308 commands[AT_MSN] = kmalloc(l+8, GFP_KERNEL);
1309 if (!commands[AT_MSN])
1310 goto oom;
1311 snprintf(commands[AT_MSN], l+8, "^SMSN=%*s\r", l, pp);
1312 }
1313 }
1314
1315 /* check parameter: CIP Value */
1316 if (cmsg->CIPValue > ARRAY_SIZE(cip2bchlc) ||
1317 (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
1318 dev_notice(cs->dev, "%s: unknown CIP value %d\n",
1319 "CONNECT_REQ", cmsg->CIPValue);
1320 info = CapiCipValueUnknown;
1321 goto error;
1322 }
1323
1324 /* check/encode parameter: BC */
1325 if (cmsg->BC && cmsg->BC[0]) {
1326 /* explicit BC overrides CIP */
1327 l = 2*cmsg->BC[0] + 7;
1328 commands[AT_BC] = kmalloc(l, GFP_KERNEL);
1329 if (!commands[AT_BC])
1330 goto oom;
1331 strcpy(commands[AT_BC], "^SBC=");
1332 decode_ie(cmsg->BC, commands[AT_BC]+5);
1333 strcpy(commands[AT_BC] + l - 2, "\r");
1334 } else if (cip2bchlc[cmsg->CIPValue].bc) {
1335 l = strlen(cip2bchlc[cmsg->CIPValue].bc) + 7;
1336 commands[AT_BC] = kmalloc(l, GFP_KERNEL);
1337 if (!commands[AT_BC])
1338 goto oom;
1339 snprintf(commands[AT_BC], l, "^SBC=%s\r",
1340 cip2bchlc[cmsg->CIPValue].bc);
1341 }
1342
1343 /* check/encode parameter: HLC */
1344 if (cmsg->HLC && cmsg->HLC[0]) {
1345 /* explicit HLC overrides CIP */
1346 l = 2*cmsg->HLC[0] + 7;
1347 commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
1348 if (!commands[AT_HLC])
1349 goto oom;
1350 strcpy(commands[AT_HLC], "^SHLC=");
1351 decode_ie(cmsg->HLC, commands[AT_HLC]+5);
1352 strcpy(commands[AT_HLC] + l - 2, "\r");
1353 } else if (cip2bchlc[cmsg->CIPValue].hlc) {
1354 l = strlen(cip2bchlc[cmsg->CIPValue].hlc) + 7;
1355 commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
1356 if (!commands[AT_HLC])
1357 goto oom;
1358 snprintf(commands[AT_HLC], l, "^SHLC=%s\r",
1359 cip2bchlc[cmsg->CIPValue].hlc);
1360 }
1361
1362 /* check/encode parameter: B Protocol */
1363 if (cmsg->BProtocol == CAPI_DEFAULT) {
1364 bcs->proto2 = L2_HDLC;
1365 dev_warn(cs->dev,
1366 "B2 Protocol X.75 SLP unsupported, using Transparent\n");
1367 } else {
1368 switch (cmsg->B1protocol) {
1369 case 0:
1370 bcs->proto2 = L2_HDLC;
1371 break;
1372 case 1:
1373 bcs->proto2 = L2_BITSYNC;
1374 break;
1375 default:
1376 dev_warn(cs->dev,
1377 "B1 Protocol %u unsupported, using Transparent\n",
1378 cmsg->B1protocol);
1379 bcs->proto2 = L2_BITSYNC;
1380 }
1381 if (cmsg->B2protocol != 1)
1382 dev_warn(cs->dev,
1383 "B2 Protocol %u unsupported, using Transparent\n",
1384 cmsg->B2protocol);
1385 if (cmsg->B3protocol != 0)
1386 dev_warn(cs->dev,
1387 "B3 Protocol %u unsupported, using Transparent\n",
1388 cmsg->B3protocol);
1389 ignore_cstruct_param(cs, cmsg->B1configuration,
1390 "CONNECT_REQ", "B1 Configuration");
1391 ignore_cstruct_param(cs, cmsg->B2configuration,
1392 "CONNECT_REQ", "B2 Configuration");
1393 ignore_cstruct_param(cs, cmsg->B3configuration,
1394 "CONNECT_REQ", "B3 Configuration");
1395 }
1396 commands[AT_PROTO] = kmalloc(9, GFP_KERNEL);
1397 if (!commands[AT_PROTO])
1398 goto oom;
1399 snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
1400
1401 /* ToDo: check/encode remaining parameters */
1402 ignore_cstruct_param(cs, cmsg->CalledPartySubaddress,
1403 "CONNECT_REQ", "Called pty subaddr");
1404 ignore_cstruct_param(cs, cmsg->CallingPartySubaddress,
1405 "CONNECT_REQ", "Calling pty subaddr");
1406 ignore_cstruct_param(cs, cmsg->LLC,
1407 "CONNECT_REQ", "LLC");
1408 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1409 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1410 "CONNECT_REQ", "B Channel Information");
1411 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1412 "CONNECT_REQ", "Keypad Facility");
1413 ignore_cstruct_param(cs, cmsg->Useruserdata,
1414 "CONNECT_REQ", "User-User Data");
1415 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1416 "CONNECT_REQ", "Facility Data Array");
1417 }
1418
1419 /* encode parameter: B channel to use */
1420 commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
1421 if (!commands[AT_ISO])
1422 goto oom;
1423 snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
1424 (unsigned) bcs->channel + 1);
1425
1426 /* queue & schedule EV_DIAL event */
1427 if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
1428 bcs->at_state.seq_index, NULL))
1429 goto oom;
1430 gig_dbg(DEBUG_CMD, "scheduling DIAL");
1431 gigaset_schedule_event(cs);
1432 ap->connected = APCONN_SETUP;
1433 send_conf(iif, ap, skb, CapiSuccess);
1434 return;
1435
1436oom:
1437 dev_err(cs->dev, "%s: out of memory\n", __func__);
1438 info = CAPI_MSGOSRESOURCEERR;
1439error:
1440 if (commands)
1441 for (i = 0; i < AT_NUM; i++)
1442 kfree(commands[i]);
1443 kfree(commands);
1444 gigaset_free_channel(bcs);
1445 send_conf(iif, ap, skb, info);
1446}
1447
1448/*
1449 * process CONNECT_RESP message
1450 * checks protocol parameters and queues an ACCEPT or HUP event
1451 */
1452static void do_connect_resp(struct gigaset_capi_ctr *iif,
1453 struct gigaset_capi_appl *ap,
1454 struct sk_buff *skb)
1455{
1456 struct cardstate *cs = iif->ctr.driverdata;
1457 _cmsg *cmsg = &iif->acmsg;
1458 struct bc_state *bcs;
1459 struct gigaset_capi_appl *oap;
1460 int channel;
1461
1462 /* decode message */
1463 capi_message2cmsg(cmsg, skb->data);
1464 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1465 dev_kfree_skb_any(skb);
1466
1467 /* extract and check channel number from PLCI */
1468 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1469 if (!channel || channel > cs->channels) {
1470 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1471 "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI);
1472 return;
1473 }
1474 bcs = cs->bcs + channel - 1;
1475
1476 switch (cmsg->Reject) {
1477 case 0: /* Accept */
1478 /* drop all competing applications, keep only this one */
1479 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
1480 if (oap != ap)
1481 send_disconnect_ind(bcs, oap,
1482 CapiCallGivenToOtherApplication);
1483 ap->bcnext = NULL;
1484 bcs->ap = ap;
1485 bcs->chstate |= CHS_NOTIFY_LL;
1486
1487 /* check/encode B channel protocol */
1488 if (cmsg->BProtocol == CAPI_DEFAULT) {
1489 bcs->proto2 = L2_HDLC;
1490 dev_warn(cs->dev,
1491 "B2 Protocol X.75 SLP unsupported, using Transparent\n");
1492 } else {
1493 switch (cmsg->B1protocol) {
1494 case 0:
1495 bcs->proto2 = L2_HDLC;
1496 break;
1497 case 1:
1498 bcs->proto2 = L2_BITSYNC;
1499 break;
1500 default:
1501 dev_warn(cs->dev,
1502 "B1 Protocol %u unsupported, using Transparent\n",
1503 cmsg->B1protocol);
1504 bcs->proto2 = L2_BITSYNC;
1505 }
1506 if (cmsg->B2protocol != 1)
1507 dev_warn(cs->dev,
1508 "B2 Protocol %u unsupported, using Transparent\n",
1509 cmsg->B2protocol);
1510 if (cmsg->B3protocol != 0)
1511 dev_warn(cs->dev,
1512 "B3 Protocol %u unsupported, using Transparent\n",
1513 cmsg->B3protocol);
1514 ignore_cstruct_param(cs, cmsg->B1configuration,
1515 "CONNECT_RESP", "B1 Configuration");
1516 ignore_cstruct_param(cs, cmsg->B2configuration,
1517 "CONNECT_RESP", "B2 Configuration");
1518 ignore_cstruct_param(cs, cmsg->B3configuration,
1519 "CONNECT_RESP", "B3 Configuration");
1520 }
1521
1522 /* ToDo: check/encode remaining parameters */
1523 ignore_cstruct_param(cs, cmsg->ConnectedNumber,
1524 "CONNECT_RESP", "Connected Number");
1525 ignore_cstruct_param(cs, cmsg->ConnectedSubaddress,
1526 "CONNECT_RESP", "Connected Subaddress");
1527 ignore_cstruct_param(cs, cmsg->LLC,
1528 "CONNECT_RESP", "LLC");
1529 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1530 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1531 "CONNECT_RESP", "BChannel Information");
1532 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1533 "CONNECT_RESP", "Keypad Facility");
1534 ignore_cstruct_param(cs, cmsg->Useruserdata,
1535 "CONNECT_RESP", "User-User Data");
1536 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1537 "CONNECT_RESP", "Facility Data Array");
1538 }
1539
1540 /* Accept call */
1541 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
1542 EV_ACCEPT, NULL, 0, NULL))
1543 return;
1544 gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
1545 gigaset_schedule_event(cs);
1546 return;
1547
1548 case 1: /* Ignore */
1549 /* send DISCONNECT_IND to this application */
1550 send_disconnect_ind(bcs, ap, 0);
1551
1552 /* remove it from the list of listening apps */
1553 if (bcs->ap == ap) {
1554 bcs->ap = ap->bcnext;
1555 if (bcs->ap == NULL)
1556 /* last one: stop ev-layer hupD notifications */
1557 bcs->chstate &= ~CHS_NOTIFY_LL;
1558 return;
1559 }
1560 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
1561 if (oap->bcnext == ap) {
1562 oap->bcnext = oap->bcnext->bcnext;
1563 return;
1564 }
1565 }
1566 dev_err(cs->dev, "%s: application %u not found\n",
1567 __func__, ap->id);
1568 return;
1569
1570 default: /* Reject */
1571 /* drop all competing applications, keep only this one */
1572 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
1573 if (oap != ap)
1574 send_disconnect_ind(bcs, oap,
1575 CapiCallGivenToOtherApplication);
1576 ap->bcnext = NULL;
1577 bcs->ap = ap;
1578
1579 /* reject call - will trigger DISCONNECT_IND for this app */
1580 dev_info(cs->dev, "%s: Reject=%x\n",
1581 "CONNECT_RESP", cmsg->Reject);
1582 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
1583 EV_HUP, NULL, 0, NULL))
1584 return;
1585 gig_dbg(DEBUG_CMD, "scheduling HUP");
1586 gigaset_schedule_event(cs);
1587 return;
1588 }
1589}
1590
1591/*
1592 * process CONNECT_B3_REQ message
1593 * build NCCI and emit CONNECT_B3_CONF reply
1594 */
1595static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
1596 struct gigaset_capi_appl *ap,
1597 struct sk_buff *skb)
1598{
1599 struct cardstate *cs = iif->ctr.driverdata;
1600 _cmsg *cmsg = &iif->acmsg;
1601 int channel;
1602
1603 /* decode message */
1604 capi_message2cmsg(cmsg, skb->data);
1605 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1606
1607 /* extract and check channel number from PLCI */
1608 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1609 if (!channel || channel > cs->channels) {
1610 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1611 "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
1612 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1613 return;
1614 }
1615
1616 /* mark logical connection active */
1617 ap->connected = APCONN_ACTIVE;
1618
1619 /* build NCCI: always 1 (one B3 connection only) */
1620 cmsg->adr.adrNCCI |= 1 << 16;
1621
1622 /* NCPI parameter: not applicable for B3 Transparent */
1623 ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
1624 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
1625 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1626}
1627
1628/*
1629 * process CONNECT_B3_RESP message
1630 * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND
1631 * or queue EV_HUP and emit DISCONNECT_B3_IND.
1632 * The emitted message is always shorter than the received one,
1633 * allowing to reuse the skb.
1634 */
1635static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
1636 struct gigaset_capi_appl *ap,
1637 struct sk_buff *skb)
1638{
1639 struct cardstate *cs = iif->ctr.driverdata;
1640 _cmsg *cmsg = &iif->acmsg;
1641 struct bc_state *bcs;
1642 int channel;
1643 unsigned int msgsize;
1644 u8 command;
1645
1646 /* decode message */
1647 capi_message2cmsg(cmsg, skb->data);
1648 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1649
1650 /* extract and check channel number and NCCI */
1651 channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
1652 if (!channel || channel > cs->channels ||
1653 ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
1654 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1655 "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
1656 dev_kfree_skb_any(skb);
1657 return;
1658 }
1659 bcs = &cs->bcs[channel-1];
1660
1661 if (cmsg->Reject) {
1662 /* Reject: clear B3 connect received flag */
1663 ap->connected = APCONN_SETUP;
1664
1665 /* trigger hangup, causing eventual DISCONNECT_IND */
1666 if (!gigaset_add_event(cs, &bcs->at_state,
1667 EV_HUP, NULL, 0, NULL)) {
1668 dev_err(cs->dev, "%s: out of memory\n", __func__);
1669 dev_kfree_skb_any(skb);
1670 return;
1671 }
1672 gig_dbg(DEBUG_CMD, "scheduling HUP");
1673 gigaset_schedule_event(cs);
1674
1675 /* emit DISCONNECT_B3_IND */
1676 command = CAPI_DISCONNECT_B3;
1677 msgsize = CAPI_DISCONNECT_B3_IND_BASELEN;
1678 } else {
1679 /*
1680 * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as
1681 * we only send CONNECT_B3_IND if the B channel is up
1682 */
1683 command = CAPI_CONNECT_B3_ACTIVE;
1684 msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
1685 }
1686 capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
1687 ap->nextMessageNumber++, cmsg->adr.adrNCCI);
1688 __skb_trim(skb, msgsize);
1689 capi_cmsg2message(cmsg, skb->data);
1690 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1691 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
1692}
1693
1694/*
1695 * process DISCONNECT_REQ message
1696 * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary,
1697 * emit DISCONNECT_CONF reply
1698 */
1699static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1700 struct gigaset_capi_appl *ap,
1701 struct sk_buff *skb)
1702{
1703 struct cardstate *cs = iif->ctr.driverdata;
1704 _cmsg *cmsg = &iif->acmsg;
1705 struct bc_state *bcs;
1706 _cmsg *b3cmsg;
1707 struct sk_buff *b3skb;
1708 int channel;
1709
1710 /* decode message */
1711 capi_message2cmsg(cmsg, skb->data);
1712 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1713
1714 /* extract and check channel number from PLCI */
1715 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1716 if (!channel || channel > cs->channels) {
1717 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1718 "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
1719 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1720 return;
1721 }
1722 bcs = cs->bcs + channel - 1;
1723
1724 /* ToDo: process parameter: Additional info */
1725 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1726 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1727 "DISCONNECT_REQ", "B Channel Information");
1728 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1729 "DISCONNECT_REQ", "Keypad Facility");
1730 ignore_cstruct_param(cs, cmsg->Useruserdata,
1731 "DISCONNECT_REQ", "User-User Data");
1732 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1733 "DISCONNECT_REQ", "Facility Data Array");
1734 }
1735
1736 /* skip if DISCONNECT_IND already sent */
1737 if (!ap->connected)
1738 return;
1739
1740 /* check for active logical connection */
1741 if (ap->connected >= APCONN_ACTIVE) {
1742 /*
1743 * emit DISCONNECT_B3_IND with cause 0x3301
1744 * use separate cmsg structure, as the content of iif->acmsg
1745 * is still needed for creating the _CONF message
1746 */
1747 b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL);
1748 if (!b3cmsg) {
1749 dev_err(cs->dev, "%s: out of memory\n", __func__);
1750 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1751 return;
1752 }
1753 capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
1754 ap->nextMessageNumber++,
1755 cmsg->adr.adrPLCI | (1 << 16));
1756 b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
1757 b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
1758 if (b3skb == NULL) {
1759 dev_err(cs->dev, "%s: out of memory\n", __func__);
1760 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1761 return;
1762 }
1763 capi_cmsg2message(b3cmsg,
1764 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
1765 kfree(b3cmsg);
1766 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
1767 }
1768
1769 /* trigger hangup, causing eventual DISCONNECT_IND */
1770 if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
1771 dev_err(cs->dev, "%s: out of memory\n", __func__);
1772 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1773 return;
1774 }
1775 gig_dbg(DEBUG_CMD, "scheduling HUP");
1776 gigaset_schedule_event(cs);
1777
1778 /* emit reply */
1779 send_conf(iif, ap, skb, CapiSuccess);
1780}
1781
1782/*
1783 * process DISCONNECT_B3_REQ message
1784 * schedule EV_HUP and emit DISCONNECT_B3_CONF reply
1785 */
1786static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
1787 struct gigaset_capi_appl *ap,
1788 struct sk_buff *skb)
1789{
1790 struct cardstate *cs = iif->ctr.driverdata;
1791 _cmsg *cmsg = &iif->acmsg;
1792 int channel;
1793
1794 /* decode message */
1795 capi_message2cmsg(cmsg, skb->data);
1796 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1797
1798 /* extract and check channel number and NCCI */
1799 channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
1800 if (!channel || channel > cs->channels ||
1801 ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
1802 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1803 "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
1804 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1805 return;
1806 }
1807
1808 /* reject if logical connection not active */
1809 if (ap->connected < APCONN_ACTIVE) {
1810 send_conf(iif, ap, skb,
1811 CapiMessageNotSupportedInCurrentState);
1812 return;
1813 }
1814
1815 /* trigger hangup, causing eventual DISCONNECT_B3_IND */
1816 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
1817 EV_HUP, NULL, 0, NULL)) {
1818 dev_err(cs->dev, "%s: out of memory\n", __func__);
1819 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1820 return;
1821 }
1822 gig_dbg(DEBUG_CMD, "scheduling HUP");
1823 gigaset_schedule_event(cs);
1824
1825 /* NCPI parameter: not applicable for B3 Transparent */
1826 ignore_cstruct_param(cs, cmsg->NCPI,
1827 "DISCONNECT_B3_REQ", "NCPI");
1828 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
1829 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1830}
1831
1832/*
1833 * process DATA_B3_REQ message
1834 */
1835static void do_data_b3_req(struct gigaset_capi_ctr *iif,
1836 struct gigaset_capi_appl *ap,
1837 struct sk_buff *skb)
1838{
1839 struct cardstate *cs = iif->ctr.driverdata;
1840 int channel = CAPIMSG_PLCI_PART(skb->data);
1841 u16 ncci = CAPIMSG_NCCI_PART(skb->data);
1842 u16 msglen = CAPIMSG_LEN(skb->data);
1843 u16 datalen = CAPIMSG_DATALEN(skb->data);
1844 u16 flags = CAPIMSG_FLAGS(skb->data);
1845
1846 /* frequent message, avoid _cmsg overhead */
1847 dump_rawmsg(DEBUG_LLDATA, "DATA_B3_REQ", skb->data);
1848
1849 gig_dbg(DEBUG_LLDATA,
1850 "Receiving data from LL (ch: %d, flg: %x, sz: %d|%d)",
1851 channel, flags, msglen, datalen);
1852
1853 /* check parameters */
1854 if (channel == 0 || channel > cs->channels || ncci != 1) {
1855 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1856 "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
1857 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1858 return;
1859 }
1860 if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
1861 dev_notice(cs->dev, "%s: unexpected length %d\n",
1862 "DATA_B3_REQ", msglen);
1863 if (msglen + datalen != skb->len)
1864 dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n",
1865 "DATA_B3_REQ", msglen, datalen, skb->len);
1866 if (msglen + datalen > skb->len) {
1867 /* message too short for announced data length */
1868 send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
1869 return;
1870 }
1871 if (flags & CAPI_FLAGS_RESERVED) {
1872 dev_notice(cs->dev, "%s: reserved flags set (%x)\n",
1873 "DATA_B3_REQ", flags);
1874 send_conf(iif, ap, skb, CapiIllMessageParmCoding);
1875 return;
1876 }
1877
1878 /* reject if logical connection not active */
1879 if (ap->connected < APCONN_ACTIVE) {
1880 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
1881 return;
1882 }
1883
1884 /* pull CAPI message into link layer header */
1885 skb_reset_mac_header(skb);
1886 skb->mac_len = msglen;
1887 skb_pull(skb, msglen);
1888
1889 /* pass to device-specific module */
1890 if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) {
1891 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1892 return;
1893 }
1894
1895 /* DATA_B3_CONF reply will be sent by gigaset_skb_sent() */
1896
1897 /*
1898 * ToDo: honor unset "delivery confirmation" bit
1899 * (send DATA_B3_CONF immediately?)
1900 */
1901}
1902
1903/*
1904 * process RESET_B3_REQ message
1905 * just always reply "not supported by current protocol"
1906 */
1907static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
1908 struct gigaset_capi_appl *ap,
1909 struct sk_buff *skb)
1910{
1911 /* decode message */
1912 capi_message2cmsg(&iif->acmsg, skb->data);
1913 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1914 send_conf(iif, ap, skb,
1915 CapiResetProcedureNotSupportedByCurrentProtocol);
1916}
1917
1918/*
1919 * dump unsupported/ignored messages at most twice per minute,
1920 * some apps send those very frequently
1921 */
1922static unsigned long ignored_msg_dump_time;
1923
1924/*
1925 * unsupported CAPI message handler
1926 */
1927static void do_unsupported(struct gigaset_capi_ctr *iif,
1928 struct gigaset_capi_appl *ap,
1929 struct sk_buff *skb)
1930{
1931 /* decode message */
1932 capi_message2cmsg(&iif->acmsg, skb->data);
1933 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000))
1934 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1935 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
1936}
1937
1938/*
1939 * CAPI message handler: no-op
1940 */
1941static void do_nothing(struct gigaset_capi_ctr *iif,
1942 struct gigaset_capi_appl *ap,
1943 struct sk_buff *skb)
1944{
1945 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) {
1946 /* decode message */
1947 capi_message2cmsg(&iif->acmsg, skb->data);
1948 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1949 }
1950 dev_kfree_skb_any(skb);
1951}
1952
/* DATA_B3_RESP handler: dump the raw message and discard it */
static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
			    struct gigaset_capi_appl *ap,
			    struct sk_buff *skb)
{
	dump_rawmsg(DEBUG_LLDATA, __func__, skb->data);
	dev_kfree_skb_any(skb);
}
1960
/* table of outgoing CAPI message handlers with lookup function */

/* handler signature: (controller, application, message skb) */
typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *,
				    struct gigaset_capi_appl *,
				    struct sk_buff *);

/* linear command -> handler map, scanned by lookup_capi_send_handler() */
static struct {
	u16 cmd;
	capi_send_handler_t handler;
} capi_send_handler_table[] = {
	/* most frequent messages first for faster lookup */
	{ CAPI_DATA_B3_REQ, do_data_b3_req },
	{ CAPI_DATA_B3_RESP, do_data_b3_resp },

	{ CAPI_ALERT_REQ, do_alert_req },
	{ CAPI_CONNECT_ACTIVE_RESP, do_nothing },
	{ CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing },
	{ CAPI_CONNECT_B3_REQ, do_connect_b3_req },
	{ CAPI_CONNECT_B3_RESP, do_connect_b3_resp },
	{ CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing },
	{ CAPI_CONNECT_REQ, do_connect_req },
	{ CAPI_CONNECT_RESP, do_connect_resp },
	{ CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req },
	{ CAPI_DISCONNECT_B3_RESP, do_nothing },
	{ CAPI_DISCONNECT_REQ, do_disconnect_req },
	{ CAPI_DISCONNECT_RESP, do_nothing },
	{ CAPI_FACILITY_REQ, do_facility_req },
	{ CAPI_FACILITY_RESP, do_nothing },
	{ CAPI_LISTEN_REQ, do_listen_req },
	{ CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported },
	{ CAPI_RESET_B3_REQ, do_reset_b3_req },
	{ CAPI_RESET_B3_RESP, do_nothing },

	/*
	 * ToDo: support overlap sending (requires ev-layer state
	 * machine extension to generate additional ATD commands)
	 */
	{ CAPI_INFO_REQ, do_unsupported },
	{ CAPI_INFO_RESP, do_nothing },

	/*
	 * ToDo: what's the proper response for these?
	 */
	{ CAPI_MANUFACTURER_REQ, do_nothing },
	{ CAPI_MANUFACTURER_RESP, do_nothing },
};
2006
2007/* look up handler */
2008static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd)
2009{
2010 size_t i;
2011
2012 for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++)
2013 if (capi_send_handler_table[i].cmd == cmd)
2014 return capi_send_handler_table[i].handler;
2015 return NULL;
2016}
2017
2018
2019/**
2020 * gigaset_send_message() - accept a CAPI message from an application
2021 * @ctr: controller descriptor structure.
2022 * @skb: CAPI message.
2023 *
2024 * Return value: CAPI error code
2025 * Note: capidrv (and probably others, too) only uses the return value to
2026 * decide whether it has to free the skb (only if result != CAPI_NOERROR (0))
2027 */
2028static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
2029{
2030 struct gigaset_capi_ctr *iif
2031 = container_of(ctr, struct gigaset_capi_ctr, ctr);
2032 struct cardstate *cs = ctr->driverdata;
2033 struct gigaset_capi_appl *ap;
2034 capi_send_handler_t handler;
2035
2036 /* can only handle linear sk_buffs */
2037 if (skb_linearize(skb) < 0) {
2038 dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__);
2039 return CAPI_MSGOSRESOURCEERR;
2040 }
2041
2042 /* retrieve application data structure */
2043 ap = get_appl(iif, CAPIMSG_APPID(skb->data));
2044 if (!ap) {
2045 dev_notice(cs->dev, "%s: application %u not registered\n",
2046 __func__, CAPIMSG_APPID(skb->data));
2047 return CAPI_ILLAPPNR;
2048 }
2049
2050 /* look up command */
2051 handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
2052 if (!handler) {
2053 /* unknown/unsupported message type */
2054 if (printk_ratelimit())
2055 dev_notice(cs->dev, "%s: unsupported message %u\n",
2056 __func__, CAPIMSG_CMD(skb->data));
2057 return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
2058 }
2059
2060 /* serialize */
2061 if (atomic_add_return(1, &iif->sendqlen) > 1) {
2062 /* queue behind other messages */
2063 skb_queue_tail(&iif->sendqueue, skb);
2064 return CAPI_NOERROR;
2065 }
2066
2067 /* process message */
2068 handler(iif, ap, skb);
2069
2070 /* process other messages arrived in the meantime */
2071 while (atomic_sub_return(1, &iif->sendqlen) > 0) {
2072 skb = skb_dequeue(&iif->sendqueue);
2073 if (!skb) {
2074 /* should never happen */
2075 dev_err(cs->dev, "%s: send queue empty\n", __func__);
2076 continue;
2077 }
2078 ap = get_appl(iif, CAPIMSG_APPID(skb->data));
2079 if (!ap) {
2080 /* could that happen? */
2081 dev_warn(cs->dev, "%s: application %u vanished\n",
2082 __func__, CAPIMSG_APPID(skb->data));
2083 continue;
2084 }
2085 handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
2086 if (!handler) {
2087 /* should never happen */
2088 dev_err(cs->dev, "%s: handler %x vanished\n",
2089 __func__, CAPIMSG_CMD(skb->data));
2090 continue;
2091 }
2092 handler(iif, ap, skb);
2093 }
2094
2095 return CAPI_NOERROR;
2096}
2097
2098/**
2099 * gigaset_procinfo() - build single line description for controller
2100 * @ctr: controller descriptor structure.
2101 *
2102 * Return value: pointer to generated string (null terminated)
2103 */
2104static char *gigaset_procinfo(struct capi_ctr *ctr)
2105{
2106 return ctr->name; /* ToDo: more? */
2107}
2108
2109/**
2110 * gigaset_ctr_read_proc() - build controller proc file entry
2111 * @page: buffer of PAGE_SIZE bytes for receiving the entry.
2112 * @start: unused.
2113 * @off: unused.
2114 * @count: unused.
2115 * @eof: unused.
2116 * @ctr: controller descriptor structure.
2117 *
2118 * Return value: length of generated entry
2119 */
2120static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2121 int count, int *eof, struct capi_ctr *ctr)
2122{
2123 struct cardstate *cs = ctr->driverdata;
2124 char *s;
2125 int i;
2126 int len = 0;
2127 len += sprintf(page+len, "%-16s %s\n", "name", ctr->name);
2128 len += sprintf(page+len, "%-16s %s %s\n", "dev",
2129 dev_driver_string(cs->dev), dev_name(cs->dev));
2130 len += sprintf(page+len, "%-16s %d\n", "id", cs->myid);
2131 if (cs->gotfwver)
2132 len += sprintf(page+len, "%-16s %d.%d.%d.%d\n", "firmware",
2133 cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
2134 len += sprintf(page+len, "%-16s %d\n", "channels",
2135 cs->channels);
2136 len += sprintf(page+len, "%-16s %s\n", "onechannel",
2137 cs->onechannel ? "yes" : "no");
2138
2139 switch (cs->mode) {
2140 case M_UNKNOWN:
2141 s = "unknown";
2142 break;
2143 case M_CONFIG:
2144 s = "config";
2145 break;
2146 case M_UNIMODEM:
2147 s = "Unimodem";
2148 break;
2149 case M_CID:
2150 s = "CID";
2151 break;
2152 default:
2153 s = "??";
2154 }
2155 len += sprintf(page+len, "%-16s %s\n", "mode", s);
2156
2157 switch (cs->mstate) {
2158 case MS_UNINITIALIZED:
2159 s = "uninitialized";
2160 break;
2161 case MS_INIT:
2162 s = "init";
2163 break;
2164 case MS_LOCKED:
2165 s = "locked";
2166 break;
2167 case MS_SHUTDOWN:
2168 s = "shutdown";
2169 break;
2170 case MS_RECOVER:
2171 s = "recover";
2172 break;
2173 case MS_READY:
2174 s = "ready";
2175 break;
2176 default:
2177 s = "??";
2178 }
2179 len += sprintf(page+len, "%-16s %s\n", "mstate", s);
2180
2181 len += sprintf(page+len, "%-16s %s\n", "running",
2182 cs->running ? "yes" : "no");
2183 len += sprintf(page+len, "%-16s %s\n", "connected",
2184 cs->connected ? "yes" : "no");
2185 len += sprintf(page+len, "%-16s %s\n", "isdn_up",
2186 cs->isdn_up ? "yes" : "no");
2187 len += sprintf(page+len, "%-16s %s\n", "cidmode",
2188 cs->cidmode ? "yes" : "no");
2189
2190 for (i = 0; i < cs->channels; i++) {
2191 len += sprintf(page+len, "[%d]%-13s %d\n", i, "corrupted",
2192 cs->bcs[i].corrupted);
2193 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_down",
2194 cs->bcs[i].trans_down);
2195 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_up",
2196 cs->bcs[i].trans_up);
2197 len += sprintf(page+len, "[%d]%-13s %d\n", i, "chstate",
2198 cs->bcs[i].chstate);
2199 switch (cs->bcs[i].proto2) {
2200 case L2_BITSYNC:
2201 s = "bitsync";
2202 break;
2203 case L2_HDLC:
2204 s = "HDLC";
2205 break;
2206 case L2_VOICE:
2207 s = "voice";
2208 break;
2209 default:
2210 s = "??";
2211 }
2212 len += sprintf(page+len, "[%d]%-13s %s\n", i, "proto2", s);
2213 }
2214 return len;
2215}
2216
2217
/* CAPI driver descriptor registered with the Kernel CAPI subsystem;
 * only name and revision are filled in (no driver-level callbacks) */
static struct capi_driver capi_driver_gigaset = {
	.name		= "gigaset",
	.revision	= "1.0",
};
2222
2223/**
2224 * gigaset_isdn_register() - register to LL
2225 * @cs: device descriptor structure.
2226 * @isdnid: device name.
2227 *
2228 * Called by main module to register the device with the LL.
2229 *
2230 * Return value: 1 for success, 0 for failure
2231 */
2232int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
2233{
2234 struct gigaset_capi_ctr *iif;
2235 int rc;
2236
2237 pr_info("Kernel CAPI interface\n");
2238
2239 iif = kmalloc(sizeof(*iif), GFP_KERNEL);
2240 if (!iif) {
2241 pr_err("%s: out of memory\n", __func__);
2242 return 0;
2243 }
2244
2245 /* register driver with CAPI (ToDo: what for?) */
2246 register_capi_driver(&capi_driver_gigaset);
2247
2248 /* prepare controller structure */
2249 iif->ctr.owner = THIS_MODULE;
2250 iif->ctr.driverdata = cs;
2251 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
2252 iif->ctr.driver_name = "gigaset";
2253 iif->ctr.load_firmware = gigaset_load_firmware;
2254 iif->ctr.reset_ctr = gigaset_reset_ctr;
2255 iif->ctr.register_appl = gigaset_register_appl;
2256 iif->ctr.release_appl = gigaset_release_appl;
2257 iif->ctr.send_message = gigaset_send_message;
2258 iif->ctr.procinfo = gigaset_procinfo;
2259 iif->ctr.ctr_read_proc = gigaset_ctr_read_proc;
2260 INIT_LIST_HEAD(&iif->appls);
2261 skb_queue_head_init(&iif->sendqueue);
2262 atomic_set(&iif->sendqlen, 0);
2263
2264 /* register controller with CAPI */
2265 rc = attach_capi_ctr(&iif->ctr);
2266 if (rc) {
2267 pr_err("attach_capi_ctr failed (%d)\n", rc);
2268 unregister_capi_driver(&capi_driver_gigaset);
2269 kfree(iif);
2270 return 0;
2271 }
2272
2273 cs->iif = iif;
2274 cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
2275 return 1;
2276}
2277
/**
 * gigaset_isdn_unregister() - unregister from LL
 * @cs: device descriptor structure.
 *
 * Called by main module to unregister the device from the LL.
 *
 * Teardown order matters: detach the controller from CAPI first so no
 * new callbacks can arrive, then free the interface structure, and only
 * then drop the driver registration.
 */
void gigaset_isdn_unregister(struct cardstate *cs)
{
	struct gigaset_capi_ctr *iif = cs->iif;

	detach_capi_ctr(&iif->ctr);
	kfree(iif);
	cs->iif = NULL;
	unregister_capi_driver(&capi_driver_gigaset);
}
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 33dcd8d72b7c..82ed1cd14ff5 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -108,7 +108,7 @@ int gigaset_enterconfigmode(struct cardstate *cs)
108{ 108{
109 int i, r; 109 int i, r;
110 110
111 cs->control_state = TIOCM_RTS; //FIXME 111 cs->control_state = TIOCM_RTS;
112 112
113 r = setflags(cs, TIOCM_DTR, 200); 113 r = setflags(cs, TIOCM_DTR, 200);
114 if (r < 0) 114 if (r < 0)
@@ -132,10 +132,10 @@ int gigaset_enterconfigmode(struct cardstate *cs)
132 132
133error: 133error:
134 dev_err(cs->dev, "error %d on setuartbits\n", -r); 134 dev_err(cs->dev, "error %d on setuartbits\n", -r);
135 cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value? 135 cs->control_state = TIOCM_RTS|TIOCM_DTR;
136 cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR); 136 cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);
137 137
138 return -1; //r 138 return -1;
139} 139}
140 140
141static int test_timeout(struct at_state_t *at_state) 141static int test_timeout(struct at_state_t *at_state)
@@ -150,10 +150,9 @@ static int test_timeout(struct at_state_t *at_state)
150 } 150 }
151 151
152 if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL, 152 if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
153 at_state->timer_index, NULL)) { 153 at_state->timer_index, NULL))
154 //FIXME what should we do? 154 dev_err(at_state->cs->dev, "%s: out of memory\n",
155 } 155 __func__);
156
157 return 1; 156 return 1;
158} 157}
159 158
@@ -207,6 +206,32 @@ int gigaset_get_channel(struct bc_state *bcs)
207 return 1; 206 return 1;
208} 207}
209 208
209struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
210{
211 unsigned long flags;
212 int i;
213
214 spin_lock_irqsave(&cs->lock, flags);
215 if (!try_module_get(cs->driver->owner)) {
216 gig_dbg(DEBUG_ANY,
217 "could not get module for allocating channel");
218 spin_unlock_irqrestore(&cs->lock, flags);
219 return NULL;
220 }
221 for (i = 0; i < cs->channels; ++i)
222 if (!cs->bcs[i].use_count) {
223 ++cs->bcs[i].use_count;
224 cs->bcs[i].busy = 1;
225 spin_unlock_irqrestore(&cs->lock, flags);
226 gig_dbg(DEBUG_ANY, "allocated channel %d", i);
227 return cs->bcs + i;
228 }
229 module_put(cs->driver->owner);
230 spin_unlock_irqrestore(&cs->lock, flags);
231 gig_dbg(DEBUG_ANY, "no free channel");
232 return NULL;
233}
234
210void gigaset_free_channel(struct bc_state *bcs) 235void gigaset_free_channel(struct bc_state *bcs)
211{ 236{
212 unsigned long flags; 237 unsigned long flags;
@@ -367,16 +392,15 @@ static void gigaset_freebcs(struct bc_state *bcs)
367 int i; 392 int i;
368 393
369 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); 394 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
370 if (!bcs->cs->ops->freebcshw(bcs)) { 395 if (!bcs->cs->ops->freebcshw(bcs))
371 gig_dbg(DEBUG_INIT, "failed"); 396 gig_dbg(DEBUG_INIT, "failed");
372 }
373 397
374 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); 398 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
375 clear_at_state(&bcs->at_state); 399 clear_at_state(&bcs->at_state);
376 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel); 400 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
401 dev_kfree_skb(bcs->skb);
402 bcs->skb = NULL;
377 403
378 if (bcs->skb)
379 dev_kfree_skb(bcs->skb);
380 for (i = 0; i < AT_NUM; ++i) { 404 for (i = 0; i < AT_NUM; ++i) {
381 kfree(bcs->commands[i]); 405 kfree(bcs->commands[i]);
382 bcs->commands[i] = NULL; 406 bcs->commands[i] = NULL;
@@ -463,6 +487,12 @@ void gigaset_freecs(struct cardstate *cs)
463 487
464 switch (cs->cs_init) { 488 switch (cs->cs_init) {
465 default: 489 default:
490 /* clear B channel structures */
491 for (i = 0; i < cs->channels; ++i) {
492 gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
493 gigaset_freebcs(cs->bcs + i);
494 }
495
466 /* clear device sysfs */ 496 /* clear device sysfs */
467 gigaset_free_dev_sysfs(cs); 497 gigaset_free_dev_sysfs(cs);
468 498
@@ -471,28 +501,20 @@ void gigaset_freecs(struct cardstate *cs)
471 gig_dbg(DEBUG_INIT, "clearing hw"); 501 gig_dbg(DEBUG_INIT, "clearing hw");
472 cs->ops->freecshw(cs); 502 cs->ops->freecshw(cs);
473 503
474 //FIXME cmdbuf
475
476 /* fall through */ 504 /* fall through */
477 case 2: /* error in initcshw */ 505 case 2: /* error in initcshw */
478 /* Deregister from LL */ 506 /* Deregister from LL */
479 make_invalid(cs, VALID_ID); 507 make_invalid(cs, VALID_ID);
480 gig_dbg(DEBUG_INIT, "clearing iif"); 508 gigaset_isdn_unregister(cs);
481 gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
482 509
483 /* fall through */ 510 /* fall through */
484 case 1: /* error when regestering to LL */ 511 case 1: /* error when registering to LL */
485 gig_dbg(DEBUG_INIT, "clearing at_state"); 512 gig_dbg(DEBUG_INIT, "clearing at_state");
486 clear_at_state(&cs->at_state); 513 clear_at_state(&cs->at_state);
487 dealloc_at_states(cs); 514 dealloc_at_states(cs);
488 515
489 /* fall through */ 516 /* fall through */
490 case 0: /* error in one call to initbcs */ 517 case 0: /* error in basic setup */
491 for (i = 0; i < cs->channels; ++i) {
492 gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
493 gigaset_freebcs(cs->bcs + i);
494 }
495
496 clear_events(cs); 518 clear_events(cs);
497 gig_dbg(DEBUG_INIT, "freeing inbuf"); 519 gig_dbg(DEBUG_INIT, "freeing inbuf");
498 kfree(cs->inbuf); 520 kfree(cs->inbuf);
@@ -534,16 +556,13 @@ void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
534} 556}
535 557
536 558
537static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, 559static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
538 struct cardstate *cs, int inputstate)
539/* inbuf->read must be allocated before! */ 560/* inbuf->read must be allocated before! */
540{ 561{
541 inbuf->head = 0; 562 inbuf->head = 0;
542 inbuf->tail = 0; 563 inbuf->tail = 0;
543 inbuf->cs = cs; 564 inbuf->cs = cs;
544 inbuf->bcs = bcs; /*base driver: NULL*/ 565 inbuf->inputstate = INS_command;
545 inbuf->rcvbuf = NULL;
546 inbuf->inputstate = inputstate;
547} 566}
548 567
549/** 568/**
@@ -599,7 +618,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
599{ 618{
600 int i; 619 int i;
601 620
602 bcs->tx_skb = NULL; //FIXME -> hw part 621 bcs->tx_skb = NULL;
603 622
604 skb_queue_head_init(&bcs->squeue); 623 skb_queue_head_init(&bcs->squeue);
605 624
@@ -618,13 +637,13 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
618 bcs->fcs = PPP_INITFCS; 637 bcs->fcs = PPP_INITFCS;
619 bcs->inputstate = 0; 638 bcs->inputstate = 0;
620 if (cs->ignoreframes) { 639 if (cs->ignoreframes) {
621 bcs->inputstate |= INS_skip_frame;
622 bcs->skb = NULL; 640 bcs->skb = NULL;
623 } else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) 641 } else {
624 skb_reserve(bcs->skb, HW_HDR_LEN); 642 bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
625 else { 643 if (bcs->skb != NULL)
626 pr_err("out of memory\n"); 644 skb_reserve(bcs->skb, cs->hw_hdr_len);
627 bcs->inputstate |= INS_skip_frame; 645 else
646 pr_err("out of memory\n");
628 } 647 }
629 648
630 bcs->channel = channel; 649 bcs->channel = channel;
@@ -645,8 +664,8 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
645 gig_dbg(DEBUG_INIT, " failed"); 664 gig_dbg(DEBUG_INIT, " failed");
646 665
647 gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel); 666 gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel);
648 if (bcs->skb) 667 dev_kfree_skb(bcs->skb);
649 dev_kfree_skb(bcs->skb); 668 bcs->skb = NULL;
650 669
651 return NULL; 670 return NULL;
652} 671}
@@ -673,12 +692,13 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
673 int onechannel, int ignoreframes, 692 int onechannel, int ignoreframes,
674 int cidmode, const char *modulename) 693 int cidmode, const char *modulename)
675{ 694{
676 struct cardstate *cs = NULL; 695 struct cardstate *cs;
677 unsigned long flags; 696 unsigned long flags;
678 int i; 697 int i;
679 698
680 gig_dbg(DEBUG_INIT, "allocating cs"); 699 gig_dbg(DEBUG_INIT, "allocating cs");
681 if (!(cs = alloc_cs(drv))) { 700 cs = alloc_cs(drv);
701 if (!cs) {
682 pr_err("maximum number of devices exceeded\n"); 702 pr_err("maximum number of devices exceeded\n");
683 return NULL; 703 return NULL;
684 } 704 }
@@ -707,7 +727,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
707 cs->ev_tail = 0; 727 cs->ev_tail = 0;
708 cs->ev_head = 0; 728 cs->ev_head = 0;
709 729
710 tasklet_init(&cs->event_tasklet, &gigaset_handle_event, 730 tasklet_init(&cs->event_tasklet, gigaset_handle_event,
711 (unsigned long) cs); 731 (unsigned long) cs);
712 cs->commands_pending = 0; 732 cs->commands_pending = 0;
713 cs->cur_at_seq = 0; 733 cs->cur_at_seq = 0;
@@ -726,14 +746,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
726 cs->mode = M_UNKNOWN; 746 cs->mode = M_UNKNOWN;
727 cs->mstate = MS_UNINITIALIZED; 747 cs->mstate = MS_UNINITIALIZED;
728 748
729 for (i = 0; i < channels; ++i) {
730 gig_dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
731 if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
732 pr_err("could not allocate channel %d data\n", i);
733 goto error;
734 }
735 }
736
737 ++cs->cs_init; 749 ++cs->cs_init;
738 750
739 gig_dbg(DEBUG_INIT, "setting up at_state"); 751 gig_dbg(DEBUG_INIT, "setting up at_state");
@@ -743,10 +755,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
743 cs->cbytes = 0; 755 cs->cbytes = 0;
744 756
745 gig_dbg(DEBUG_INIT, "setting up inbuf"); 757 gig_dbg(DEBUG_INIT, "setting up inbuf");
746 if (onechannel) { //FIXME distinction necessary? 758 gigaset_inbuf_init(cs->inbuf, cs);
747 gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
748 } else
749 gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command);
750 759
751 cs->connected = 0; 760 cs->connected = 0;
752 cs->isdn_up = 0; 761 cs->isdn_up = 0;
@@ -758,7 +767,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
758 cs->cmdbytes = 0; 767 cs->cmdbytes = 0;
759 768
760 gig_dbg(DEBUG_INIT, "setting up iif"); 769 gig_dbg(DEBUG_INIT, "setting up iif");
761 if (!gigaset_register_to_LL(cs, modulename)) { 770 if (!gigaset_isdn_register(cs, modulename)) {
762 pr_err("error registering ISDN device\n"); 771 pr_err("error registering ISDN device\n");
763 goto error; 772 goto error;
764 } 773 }
@@ -777,6 +786,15 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
777 /* set up device sysfs */ 786 /* set up device sysfs */
778 gigaset_init_dev_sysfs(cs); 787 gigaset_init_dev_sysfs(cs);
779 788
789 /* set up channel data structures */
790 for (i = 0; i < channels; ++i) {
791 gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
792 if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
793 pr_err("could not allocate channel %d data\n", i);
794 goto error;
795 }
796 }
797
780 spin_lock_irqsave(&cs->lock, flags); 798 spin_lock_irqsave(&cs->lock, flags);
781 cs->running = 1; 799 cs->running = 1;
782 spin_unlock_irqrestore(&cs->lock, flags); 800 spin_unlock_irqrestore(&cs->lock, flags);
@@ -824,9 +842,10 @@ void gigaset_bcs_reinit(struct bc_state *bcs)
824 bcs->chstate = 0; 842 bcs->chstate = 0;
825 843
826 bcs->ignore = cs->ignoreframes; 844 bcs->ignore = cs->ignoreframes;
827 if (bcs->ignore) 845 if (bcs->ignore) {
828 bcs->inputstate |= INS_skip_frame; 846 dev_kfree_skb(bcs->skb);
829 847 bcs->skb = NULL;
848 }
830 849
831 cs->ops->reinitbcshw(bcs); 850 cs->ops->reinitbcshw(bcs);
832} 851}
@@ -847,8 +866,6 @@ static void cleanup_cs(struct cardstate *cs)
847 free_strings(&cs->at_state); 866 free_strings(&cs->at_state);
848 gigaset_at_init(&cs->at_state, NULL, cs, 0); 867 gigaset_at_init(&cs->at_state, NULL, cs, 0);
849 868
850 kfree(cs->inbuf->rcvbuf);
851 cs->inbuf->rcvbuf = NULL;
852 cs->inbuf->inputstate = INS_command; 869 cs->inbuf->inputstate = INS_command;
853 cs->inbuf->head = 0; 870 cs->inbuf->head = 0;
854 cs->inbuf->tail = 0; 871 cs->inbuf->tail = 0;
@@ -911,15 +928,13 @@ int gigaset_start(struct cardstate *cs)
911 cs->ops->baud_rate(cs, B115200); 928 cs->ops->baud_rate(cs, B115200);
912 cs->ops->set_line_ctrl(cs, CS8); 929 cs->ops->set_line_ctrl(cs, CS8);
913 cs->control_state = TIOCM_DTR|TIOCM_RTS; 930 cs->control_state = TIOCM_DTR|TIOCM_RTS;
914 } else {
915 //FIXME use some saved values?
916 } 931 }
917 932
918 cs->waiting = 1; 933 cs->waiting = 1;
919 934
920 if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) { 935 if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
921 cs->waiting = 0; 936 cs->waiting = 0;
922 //FIXME what should we do? 937 dev_err(cs->dev, "%s: out of memory\n", __func__);
923 goto error; 938 goto error;
924 } 939 }
925 940
@@ -959,7 +974,7 @@ int gigaset_shutdown(struct cardstate *cs)
959 cs->waiting = 1; 974 cs->waiting = 1;
960 975
961 if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) { 976 if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
962 //FIXME what should we do? 977 dev_err(cs->dev, "%s: out of memory\n", __func__);
963 goto exit; 978 goto exit;
964 } 979 }
965 980
@@ -990,7 +1005,7 @@ void gigaset_stop(struct cardstate *cs)
990 cs->waiting = 1; 1005 cs->waiting = 1;
991 1006
992 if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) { 1007 if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
993 //FIXME what should we do? 1008 dev_err(cs->dev, "%s: out of memory\n", __func__);
994 goto exit; 1009 goto exit;
995 } 1010 }
996 1011
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
new file mode 100644
index 000000000000..5b27c996af6d
--- /dev/null
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -0,0 +1,68 @@
1/*
2 * Dummy LL interface for the Gigaset driver
3 *
4 * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
5 *
6 * =====================================================================
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 * =====================================================================
12 */
13
14#include "gigaset.h"
15
16void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
17{
18}
19EXPORT_SYMBOL_GPL(gigaset_skb_sent);
20
21void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
22{
23}
24EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
25
26void gigaset_isdn_rcv_err(struct bc_state *bcs)
27{
28}
29EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
30
31int gigaset_isdn_icall(struct at_state_t *at_state)
32{
33 return ICALL_IGNORE;
34}
35
36void gigaset_isdn_connD(struct bc_state *bcs)
37{
38}
39
40void gigaset_isdn_hupD(struct bc_state *bcs)
41{
42}
43
44void gigaset_isdn_connB(struct bc_state *bcs)
45{
46}
47
48void gigaset_isdn_hupB(struct bc_state *bcs)
49{
50}
51
52void gigaset_isdn_start(struct cardstate *cs)
53{
54}
55
56void gigaset_isdn_stop(struct cardstate *cs)
57{
58}
59
60int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
61{
62 pr_info("no ISDN subsystem interface\n");
63 return 1;
64}
65
66void gigaset_isdn_unregister(struct cardstate *cs)
67{
68}
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index cc768caa38f5..ddeb0456d202 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -40,8 +40,8 @@
40 40
41/* Possible ASCII responses */ 41/* Possible ASCII responses */
42#define RSP_OK 0 42#define RSP_OK 0
43//#define RSP_BUSY 1 43#define RSP_BUSY 1
44//#define RSP_CONNECT 2 44#define RSP_CONNECT 2
45#define RSP_ZGCI 3 45#define RSP_ZGCI 3
46#define RSP_RING 4 46#define RSP_RING 4
47#define RSP_ZAOC 5 47#define RSP_ZAOC 5
@@ -68,7 +68,6 @@
68#define RSP_ZHLC (RSP_STR + STR_ZHLC) 68#define RSP_ZHLC (RSP_STR + STR_ZHLC)
69#define RSP_ERROR -1 /* ERROR */ 69#define RSP_ERROR -1 /* ERROR */
70#define RSP_WRONG_CID -2 /* unknown cid in cmd */ 70#define RSP_WRONG_CID -2 /* unknown cid in cmd */
71//#define RSP_EMPTY -3
72#define RSP_UNKNOWN -4 /* unknown response */ 71#define RSP_UNKNOWN -4 /* unknown response */
73#define RSP_FAIL -5 /* internal error */ 72#define RSP_FAIL -5 /* internal error */
74#define RSP_INVAL -6 /* invalid response */ 73#define RSP_INVAL -6 /* invalid response */
@@ -76,9 +75,9 @@
76#define RSP_NONE -19 75#define RSP_NONE -19
77#define RSP_STRING -20 76#define RSP_STRING -20
78#define RSP_NULL -21 77#define RSP_NULL -21
79//#define RSP_RETRYFAIL -22 78#define RSP_RETRYFAIL -22
80//#define RSP_RETRY -23 79#define RSP_RETRY -23
81//#define RSP_SKIP -24 80#define RSP_SKIP -24
82#define RSP_INIT -27 81#define RSP_INIT -27
83#define RSP_ANY -26 82#define RSP_ANY -26
84#define RSP_LAST -28 83#define RSP_LAST -28
@@ -127,7 +126,6 @@
127#define ACT_NOTIFY_BC_UP 39 126#define ACT_NOTIFY_BC_UP 39
128#define ACT_DIAL 40 127#define ACT_DIAL 40
129#define ACT_ACCEPT 41 128#define ACT_ACCEPT 41
130#define ACT_PROTO_L2 42
131#define ACT_HUP 43 129#define ACT_HUP 43
132#define ACT_IF_LOCK 44 130#define ACT_IF_LOCK 44
133#define ACT_START 45 131#define ACT_START 45
@@ -159,229 +157,229 @@
159#define SEQ_UMMODE 11 157#define SEQ_UMMODE 11
160 158
161 159
162// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring 160/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
161 * 400: hup, 500: reset, 600: dial, 700: ring */
163struct reply_t gigaset_tab_nocid[] = 162struct reply_t gigaset_tab_nocid[] =
164{ 163{
165 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ 164/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
166 165 * action, command */
167 /* initialize device, set cid mode if possible */ 166
168 //{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}}, 167/* initialize device, set cid mode if possible */
169 //{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}}, 168{RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
170 //{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT, 169
171 // {ACT_TIMEOUT}}, 170{EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
172 171{RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
173 {RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT, 172 "+GMR\r"},
174 {ACT_TIMEOUT}}, /* wait until device is ready */ 173
175 174{EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
176 {EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */ 175{RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
177 {RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */ 176
178 177{EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
179 {EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */ 178 "^SDLE=0\r"},
180 {RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */ 179{RSP_OK, 108, 108, -1, 104, -1},
181 180{RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
182 {EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */ 181{EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
183 {RSP_OK, 108,108, -1, 104,-1}, 182{RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
184 {RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"}, 183
185 {EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}}, 184{EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
186 {RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}}, 185 ACT_HUPMODEM,
187 186 ACT_TIMEOUT} },
188 {EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0, 187{EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
189 ACT_HUPMODEM, 188
190 ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */ 189{RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
191 {EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"}, 190{RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
192 191{RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
193 {RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */ 192{EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
194 {RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}}, 193
195 {RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}}, 194{RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
196 {EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}}, 195{EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
197 196
198 {RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}}, 197{RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
199 {EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}}, 198
200 199{EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
201 {RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}}, 200 ACT_INIT} },
202 201{RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
203 {EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, 202 ACT_INIT} },
204 {RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, 203{RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
205 {RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}}, 204 ACT_INIT} },
206 205
207 /* leave dle mode */ 206/* leave dle mode */
208 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, 207{RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
209 {RSP_OK, 201,201, -1, 202,-1}, 208{RSP_OK, 201, 201, -1, 202, -1},
210 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, 209{RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
211 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, 210{RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
212 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 211{RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
213 {EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 212{EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
214 213
215 /* enter dle mode */ 214/* enter dle mode */
216 {RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, 215{RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
217 {RSP_OK, 251,251, -1, 252,-1}, 216{RSP_OK, 251, 251, -1, 252, -1},
218 {RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}}, 217{RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
219 {RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, 218{RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
220 {EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, 219{EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
221 220
222 /* incoming call */ 221/* incoming call */
223 {RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}}, 222{RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
224 223
225 /* get cid */ 224/* get cid */
226 //{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}}, 225{RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
227 //{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}}, 226{RSP_OK, 301, 301, -1, 302, -1},
228 //{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"}, 227{RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
229 228{RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
230 {RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, 229{EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
231 {RSP_OK, 301,301, -1, 302,-1}, 230
232 {RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}}, 231/* enter cid mode */
233 {RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}}, 232{RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
234 {EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}}, 233{RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
235 234{RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
236 /* enter cid mode */ 235{EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
237 {RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, 236
238 {RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}}, 237/* leave cid mode */
239 {RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, 238{RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
240 {EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, 239{RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
241 240{RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
242 /* leave cid mode */ 241{EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
243 //{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"}, 242
244 {RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"}, 243/* abort getting cid */
245 {RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}}, 244{RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
246 {RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, 245
247 {EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, 246/* reset */
248 247{RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
249 /* abort getting cid */ 248{RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
250 {RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}}, 249{RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
251 250{EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
252 /* reset */ 251{RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
253 {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, 252
254 {RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}}, 253{EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
255 {RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, 254{EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
256 {EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, 255{EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
257 {RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}}, 256{EV_START, -1, -1, -1, -1, -1, {ACT_START} },
258 257{EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
259 {EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME 258{EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
260 {EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME 259
261 {EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME 260/* misc. */
262 {EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME 261{RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
263 {EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME 262{RSP_ZCFGT, -1, -1, -1, -1, -1, {ACT_DEBUG} },
264 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME 263{RSP_ZCFG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
265 264{RSP_ZLOG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
266 /* misc. */ 265{RSP_ZMWI, -1, -1, -1, -1, -1, {ACT_DEBUG} },
267 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, 266{RSP_ZABINFO, -1, -1, -1, -1, -1, {ACT_DEBUG} },
268 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 267{RSP_ZSMLSTCHG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
269 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 268
270 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 269{RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
271 {RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 270{RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
272 {RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 271{RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
273 {RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 272{RSP_LAST}
274 {RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
275
276 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
277 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
278 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
279 {RSP_LAST}
280}; 273};
281 274
282// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall 275/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
276 * 400: hup, 750: accepted icall */
283struct reply_t gigaset_tab_cid[] = 277struct reply_t gigaset_tab_cid[] =
284{ 278{
285 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ 279/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
286 280 * action, command */
287 /* dial */ 281
288 {EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME 282/* dial */
289 {RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}}, 283{EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
290 {RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}}, 284{RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC} },
291 {RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, 285{RSP_OK, 601, 601, -1, 602, 5, {ACT_CMD+AT_HLC} },
292 {RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, 286{RSP_NULL, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
293 {RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}}, 287{RSP_OK, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
294 {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}}, 288{RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD+AT_TYPE} },
295 {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, 289{RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD+AT_MSN} },
296 {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, 290{RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
297 {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, 291{RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
298 {RSP_OK, 607,607, -1, 608,-1}, 292{RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} },
299 {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}}, 293{RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} },
300 {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}}, 294{RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
301 295{RSP_OK, 608, 608, -1, 609, -1},
302 {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, 296{RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD+AT_DIAL} },
303 {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, 297{RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
304 298
305 /* optional dialing responses */ 299{RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
306 {EV_BC_OPEN, 650,650, -1, 651,-1}, 300{EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
307 {RSP_ZVLS, 608,651, 17, -1,-1, {ACT_DEBUG}}, 301
308 {RSP_ZCTP, 609,651, -1, -1,-1, {ACT_DEBUG}}, 302/* optional dialing responses */
309 {RSP_ZCPN, 609,651, -1, -1,-1, {ACT_DEBUG}}, 303{EV_BC_OPEN, 650, 650, -1, 651, -1},
310 {RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, 304{RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
311 305{RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
312 /* connect */ 306{RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
313 {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, 307{RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
314 {RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, 308
315 ACT_NOTIFY_BC_UP}}, 309/* connect */
316 {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, 310{RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
317 {RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, 311{RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
318 ACT_NOTIFY_BC_UP}}, 312 ACT_NOTIFY_BC_UP} },
319 {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, 313{RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
320 314{RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
321 /* remote hangup */ 315 ACT_NOTIFY_BC_UP} },
322 {RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, 316{EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
323 {RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 317
324 {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 318/* remote hangup */
325 319{RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
326 /* hangup */ 320{RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
327 {EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME 321{RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
328 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? 322
329 {RSP_OK, 401,401, -1, 402, 5}, 323/* hangup */
330 {RSP_ZVLS, 402,402, 0, 403, 5}, 324{EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
331 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, 325{RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
332 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, 326{RSP_OK, 401, 401, -1, 402, 5},
333 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, 327{RSP_ZVLS, 402, 402, 0, 403, 5},
334 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, 328{RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
335 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, 329{RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
336 330{RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
337 {EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout 331{RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
338 332{EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
339 /* ring */ 333
340 {RSP_ZBC, 700,700, -1, -1,-1, {0}}, 334{EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
341 {RSP_ZHLC, 700,700, -1, -1,-1, {0}}, 335
342 {RSP_NMBR, 700,700, -1, -1,-1, {0}}, 336/* ring */
343 {RSP_ZCPN, 700,700, -1, -1,-1, {0}}, 337{RSP_ZBC, 700, 700, -1, -1, -1, {0} },
344 {RSP_ZCTP, 700,700, -1, -1,-1, {0}}, 338{RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
345 {EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}}, 339{RSP_NMBR, 700, 700, -1, -1, -1, {0} },
346 {EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, 340{RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
347 341{RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
348 /*accept icall*/ 342{EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
349 {EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME 343{EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
350 {RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}}, 344
351 {RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}}, 345/*accept icall*/
352 {RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */ 346{EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
353 {RSP_OK, 723,723, -1, 724, 5, {0}}, 347{RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO} },
354 {RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}}, 348{RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD+AT_ISO} },
355 {RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, 349{RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
356 {EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, 350{RSP_OK, 723, 723, -1, 724, 5, {0} },
357 {RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}}, 351{RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
358 {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}}, 352{RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
359 {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}}, 353{EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
360 354{RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
361 {EV_BC_OPEN, 750,750, -1, 751,-1}, 355{RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
362 {EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}}, 356{RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
363 357
364 /* B channel closed (general case) */ 358{EV_BC_OPEN, 750, 750, -1, 751, -1},
365 {EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME 359{EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
366 360
367 /* misc. */ 361/* B channel closed (general case) */
368 {EV_PROTO_L2, -1, -1, -1, -1,-1, {ACT_PROTO_L2}}, //FIXME 362{EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
369 363
370 {RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 364/* misc. */
371 {RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 365{RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
372 {RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 366{RSP_ZCCR, -1, -1, -1, -1, -1, {ACT_DEBUG} },
373 {RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 367{RSP_ZAOC, -1, -1, -1, -1, -1, {ACT_DEBUG} },
374 368{RSP_ZCSTR, -1, -1, -1, -1, -1, {ACT_DEBUG} },
375 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}}, 369
376 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}}, 370{RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
377 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}}, 371{RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
378 {RSP_LAST} 372{RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
373{RSP_LAST}
379}; 374};
380 375
381 376
382static const struct resp_type_t resp_type[] = 377static const struct resp_type_t {
378 unsigned char *response;
379 int resp_code;
380 int type;
381} resp_type[] =
383{ 382{
384 /*{"", RSP_EMPTY, RT_NOTHING},*/
385 {"OK", RSP_OK, RT_NOTHING}, 383 {"OK", RSP_OK, RT_NOTHING},
386 {"ERROR", RSP_ERROR, RT_NOTHING}, 384 {"ERROR", RSP_ERROR, RT_NOTHING},
387 {"ZSAU", RSP_ZSAU, RT_ZSAU}, 385 {"ZSAU", RSP_ZSAU, RT_ZSAU},
@@ -405,7 +403,21 @@ static const struct resp_type_t resp_type[] =
405 {"ZLOG", RSP_ZLOG, RT_NOTHING}, 403 {"ZLOG", RSP_ZLOG, RT_NOTHING},
406 {"ZABINFO", RSP_ZABINFO, RT_NOTHING}, 404 {"ZABINFO", RSP_ZABINFO, RT_NOTHING},
407 {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING}, 405 {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING},
408 {NULL,0,0} 406 {NULL, 0, 0}
407};
408
409static const struct zsau_resp_t {
410 unsigned char *str;
411 int code;
412} zsau_resp[] =
413{
414 {"OUTGOING_CALL_PROCEEDING", ZSAU_OUTGOING_CALL_PROCEEDING},
415 {"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
416 {"ACTIVE", ZSAU_ACTIVE},
417 {"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
418 {"NULL", ZSAU_NULL},
419 {"DISCONNECT_REQ", ZSAU_DISCONNECT_REQ},
420 {NULL, ZSAU_UNKNOWN}
409}; 421};
410 422
411/* 423/*
@@ -470,7 +482,6 @@ static int cid_of_response(char *s)
470 if (cid < 1 || cid > 65535) 482 if (cid < 1 || cid > 65535)
471 return -1; /* CID out of range */ 483 return -1; /* CID out of range */
472 return cid; 484 return cid;
473 //FIXME is ;<digit>+ at end of non-CID response really impossible?
474} 485}
475 486
476/** 487/**
@@ -487,6 +498,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
487 int params; 498 int params;
488 int i, j; 499 int i, j;
489 const struct resp_type_t *rt; 500 const struct resp_type_t *rt;
501 const struct zsau_resp_t *zr;
490 int curarg; 502 int curarg;
491 unsigned long flags; 503 unsigned long flags;
492 unsigned next, tail, head; 504 unsigned next, tail, head;
@@ -613,24 +625,14 @@ void gigaset_handle_modem_response(struct cardstate *cs)
613 event->parameter = ZSAU_NONE; 625 event->parameter = ZSAU_NONE;
614 break; 626 break;
615 } 627 }
616 if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING")) 628 for (zr = zsau_resp; zr->str; ++zr)
617 event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING; 629 if (!strcmp(argv[curarg], zr->str))
618 else if (!strcmp(argv[curarg], "CALL_DELIVERED")) 630 break;
619 event->parameter = ZSAU_CALL_DELIVERED; 631 event->parameter = zr->code;
620 else if (!strcmp(argv[curarg], "ACTIVE")) 632 if (!zr->str)
621 event->parameter = ZSAU_ACTIVE;
622 else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
623 event->parameter = ZSAU_DISCONNECT_IND;
624 else if (!strcmp(argv[curarg], "NULL"))
625 event->parameter = ZSAU_NULL;
626 else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
627 event->parameter = ZSAU_DISCONNECT_REQ;
628 else {
629 event->parameter = ZSAU_UNKNOWN;
630 dev_warn(cs->dev, 633 dev_warn(cs->dev,
631 "%s: unknown parameter %s after ZSAU\n", 634 "%s: unknown parameter %s after ZSAU\n",
632 __func__, argv[curarg]); 635 __func__, argv[curarg]);
633 }
634 ++curarg; 636 ++curarg;
635 break; 637 break;
636 case RT_STRING: 638 case RT_STRING:
@@ -714,7 +716,7 @@ static void disconnect(struct at_state_t **at_state_p)
714 /* notify LL */ 716 /* notify LL */
715 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { 717 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
716 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); 718 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
717 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); 719 gigaset_isdn_hupD(bcs);
718 } 720 }
719 } else { 721 } else {
720 /* no B channel assigned: just deallocate */ 722 /* no B channel assigned: just deallocate */
@@ -872,12 +874,12 @@ static void bchannel_down(struct bc_state *bcs)
872{ 874{
873 if (bcs->chstate & CHS_B_UP) { 875 if (bcs->chstate & CHS_B_UP) {
874 bcs->chstate &= ~CHS_B_UP; 876 bcs->chstate &= ~CHS_B_UP;
875 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP); 877 gigaset_isdn_hupB(bcs);
876 } 878 }
877 879
878 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { 880 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
879 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); 881 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
880 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); 882 gigaset_isdn_hupD(bcs);
881 } 883 }
882 884
883 gigaset_free_channel(bcs); 885 gigaset_free_channel(bcs);
@@ -894,15 +896,17 @@ static void bchannel_up(struct bc_state *bcs)
894 } 896 }
895 897
896 bcs->chstate |= CHS_B_UP; 898 bcs->chstate |= CHS_B_UP;
897 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN); 899 gigaset_isdn_connB(bcs);
898} 900}
899 901
900static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_index) 902static void start_dial(struct at_state_t *at_state, void *data,
903 unsigned seq_index)
901{ 904{
902 struct bc_state *bcs = at_state->bcs; 905 struct bc_state *bcs = at_state->bcs;
903 struct cardstate *cs = at_state->cs; 906 struct cardstate *cs = at_state->cs;
904 int retval; 907 char **commands = data;
905 unsigned long flags; 908 unsigned long flags;
909 int i;
906 910
907 bcs->chstate |= CHS_NOTIFY_LL; 911 bcs->chstate |= CHS_NOTIFY_LL;
908 912
@@ -913,10 +917,10 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
913 } 917 }
914 spin_unlock_irqrestore(&cs->lock, flags); 918 spin_unlock_irqrestore(&cs->lock, flags);
915 919
916 retval = gigaset_isdn_setup_dial(at_state, data); 920 for (i = 0; i < AT_NUM; ++i) {
917 if (retval != 0) 921 kfree(bcs->commands[i]);
918 goto error; 922 bcs->commands[i] = commands[i];
919 923 }
920 924
921 at_state->pending_commands |= PC_CID; 925 at_state->pending_commands |= PC_CID;
922 gig_dbg(DEBUG_CMD, "Scheduling PC_CID"); 926 gig_dbg(DEBUG_CMD, "Scheduling PC_CID");
@@ -924,6 +928,10 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
924 return; 928 return;
925 929
926error: 930error:
931 for (i = 0; i < AT_NUM; ++i) {
932 kfree(commands[i]);
933 commands[i] = NULL;
934 }
927 at_state->pending_commands |= PC_NOCID; 935 at_state->pending_commands |= PC_NOCID;
928 gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID"); 936 gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID");
929 cs->commands_pending = 1; 937 cs->commands_pending = 1;
@@ -933,20 +941,31 @@ error:
933static void start_accept(struct at_state_t *at_state) 941static void start_accept(struct at_state_t *at_state)
934{ 942{
935 struct cardstate *cs = at_state->cs; 943 struct cardstate *cs = at_state->cs;
936 int retval; 944 struct bc_state *bcs = at_state->bcs;
945 int i;
937 946
938 retval = gigaset_isdn_setup_accept(at_state); 947 for (i = 0; i < AT_NUM; ++i) {
948 kfree(bcs->commands[i]);
949 bcs->commands[i] = NULL;
950 }
939 951
940 if (retval == 0) { 952 bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
941 at_state->pending_commands |= PC_ACCEPT; 953 bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
942 gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT"); 954 if (!bcs->commands[AT_PROTO] || !bcs->commands[AT_ISO]) {
943 cs->commands_pending = 1; 955 dev_err(at_state->cs->dev, "out of memory\n");
944 } else {
945 /* error reset */ 956 /* error reset */
946 at_state->pending_commands |= PC_HUP; 957 at_state->pending_commands |= PC_HUP;
947 gig_dbg(DEBUG_CMD, "Scheduling PC_HUP"); 958 gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
948 cs->commands_pending = 1; 959 cs->commands_pending = 1;
960 return;
949 } 961 }
962
963 snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
964 snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
965
966 at_state->pending_commands |= PC_ACCEPT;
967 gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
968 cs->commands_pending = 1;
950} 969}
951 970
952static void do_start(struct cardstate *cs) 971static void do_start(struct cardstate *cs)
@@ -957,9 +976,7 @@ static void do_start(struct cardstate *cs)
957 schedule_init(cs, MS_INIT); 976 schedule_init(cs, MS_INIT);
958 977
959 cs->isdn_up = 1; 978 cs->isdn_up = 1;
960 gigaset_i4l_cmd(cs, ISDN_STAT_RUN); 979 gigaset_isdn_start(cs);
961 // FIXME: not in locked mode
962 // FIXME 2: only after init sequence
963 980
964 cs->waiting = 0; 981 cs->waiting = 0;
965 wake_up(&cs->waitqueue); 982 wake_up(&cs->waitqueue);
@@ -975,7 +992,7 @@ static void finish_shutdown(struct cardstate *cs)
975 /* Tell the LL that the device is not available .. */ 992 /* Tell the LL that the device is not available .. */
976 if (cs->isdn_up) { 993 if (cs->isdn_up) {
977 cs->isdn_up = 0; 994 cs->isdn_up = 0;
978 gigaset_i4l_cmd(cs, ISDN_STAT_STOP); 995 gigaset_isdn_stop(cs);
979 } 996 }
980 997
981 /* The rest is done by cleanup_cs () in user mode. */ 998 /* The rest is done by cleanup_cs () in user mode. */
@@ -1113,7 +1130,6 @@ static int do_lock(struct cardstate *cs)
1113 1130
1114 break; 1131 break;
1115 case MS_LOCKED: 1132 case MS_LOCKED:
1116 //retval = -EACCES;
1117 break; 1133 break;
1118 default: 1134 default:
1119 return -EBUSY; 1135 return -EBUSY;
@@ -1276,7 +1292,7 @@ static void do_action(int action, struct cardstate *cs,
1276 break; 1292 break;
1277 } 1293 }
1278 bcs->chstate |= CHS_D_UP; 1294 bcs->chstate |= CHS_D_UP;
1279 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); 1295 gigaset_isdn_connD(bcs);
1280 cs->ops->init_bchannel(bcs); 1296 cs->ops->init_bchannel(bcs);
1281 break; 1297 break;
1282 case ACT_DLE1: 1298 case ACT_DLE1:
@@ -1284,7 +1300,7 @@ static void do_action(int action, struct cardstate *cs,
1284 bcs = cs->bcs + cs->curchannel; 1300 bcs = cs->bcs + cs->curchannel;
1285 1301
1286 bcs->chstate |= CHS_D_UP; 1302 bcs->chstate |= CHS_D_UP;
1287 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); 1303 gigaset_isdn_connD(bcs);
1288 cs->ops->init_bchannel(bcs); 1304 cs->ops->init_bchannel(bcs);
1289 break; 1305 break;
1290 case ACT_FAKEHUP: 1306 case ACT_FAKEHUP:
@@ -1369,7 +1385,7 @@ static void do_action(int action, struct cardstate *cs,
1369 cs->cur_at_seq = SEQ_NONE; 1385 cs->cur_at_seq = SEQ_NONE;
1370 break; 1386 break;
1371 1387
1372 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */ 1388 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL procssng */
1373 disconnect(p_at_state); 1389 disconnect(p_at_state);
1374 break; 1390 break;
1375 1391
@@ -1443,17 +1459,6 @@ static void do_action(int action, struct cardstate *cs,
1443 __func__, at_state->ConState); 1459 __func__, at_state->ConState);
1444 cs->cur_at_seq = SEQ_NONE; 1460 cs->cur_at_seq = SEQ_NONE;
1445 break; 1461 break;
1446#ifdef CONFIG_GIGASET_DEBUG
1447 case ACT_TEST:
1448 {
1449 static int count = 3; //2; //1;
1450 *p_genresp = 1;
1451 *p_resp_code = count ? RSP_ERROR : RSP_OK;
1452 if (count > 0)
1453 --count;
1454 }
1455 break;
1456#endif
1457 case ACT_DEBUG: 1462 case ACT_DEBUG:
1458 gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d", 1463 gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
1459 __func__, ev->type, at_state->ConState); 1464 __func__, ev->type, at_state->ConState);
@@ -1474,11 +1479,6 @@ static void do_action(int action, struct cardstate *cs,
1474 case ACT_ACCEPT: 1479 case ACT_ACCEPT:
1475 start_accept(at_state); 1480 start_accept(at_state);
1476 break; 1481 break;
1477 case ACT_PROTO_L2:
1478 gig_dbg(DEBUG_CMD, "set protocol to %u",
1479 (unsigned) ev->parameter);
1480 at_state->bcs->proto2 = ev->parameter;
1481 break;
1482 case ACT_HUP: 1482 case ACT_HUP:
1483 at_state->pending_commands |= PC_HUP; 1483 at_state->pending_commands |= PC_HUP;
1484 cs->commands_pending = 1; 1484 cs->commands_pending = 1;
@@ -1493,7 +1493,7 @@ static void do_action(int action, struct cardstate *cs,
1493 do_start(cs); 1493 do_start(cs);
1494 break; 1494 break;
1495 1495
1496 /* events from the interface */ // FIXME without ACT_xxxx? 1496 /* events from the interface */
1497 case ACT_IF_LOCK: 1497 case ACT_IF_LOCK:
1498 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); 1498 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
1499 cs->waiting = 0; 1499 cs->waiting = 0;
@@ -1512,7 +1512,7 @@ static void do_action(int action, struct cardstate *cs,
1512 wake_up(&cs->waitqueue); 1512 wake_up(&cs->waitqueue);
1513 break; 1513 break;
1514 1514
1515 /* events from the proc file system */ // FIXME without ACT_xxxx? 1515 /* events from the proc file system */
1516 case ACT_PROC_CIDMODE: 1516 case ACT_PROC_CIDMODE:
1517 spin_lock_irqsave(&cs->lock, flags); 1517 spin_lock_irqsave(&cs->lock, flags);
1518 if (ev->parameter != cs->cidmode) { 1518 if (ev->parameter != cs->cidmode) {
@@ -1649,7 +1649,8 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
1649 for (curact = 0; curact < MAXACT; ++curact) { 1649 for (curact = 0; curact < MAXACT; ++curact) {
1650 /* The row tells us what we should do .. 1650 /* The row tells us what we should do ..
1651 */ 1651 */
1652 do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev); 1652 do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
1653 &genresp, &resp_code, ev);
1653 if (!at_state) 1654 if (!at_state)
1654 break; /* may be freed after disconnect */ 1655 break; /* may be freed after disconnect */
1655 } 1656 }
@@ -1661,13 +1662,14 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
1661 1662
1662 if (genresp) { 1663 if (genresp) {
1663 spin_lock_irqsave(&cs->lock, flags); 1664 spin_lock_irqsave(&cs->lock, flags);
1664 at_state->timer_expires = 0; //FIXME 1665 at_state->timer_expires = 0;
1665 at_state->timer_active = 0; //FIXME 1666 at_state->timer_active = 0;
1666 spin_unlock_irqrestore(&cs->lock, flags); 1667 spin_unlock_irqrestore(&cs->lock, flags);
1667 gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL); 1668 gigaset_add_event(cs, at_state, resp_code,
1669 NULL, 0, NULL);
1668 } else { 1670 } else {
1669 /* Send command to modem if not NULL... */ 1671 /* Send command to modem if not NULL... */
1670 if (p_command/*rep->command*/) { 1672 if (p_command) {
1671 if (cs->connected) 1673 if (cs->connected)
1672 send_command(cs, p_command, 1674 send_command(cs, p_command,
1673 sendcid, cs->dle, 1675 sendcid, cs->dle,
@@ -1754,7 +1756,8 @@ static void process_command_flags(struct cardstate *cs)
1754 } 1756 }
1755 } 1757 }
1756 1758
1757 /* only switch back to unimodem mode, if no commands are pending and no channels are up */ 1759 /* only switch back to unimodem mode if no commands are pending and
1760 * no channels are up */
1758 spin_lock_irqsave(&cs->lock, flags); 1761 spin_lock_irqsave(&cs->lock, flags);
1759 if (cs->at_state.pending_commands == PC_UMMODE 1762 if (cs->at_state.pending_commands == PC_UMMODE
1760 && !cs->cidmode 1763 && !cs->cidmode
@@ -1813,9 +1816,8 @@ static void process_command_flags(struct cardstate *cs)
1813 1816
1814 if (cs->at_state.pending_commands & PC_INIT) { 1817 if (cs->at_state.pending_commands & PC_INIT) {
1815 cs->at_state.pending_commands &= ~PC_INIT; 1818 cs->at_state.pending_commands &= ~PC_INIT;
1816 cs->dle = 0; //FIXME 1819 cs->dle = 0;
1817 cs->inbuf->inputstate = INS_command; 1820 cs->inbuf->inputstate = INS_command;
1818 //FIXME reset card state (or -> LOCK0)?
1819 schedule_sequence(cs, &cs->at_state, SEQ_INIT); 1821 schedule_sequence(cs, &cs->at_state, SEQ_INIT);
1820 return; 1822 return;
1821 } 1823 }
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index a2f6125739eb..e963a6c2e86d 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -23,7 +23,6 @@
23#include <linux/compiler.h> 23#include <linux/compiler.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/isdnif.h>
27#include <linux/usb.h> 26#include <linux/usb.h>
28#include <linux/skbuff.h> 27#include <linux/skbuff.h>
29#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -35,12 +34,11 @@
35#include <linux/list.h> 34#include <linux/list.h>
36#include <asm/atomic.h> 35#include <asm/atomic.h>
37 36
38#define GIG_VERSION {0,5,0,0} 37#define GIG_VERSION {0, 5, 0, 0}
39#define GIG_COMPAT {0,4,0,0} 38#define GIG_COMPAT {0, 4, 0, 0}
40 39
41#define MAX_REC_PARAMS 10 /* Max. number of params in response string */ 40#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
42#define MAX_RESP_SIZE 512 /* Max. size of a response string */ 41#define MAX_RESP_SIZE 512 /* Max. size of a response string */
43#define HW_HDR_LEN 2 /* Header size used to store ack info */
44 42
45#define MAX_EVENTS 64 /* size of event queue */ 43#define MAX_EVENTS 64 /* size of event queue */
46 44
@@ -135,35 +133,32 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
135#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) 133#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
136#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) 134#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
137 135
138/* int-in-events 3070 */ 136/* interrupt pipe messages */
139#define HD_B1_FLOW_CONTROL 0x80 137#define HD_B1_FLOW_CONTROL 0x80
140#define HD_B2_FLOW_CONTROL 0x81 138#define HD_B2_FLOW_CONTROL 0x81
141#define HD_RECEIVEATDATA_ACK (0x35) // 3070 139#define HD_RECEIVEATDATA_ACK (0x35) /* 3070 */
142 // att: HD_RECEIVE>>AT<<DATA_ACK 140#define HD_READY_SEND_ATDATA (0x36) /* 3070 */
143#define HD_READY_SEND_ATDATA (0x36) // 3070 141#define HD_OPEN_ATCHANNEL_ACK (0x37) /* 3070 */
144#define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070 142#define HD_CLOSE_ATCHANNEL_ACK (0x38) /* 3070 */
145#define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070 143#define HD_DEVICE_INIT_OK (0x11) /* ISurf USB + 3070 */
146#define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070 144#define HD_OPEN_B1CHANNEL_ACK (0x51) /* ISurf USB + 3070 */
147#define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070 145#define HD_OPEN_B2CHANNEL_ACK (0x52) /* ISurf USB + 3070 */
148#define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070 146#define HD_CLOSE_B1CHANNEL_ACK (0x53) /* ISurf USB + 3070 */
149#define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070 147#define HD_CLOSE_B2CHANNEL_ACK (0x54) /* ISurf USB + 3070 */
150#define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070 148#define HD_SUSPEND_END (0x61) /* ISurf USB */
151// Powermangment 149#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) /* ISurf USB + 3070 */
152#define HD_SUSPEND_END (0x61) // ISurf USB 150
153// Configuration 151/* control requests */
154#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070 152#define HD_OPEN_B1CHANNEL (0x23) /* ISurf USB + 3070 */
155 153#define HD_CLOSE_B1CHANNEL (0x24) /* ISurf USB + 3070 */
156/* control requests 3070 */ 154#define HD_OPEN_B2CHANNEL (0x25) /* ISurf USB + 3070 */
157#define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070 155#define HD_CLOSE_B2CHANNEL (0x26) /* ISurf USB + 3070 */
158#define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070 156#define HD_RESET_INTERRUPT_PIPE (0x27) /* ISurf USB + 3070 */
159#define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070 157#define HD_DEVICE_INIT_ACK (0x34) /* ISurf USB + 3070 */
160#define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070 158#define HD_WRITE_ATMESSAGE (0x12) /* 3070 */
161#define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070 159#define HD_READ_ATMESSAGE (0x13) /* 3070 */
162#define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070 160#define HD_OPEN_ATCHANNEL (0x28) /* 3070 */
163#define HD_WRITE_ATMESSAGE (0x12) // 3070 161#define HD_CLOSE_ATCHANNEL (0x29) /* 3070 */
164#define HD_READ_ATMESSAGE (0x13) // 3070
165#define HD_OPEN_ATCHANNEL (0x28) // 3070
166#define HD_CLOSE_ATCHANNEL (0x29) // 3070
167 162
168/* number of B channels supported by base driver */ 163/* number of B channels supported by base driver */
169#define BAS_CHANNELS 2 164#define BAS_CHANNELS 2
@@ -193,7 +188,9 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
193#define AT_PROTO 4 188#define AT_PROTO 4
194#define AT_TYPE 5 189#define AT_TYPE 5
195#define AT_HLC 6 190#define AT_HLC 6
196#define AT_NUM 7 191#define AT_CLIP 7
192/* total number */
193#define AT_NUM 8
197 194
198/* variables in struct at_state_t */ 195/* variables in struct at_state_t */
199#define VAR_ZSAU 0 196#define VAR_ZSAU 0
@@ -216,7 +213,6 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
216#define EV_START -110 213#define EV_START -110
217#define EV_STOP -111 214#define EV_STOP -111
218#define EV_IF_LOCK -112 215#define EV_IF_LOCK -112
219#define EV_PROTO_L2 -113
220#define EV_ACCEPT -114 216#define EV_ACCEPT -114
221#define EV_DIAL -115 217#define EV_DIAL -115
222#define EV_HUP -116 218#define EV_HUP -116
@@ -224,12 +220,11 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
224#define EV_BC_CLOSED -118 220#define EV_BC_CLOSED -118
225 221
226/* input state */ 222/* input state */
227#define INS_command 0x0001 223#define INS_command 0x0001 /* receiving messages (not payload data) */
228#define INS_DLE_char 0x0002 224#define INS_DLE_char 0x0002 /* DLE flag received (in DLE mode) */
229#define INS_byte_stuff 0x0004 225#define INS_byte_stuff 0x0004
230#define INS_have_data 0x0008 226#define INS_have_data 0x0008
231#define INS_skip_frame 0x0010 227#define INS_DLE_command 0x0020 /* DLE message start (<DLE> X) received */
232#define INS_DLE_command 0x0020
233#define INS_flag_hunt 0x0040 228#define INS_flag_hunt 0x0040
234 229
235/* channel state */ 230/* channel state */
@@ -259,6 +254,11 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
259#define SM_LOCKED 0 254#define SM_LOCKED 0
260#define SM_ISDN 1 /* default */ 255#define SM_ISDN 1 /* default */
261 256
257/* layer 2 protocols (AT^SBPR=...) */
258#define L2_BITSYNC 0
259#define L2_HDLC 1
260#define L2_VOICE 2
261
262struct gigaset_ops; 262struct gigaset_ops;
263struct gigaset_driver; 263struct gigaset_driver;
264 264
@@ -286,8 +286,6 @@ extern struct reply_t gigaset_tab_cid[];
286extern struct reply_t gigaset_tab_nocid[]; 286extern struct reply_t gigaset_tab_nocid[];
287 287
288struct inbuf_t { 288struct inbuf_t {
289 unsigned char *rcvbuf; /* usb-gigaset receive buffer */
290 struct bc_state *bcs;
291 struct cardstate *cs; 289 struct cardstate *cs;
292 int inputstate; 290 int inputstate;
293 int head, tail; 291 int head, tail;
@@ -359,12 +357,6 @@ struct at_state_t {
359 struct bc_state *bcs; 357 struct bc_state *bcs;
360}; 358};
361 359
362struct resp_type_t {
363 unsigned char *response;
364 int resp_code; /* RSP_XXXX */
365 int type; /* RT_XXXX */
366};
367
368struct event_t { 360struct event_t {
369 int type; 361 int type;
370 void *ptr, *arg; 362 void *ptr, *arg;
@@ -395,7 +387,7 @@ struct bc_state {
395 387
396 unsigned chstate; /* bitmap (CHS_*) */ 388 unsigned chstate; /* bitmap (CHS_*) */
397 int ignore; 389 int ignore;
398 unsigned proto2; /* Layer 2 protocol (ISDN_PROTO_L2_*) */ 390 unsigned proto2; /* layer 2 protocol (L2_*) */
399 char *commands[AT_NUM]; /* see AT_XXXX */ 391 char *commands[AT_NUM]; /* see AT_XXXX */
400 392
401#ifdef CONFIG_GIGASET_DEBUG 393#ifdef CONFIG_GIGASET_DEBUG
@@ -410,6 +402,8 @@ struct bc_state {
410 struct usb_bc_state *usb; /* usb hardware driver (m105) */ 402 struct usb_bc_state *usb; /* usb hardware driver (m105) */
411 struct bas_bc_state *bas; /* usb hardware driver (base) */ 403 struct bas_bc_state *bas; /* usb hardware driver (base) */
412 } hw; 404 } hw;
405
406 void *ap; /* LL application structure */
413}; 407};
414 408
415struct cardstate { 409struct cardstate {
@@ -456,12 +450,13 @@ struct cardstate {
456 450
457 unsigned running; /* !=0 if events are handled */ 451 unsigned running; /* !=0 if events are handled */
458 unsigned connected; /* !=0 if hardware is connected */ 452 unsigned connected; /* !=0 if hardware is connected */
459 unsigned isdn_up; /* !=0 after ISDN_STAT_RUN */ 453 unsigned isdn_up; /* !=0 after gigaset_isdn_start() */
460 454
461 unsigned cidmode; 455 unsigned cidmode;
462 456
463 int myid; /* id for communication with LL */ 457 int myid; /* id for communication with LL */
464 isdn_if iif; 458 void *iif; /* LL interface structure */
459 unsigned short hw_hdr_len; /* headroom needed in data skbs */
465 460
466 struct reply_t *tabnocid; 461 struct reply_t *tabnocid;
467 struct reply_t *tabcid; 462 struct reply_t *tabcid;
@@ -476,8 +471,8 @@ struct cardstate {
476 471
477 struct timer_list timer; 472 struct timer_list timer;
478 int retry_count; 473 int retry_count;
479 int dle; /* !=0 if modem commands/responses are 474 int dle; /* !=0 if DLE mode is active
480 dle encoded */ 475 (ZDLE=1 received -- M10x only) */
481 int cur_at_seq; /* sequence of AT commands being 476 int cur_at_seq; /* sequence of AT commands being
482 processed */ 477 processed */
483 int curchannel; /* channel those commands are meant 478 int curchannel; /* channel those commands are meant
@@ -616,7 +611,9 @@ struct gigaset_ops {
616 int (*baud_rate)(struct cardstate *cs, unsigned cflag); 611 int (*baud_rate)(struct cardstate *cs, unsigned cflag);
617 int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag); 612 int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
618 613
619 /* Called from i4l.c to put an skb into the send-queue. */ 614 /* Called from LL interface to put an skb into the send-queue.
615 * After sending is completed, gigaset_skb_sent() must be called
616 * with the skb's link layer header preserved. */
620 int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb); 617 int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
621 618
622 /* Called from ev-layer.c to process a block of data 619 /* Called from ev-layer.c to process a block of data
@@ -625,7 +622,8 @@ struct gigaset_ops {
625 622
626}; 623};
627 624
628/* = Common structures and definitions ======================================= */ 625/* = Common structures and definitions =======================================
626 */
629 627
630/* Parser states for DLE-Event: 628/* Parser states for DLE-Event:
631 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "." 629 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
@@ -638,8 +636,7 @@ struct gigaset_ops {
638 * Functions implemented in asyncdata.c 636 * Functions implemented in asyncdata.c
639 */ 637 */
640 638
641/* Called from i4l.c to put an skb into the send-queue. 639/* Called from LL interface to put an skb into the send queue. */
642 * After sending gigaset_skb_sent() should be called. */
643int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb); 640int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
644 641
645/* Called from ev-layer.c to process a block of data 642/* Called from ev-layer.c to process a block of data
@@ -650,8 +647,7 @@ void gigaset_m10x_input(struct inbuf_t *inbuf);
650 * Functions implemented in isocdata.c 647 * Functions implemented in isocdata.c
651 */ 648 */
652 649
653/* Called from i4l.c to put an skb into the send-queue. 650/* Called from LL interface to put an skb into the send queue. */
654 * After sending gigaset_skb_sent() should be called. */
655int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb); 651int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
656 652
657/* Called from ev-layer.c to process a block of data 653/* Called from ev-layer.c to process a block of data
@@ -674,36 +670,26 @@ void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
674int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size); 670int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
675 671
676/* =========================================================================== 672/* ===========================================================================
677 * Functions implemented in i4l.c/gigaset.h 673 * Functions implemented in LL interface
678 */ 674 */
679 675
680/* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */ 676/* Called from common.c for setting up/shutting down with the ISDN subsystem */
681int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid); 677int gigaset_isdn_register(struct cardstate *cs, const char *isdnid);
678void gigaset_isdn_unregister(struct cardstate *cs);
682 679
683/* Called from xxx-gigaset.c to indicate completion of sending an skb */ 680/* Called from hardware module to indicate completion of an skb */
684void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); 681void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
682void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
683void gigaset_isdn_rcv_err(struct bc_state *bcs);
685 684
686/* Called from common.c/ev-layer.c to indicate events relevant to the LL */ 685/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
686void gigaset_isdn_start(struct cardstate *cs);
687void gigaset_isdn_stop(struct cardstate *cs);
687int gigaset_isdn_icall(struct at_state_t *at_state); 688int gigaset_isdn_icall(struct at_state_t *at_state);
688int gigaset_isdn_setup_accept(struct at_state_t *at_state); 689void gigaset_isdn_connD(struct bc_state *bcs);
689int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data); 690void gigaset_isdn_hupD(struct bc_state *bcs);
690 691void gigaset_isdn_connB(struct bc_state *bcs);
691void gigaset_i4l_cmd(struct cardstate *cs, int cmd); 692void gigaset_isdn_hupB(struct bc_state *bcs);
692void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd);
693
694
695static inline void gigaset_isdn_rcv_err(struct bc_state *bcs)
696{
697 isdn_ctrl response;
698
699 /* error -> LL */
700 gig_dbg(DEBUG_CMD, "sending L1ERR");
701 response.driver = bcs->cs->myid;
702 response.command = ISDN_STAT_L1ERR;
703 response.arg = bcs->channel;
704 response.parm.errcode = ISDN_STAT_L1ERR_RECV;
705 bcs->cs->iif.statcallb(&response);
706}
707 693
708/* =========================================================================== 694/* ===========================================================================
709 * Functions implemented in ev-layer.c 695 * Functions implemented in ev-layer.c
@@ -732,6 +718,7 @@ void gigaset_bcs_reinit(struct bc_state *bcs);
732void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, 718void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
733 struct cardstate *cs, int cid); 719 struct cardstate *cs, int cid);
734int gigaset_get_channel(struct bc_state *bcs); 720int gigaset_get_channel(struct bc_state *bcs);
721struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
735void gigaset_free_channel(struct bc_state *bcs); 722void gigaset_free_channel(struct bc_state *bcs);
736int gigaset_get_channels(struct cardstate *cs); 723int gigaset_get_channels(struct cardstate *cs);
737void gigaset_free_channels(struct cardstate *cs); 724void gigaset_free_channels(struct cardstate *cs);
@@ -781,7 +768,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
781 void *ptr, int parameter, void *arg); 768 void *ptr, int parameter, void *arg);
782 769
783/* Called on CONFIG1 command from frontend. */ 770/* Called on CONFIG1 command from frontend. */
784int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode 771int gigaset_enterconfigmode(struct cardstate *cs);
785 772
786/* cs->lock must not be locked */ 773/* cs->lock must not be locked */
787static inline void gigaset_schedule_event(struct cardstate *cs) 774static inline void gigaset_schedule_event(struct cardstate *cs)
@@ -816,35 +803,6 @@ static inline void gigaset_bchannel_up(struct bc_state *bcs)
816/* handling routines for sk_buff */ 803/* handling routines for sk_buff */
817/* ============================= */ 804/* ============================= */
818 805
819/* pass received skb to LL
820 * Warning: skb must not be accessed anymore!
821 */
822static inline void gigaset_rcv_skb(struct sk_buff *skb,
823 struct cardstate *cs,
824 struct bc_state *bcs)
825{
826 cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb);
827 bcs->trans_down++;
828}
829
830/* handle reception of corrupted skb
831 * Warning: skb must not be accessed anymore!
832 */
833static inline void gigaset_rcv_error(struct sk_buff *procskb,
834 struct cardstate *cs,
835 struct bc_state *bcs)
836{
837 if (procskb)
838 dev_kfree_skb(procskb);
839
840 if (bcs->ignore)
841 --bcs->ignore;
842 else {
843 ++bcs->corrupted;
844 gigaset_isdn_rcv_err(bcs);
845 }
846}
847
848/* append received bytes to inbuf */ 806/* append received bytes to inbuf */
849int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, 807int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
850 unsigned numbytes); 808 unsigned numbytes);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 654489d836cd..c129ee47a8fb 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -14,6 +14,9 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/isdnif.h>
18
19#define HW_HDR_LEN 2 /* Header size used to store ack info */
17 20
18/* == Handling of I4L IO =====================================================*/ 21/* == Handling of I4L IO =====================================================*/
19 22
@@ -36,12 +39,12 @@
36static int writebuf_from_LL(int driverID, int channel, int ack, 39static int writebuf_from_LL(int driverID, int channel, int ack,
37 struct sk_buff *skb) 40 struct sk_buff *skb)
38{ 41{
39 struct cardstate *cs; 42 struct cardstate *cs = gigaset_get_cs_by_id(driverID);
40 struct bc_state *bcs; 43 struct bc_state *bcs;
44 unsigned char *ack_header;
41 unsigned len; 45 unsigned len;
42 unsigned skblen;
43 46
44 if (!(cs = gigaset_get_cs_by_id(driverID))) { 47 if (!cs) {
45 pr_err("%s: invalid driver ID (%d)\n", __func__, driverID); 48 pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
46 return -ENODEV; 49 return -ENODEV;
47 } 50 }
@@ -75,11 +78,23 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
75 return -EINVAL; 78 return -EINVAL;
76 } 79 }
77 80
78 skblen = ack ? len : 0; 81 /* set up acknowledgement header */
79 skb->head[0] = skblen & 0xff; 82 if (skb_headroom(skb) < HW_HDR_LEN) {
80 skb->head[1] = skblen >> 8; 83 /* should never happen */
81 gig_dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", 84 dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
82 len, skblen, (unsigned) skb->head[0], (unsigned) skb->head[1]); 85 return -ENOMEM;
86 }
87 skb_set_mac_header(skb, -HW_HDR_LEN);
88 skb->mac_len = HW_HDR_LEN;
89 ack_header = skb_mac_header(skb);
90 if (ack) {
91 ack_header[0] = len & 0xff;
92 ack_header[1] = len >> 8;
93 } else {
94 ack_header[0] = ack_header[1] = 0;
95 }
96 gig_dbg(DEBUG_MCMD, "skb: len=%u, ack=%d: %02x %02x",
97 len, ack, ack_header[0], ack_header[1]);
83 98
84 /* pass to device-specific module */ 99 /* pass to device-specific module */
85 return cs->ops->send_skb(bcs, skb); 100 return cs->ops->send_skb(bcs, skb);
@@ -95,6 +110,8 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
95 */ 110 */
96void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) 111void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
97{ 112{
113 isdn_if *iif = bcs->cs->iif;
114 unsigned char *ack_header = skb_mac_header(skb);
98 unsigned len; 115 unsigned len;
99 isdn_ctrl response; 116 isdn_ctrl response;
100 117
@@ -104,8 +121,7 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
104 dev_warn(bcs->cs->dev, "%s: skb->len==%d\n", 121 dev_warn(bcs->cs->dev, "%s: skb->len==%d\n",
105 __func__, skb->len); 122 __func__, skb->len);
106 123
107 len = (unsigned char) skb->head[0] | 124 len = ack_header[0] + ((unsigned) ack_header[1] << 8);
108 (unsigned) (unsigned char) skb->head[1] << 8;
109 if (len) { 125 if (len) {
110 gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)", 126 gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)",
111 bcs->cs->myid, bcs->channel, len); 127 bcs->cs->myid, bcs->channel, len);
@@ -114,71 +130,177 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
114 response.command = ISDN_STAT_BSENT; 130 response.command = ISDN_STAT_BSENT;
115 response.arg = bcs->channel; 131 response.arg = bcs->channel;
116 response.parm.length = len; 132 response.parm.length = len;
117 bcs->cs->iif.statcallb(&response); 133 iif->statcallb(&response);
118 } 134 }
119} 135}
120EXPORT_SYMBOL_GPL(gigaset_skb_sent); 136EXPORT_SYMBOL_GPL(gigaset_skb_sent);
121 137
138/**
139 * gigaset_skb_rcvd() - pass received skb to LL
140 * @bcs: B channel descriptor structure.
141 * @skb: received data.
142 *
143 * Called by hardware module {bas,ser,usb}_gigaset when user data has
144 * been successfully received, for passing to the LL.
145 * Warning: skb must not be accessed anymore!
146 */
147void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
148{
149 isdn_if *iif = bcs->cs->iif;
150
151 iif->rcvcallb_skb(bcs->cs->myid, bcs->channel, skb);
152 bcs->trans_down++;
153}
154EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
155
156/**
157 * gigaset_isdn_rcv_err() - signal receive error
158 * @bcs: B channel descriptor structure.
159 *
160 * Called by hardware module {bas,ser,usb}_gigaset when a receive error
161 * has occurred, for signalling to the LL.
162 */
163void gigaset_isdn_rcv_err(struct bc_state *bcs)
164{
165 isdn_if *iif = bcs->cs->iif;
166 isdn_ctrl response;
167
168 /* if currently ignoring packets, just count down */
169 if (bcs->ignore) {
170 bcs->ignore--;
171 return;
172 }
173
174 /* update statistics */
175 bcs->corrupted++;
176
177 /* error -> LL */
178 gig_dbg(DEBUG_CMD, "sending L1ERR");
179 response.driver = bcs->cs->myid;
180 response.command = ISDN_STAT_L1ERR;
181 response.arg = bcs->channel;
182 response.parm.errcode = ISDN_STAT_L1ERR_RECV;
183 iif->statcallb(&response);
184}
185EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
186
122/* This function will be called by LL to send commands 187/* This function will be called by LL to send commands
123 * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL, 188 * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL,
124 * so don't put too much effort into it. 189 * so don't put too much effort into it.
125 */ 190 */
126static int command_from_LL(isdn_ctrl *cntrl) 191static int command_from_LL(isdn_ctrl *cntrl)
127{ 192{
128 struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver); 193 struct cardstate *cs;
129 struct bc_state *bcs; 194 struct bc_state *bcs;
130 int retval = 0; 195 int retval = 0;
131 struct setup_parm *sp; 196 char **commands;
197 int ch;
198 int i;
199 size_t l;
132 200
133 gigaset_debugdrivers(); 201 gigaset_debugdrivers();
134 202
135 if (!cs) { 203 gig_dbg(DEBUG_CMD, "driver: %d, command: %d, arg: 0x%lx",
204 cntrl->driver, cntrl->command, cntrl->arg);
205
206 cs = gigaset_get_cs_by_id(cntrl->driver);
207 if (cs == NULL) {
136 pr_err("%s: invalid driver ID (%d)\n", __func__, cntrl->driver); 208 pr_err("%s: invalid driver ID (%d)\n", __func__, cntrl->driver);
137 return -ENODEV; 209 return -ENODEV;
138 } 210 }
211 ch = cntrl->arg & 0xff;
139 212
140 switch (cntrl->command) { 213 switch (cntrl->command) {
141 case ISDN_CMD_IOCTL: 214 case ISDN_CMD_IOCTL:
142 gig_dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver: %d, arg: %ld)",
143 cntrl->driver, cntrl->arg);
144
145 dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n"); 215 dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n");
146 return -EINVAL; 216 return -EINVAL;
147 217
148 case ISDN_CMD_DIAL: 218 case ISDN_CMD_DIAL:
149 gig_dbg(DEBUG_ANY, 219 gig_dbg(DEBUG_ANY,
150 "ISDN_CMD_DIAL (driver: %d, ch: %ld, " 220 "ISDN_CMD_DIAL (phone: %s, msn: %s, si1: %d, si2: %d)",
151 "phone: %s, ownmsn: %s, si1: %d, si2: %d)",
152 cntrl->driver, cntrl->arg,
153 cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn, 221 cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
154 cntrl->parm.setup.si1, cntrl->parm.setup.si2); 222 cntrl->parm.setup.si1, cntrl->parm.setup.si2);
155 223
156 if (cntrl->arg >= cs->channels) { 224 if (ch >= cs->channels) {
157 dev_err(cs->dev, 225 dev_err(cs->dev,
158 "ISDN_CMD_DIAL: invalid channel (%d)\n", 226 "ISDN_CMD_DIAL: invalid channel (%d)\n", ch);
159 (int) cntrl->arg);
160 return -EINVAL; 227 return -EINVAL;
161 } 228 }
162 229 bcs = cs->bcs + ch;
163 bcs = cs->bcs + cntrl->arg;
164
165 if (!gigaset_get_channel(bcs)) { 230 if (!gigaset_get_channel(bcs)) {
166 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n"); 231 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
167 return -EBUSY; 232 return -EBUSY;
168 } 233 }
169 234
170 sp = kmalloc(sizeof *sp, GFP_ATOMIC); 235 commands = kzalloc(AT_NUM*(sizeof *commands), GFP_ATOMIC);
171 if (!sp) { 236 if (!commands) {
172 gigaset_free_channel(bcs); 237 gigaset_free_channel(bcs);
173 dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n"); 238 dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n");
174 return -ENOMEM; 239 return -ENOMEM;
175 } 240 }
176 *sp = cntrl->parm.setup;
177 241
178 if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp, 242 l = 3 + strlen(cntrl->parm.setup.phone);
243 commands[AT_DIAL] = kmalloc(l, GFP_ATOMIC);
244 if (!commands[AT_DIAL])
245 goto oom;
246 if (cntrl->parm.setup.phone[0] == '*' &&
247 cntrl->parm.setup.phone[1] == '*') {
248 /* internal call: translate ** prefix to CTP value */
249 commands[AT_TYPE] = kstrdup("^SCTP=0\r", GFP_ATOMIC);
250 if (!commands[AT_TYPE])
251 goto oom;
252 snprintf(commands[AT_DIAL], l,
253 "D%s\r", cntrl->parm.setup.phone+2);
254 } else {
255 commands[AT_TYPE] = kstrdup("^SCTP=1\r", GFP_ATOMIC);
256 if (!commands[AT_TYPE])
257 goto oom;
258 snprintf(commands[AT_DIAL], l,
259 "D%s\r", cntrl->parm.setup.phone);
260 }
261
262 l = strlen(cntrl->parm.setup.eazmsn);
263 if (l) {
264 l += 8;
265 commands[AT_MSN] = kmalloc(l, GFP_ATOMIC);
266 if (!commands[AT_MSN])
267 goto oom;
268 snprintf(commands[AT_MSN], l, "^SMSN=%s\r",
269 cntrl->parm.setup.eazmsn);
270 }
271
272 switch (cntrl->parm.setup.si1) {
273 case 1: /* audio */
274 /* BC = 9090A3: 3.1 kHz audio, A-law */
275 commands[AT_BC] = kstrdup("^SBC=9090A3\r", GFP_ATOMIC);
276 if (!commands[AT_BC])
277 goto oom;
278 break;
279 case 7: /* data */
280 default: /* hope the app knows what it is doing */
281 /* BC = 8890: unrestricted digital information */
282 commands[AT_BC] = kstrdup("^SBC=8890\r", GFP_ATOMIC);
283 if (!commands[AT_BC])
284 goto oom;
285 }
286 /* ToDo: other si1 values, inspect si2, set HLC/LLC */
287
288 commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
289 if (!commands[AT_PROTO])
290 goto oom;
291 snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
292
293 commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
294 if (!commands[AT_ISO])
295 goto oom;
296 snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
297 (unsigned) bcs->channel + 1);
298
299 if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
179 bcs->at_state.seq_index, NULL)) { 300 bcs->at_state.seq_index, NULL)) {
180 //FIXME what should we do? 301 for (i = 0; i < AT_NUM; ++i)
181 kfree(sp); 302 kfree(commands[i]);
303 kfree(commands);
182 gigaset_free_channel(bcs); 304 gigaset_free_channel(bcs);
183 return -ENOMEM; 305 return -ENOMEM;
184 } 306 }
@@ -186,115 +308,102 @@ static int command_from_LL(isdn_ctrl *cntrl)
186 gig_dbg(DEBUG_CMD, "scheduling DIAL"); 308 gig_dbg(DEBUG_CMD, "scheduling DIAL");
187 gigaset_schedule_event(cs); 309 gigaset_schedule_event(cs);
188 break; 310 break;
189 case ISDN_CMD_ACCEPTD: //FIXME 311 case ISDN_CMD_ACCEPTD:
190 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD"); 312 if (ch >= cs->channels) {
191
192 if (cntrl->arg >= cs->channels) {
193 dev_err(cs->dev, 313 dev_err(cs->dev,
194 "ISDN_CMD_ACCEPTD: invalid channel (%d)\n", 314 "ISDN_CMD_ACCEPTD: invalid channel (%d)\n", ch);
195 (int) cntrl->arg);
196 return -EINVAL; 315 return -EINVAL;
197 } 316 }
198 317 bcs = cs->bcs + ch;
199 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, 318 if (!gigaset_add_event(cs, &bcs->at_state,
200 EV_ACCEPT, NULL, 0, NULL)) { 319 EV_ACCEPT, NULL, 0, NULL))
201 //FIXME what should we do?
202 return -ENOMEM; 320 return -ENOMEM;
203 }
204 321
205 gig_dbg(DEBUG_CMD, "scheduling ACCEPT"); 322 gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
206 gigaset_schedule_event(cs); 323 gigaset_schedule_event(cs);
207 324
208 break; 325 break;
209 case ISDN_CMD_ACCEPTB: 326 case ISDN_CMD_ACCEPTB:
210 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB");
211 break; 327 break;
212 case ISDN_CMD_HANGUP: 328 case ISDN_CMD_HANGUP:
213 gig_dbg(DEBUG_ANY, "ISDN_CMD_HANGUP (ch: %d)", 329 if (ch >= cs->channels) {
214 (int) cntrl->arg);
215
216 if (cntrl->arg >= cs->channels) {
217 dev_err(cs->dev, 330 dev_err(cs->dev,
218 "ISDN_CMD_HANGUP: invalid channel (%d)\n", 331 "ISDN_CMD_HANGUP: invalid channel (%d)\n", ch);
219 (int) cntrl->arg);
220 return -EINVAL; 332 return -EINVAL;
221 } 333 }
222 334 bcs = cs->bcs + ch;
223 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, 335 if (!gigaset_add_event(cs, &bcs->at_state,
224 EV_HUP, NULL, 0, NULL)) { 336 EV_HUP, NULL, 0, NULL))
225 //FIXME what should we do?
226 return -ENOMEM; 337 return -ENOMEM;
227 }
228 338
229 gig_dbg(DEBUG_CMD, "scheduling HUP"); 339 gig_dbg(DEBUG_CMD, "scheduling HUP");
230 gigaset_schedule_event(cs); 340 gigaset_schedule_event(cs);
231 341
232 break; 342 break;
233 case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */ //FIXME 343 case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */
234 gig_dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ"); 344 dev_info(cs->dev, "ignoring ISDN_CMD_CLREAZ\n");
235 break; 345 break;
236 case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME 346 case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */
237 gig_dbg(DEBUG_ANY, 347 dev_info(cs->dev, "ignoring ISDN_CMD_SETEAZ (%s)\n",
238 "ISDN_CMD_SETEAZ (id: %d, ch: %ld, number: %s)", 348 cntrl->parm.num);
239 cntrl->driver, cntrl->arg, cntrl->parm.num);
240 break; 349 break;
241 case ISDN_CMD_SETL2: /* Set L2 to given protocol */ 350 case ISDN_CMD_SETL2: /* Set L2 to given protocol */
242 gig_dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (ch: %ld, proto: %lx)", 351 if (ch >= cs->channels) {
243 cntrl->arg & 0xff, (cntrl->arg >> 8));
244
245 if ((cntrl->arg & 0xff) >= cs->channels) {
246 dev_err(cs->dev, 352 dev_err(cs->dev,
247 "ISDN_CMD_SETL2: invalid channel (%d)\n", 353 "ISDN_CMD_SETL2: invalid channel (%d)\n", ch);
248 (int) cntrl->arg & 0xff);
249 return -EINVAL; 354 return -EINVAL;
250 } 355 }
251 356 bcs = cs->bcs + ch;
252 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state, 357 if (bcs->chstate & CHS_D_UP) {
253 EV_PROTO_L2, NULL, cntrl->arg >> 8, 358 dev_err(cs->dev,
254 NULL)) { 359 "ISDN_CMD_SETL2: channel active (%d)\n", ch);
255 //FIXME what should we do? 360 return -EINVAL;
256 return -ENOMEM; 361 }
362 switch (cntrl->arg >> 8) {
363 case ISDN_PROTO_L2_HDLC:
364 gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_HDLC");
365 bcs->proto2 = L2_HDLC;
366 break;
367 case ISDN_PROTO_L2_TRANS:
368 gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_VOICE");
369 bcs->proto2 = L2_VOICE;
370 break;
371 default:
372 dev_err(cs->dev,
373 "ISDN_CMD_SETL2: unsupported protocol (%lu)\n",
374 cntrl->arg >> 8);
375 return -EINVAL;
257 } 376 }
258
259 gig_dbg(DEBUG_CMD, "scheduling PROTO_L2");
260 gigaset_schedule_event(cs);
261 break; 377 break;
262 case ISDN_CMD_SETL3: /* Set L3 to given protocol */ 378 case ISDN_CMD_SETL3: /* Set L3 to given protocol */
263 gig_dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (ch: %ld, proto: %lx)", 379 if (ch >= cs->channels) {
264 cntrl->arg & 0xff, (cntrl->arg >> 8));
265
266 if ((cntrl->arg & 0xff) >= cs->channels) {
267 dev_err(cs->dev, 380 dev_err(cs->dev,
268 "ISDN_CMD_SETL3: invalid channel (%d)\n", 381 "ISDN_CMD_SETL3: invalid channel (%d)\n", ch);
269 (int) cntrl->arg & 0xff);
270 return -EINVAL; 382 return -EINVAL;
271 } 383 }
272 384
273 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) { 385 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
274 dev_err(cs->dev, 386 dev_err(cs->dev,
275 "ISDN_CMD_SETL3: invalid protocol %lu\n", 387 "ISDN_CMD_SETL3: unsupported protocol (%lu)\n",
276 cntrl->arg >> 8); 388 cntrl->arg >> 8);
277 return -EINVAL; 389 return -EINVAL;
278 } 390 }
279 391
280 break; 392 break;
281 case ISDN_CMD_PROCEED: 393 case ISDN_CMD_PROCEED:
282 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME 394 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED");
283 break; 395 break;
284 case ISDN_CMD_ALERT: 396 case ISDN_CMD_ALERT:
285 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME 397 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT");
286 if (cntrl->arg >= cs->channels) { 398 if (cntrl->arg >= cs->channels) {
287 dev_err(cs->dev, 399 dev_err(cs->dev,
288 "ISDN_CMD_ALERT: invalid channel (%d)\n", 400 "ISDN_CMD_ALERT: invalid channel (%d)\n",
289 (int) cntrl->arg); 401 (int) cntrl->arg);
290 return -EINVAL; 402 return -EINVAL;
291 } 403 }
292 //bcs = cs->bcs + cntrl->arg;
293 //bcs->proto2 = -1;
294 // FIXME
295 break; 404 break;
296 case ISDN_CMD_REDIR: 405 case ISDN_CMD_REDIR:
297 gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME 406 gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR");
298 break; 407 break;
299 case ISDN_CMD_PROT_IO: 408 case ISDN_CMD_PROT_IO:
300 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO"); 409 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
@@ -324,149 +433,34 @@ static int command_from_LL(isdn_ctrl *cntrl)
324 } 433 }
325 434
326 return retval; 435 return retval;
436
437oom:
438 dev_err(bcs->cs->dev, "out of memory\n");
439 for (i = 0; i < AT_NUM; ++i)
440 kfree(commands[i]);
441 return -ENOMEM;
327} 442}
328 443
329void gigaset_i4l_cmd(struct cardstate *cs, int cmd) 444static void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
330{ 445{
446 isdn_if *iif = cs->iif;
331 isdn_ctrl command; 447 isdn_ctrl command;
332 448
333 command.driver = cs->myid; 449 command.driver = cs->myid;
334 command.command = cmd; 450 command.command = cmd;
335 command.arg = 0; 451 command.arg = 0;
336 cs->iif.statcallb(&command); 452 iif->statcallb(&command);
337} 453}
338 454
339void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd) 455static void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
340{ 456{
457 isdn_if *iif = bcs->cs->iif;
341 isdn_ctrl command; 458 isdn_ctrl command;
342 459
343 command.driver = bcs->cs->myid; 460 command.driver = bcs->cs->myid;
344 command.command = cmd; 461 command.command = cmd;
345 command.arg = bcs->channel; 462 command.arg = bcs->channel;
346 bcs->cs->iif.statcallb(&command); 463 iif->statcallb(&command);
347}
348
349int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data)
350{
351 struct bc_state *bcs = at_state->bcs;
352 unsigned proto;
353 const char *bc;
354 size_t length[AT_NUM];
355 size_t l;
356 int i;
357 struct setup_parm *sp = data;
358
359 switch (bcs->proto2) {
360 case ISDN_PROTO_L2_HDLC:
361 proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
362 break;
363 case ISDN_PROTO_L2_TRANS:
364 proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
365 break;
366 default:
367 dev_err(bcs->cs->dev, "%s: invalid L2 protocol: %u\n",
368 __func__, bcs->proto2);
369 return -EINVAL;
370 }
371
372 switch (sp->si1) {
373 case 1: /* audio */
374 bc = "9090A3"; /* 3.1 kHz audio, A-law */
375 break;
376 case 7: /* data */
377 default: /* hope the app knows what it is doing */
378 bc = "8890"; /* unrestricted digital information */
379 }
380 //FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC
381
382 length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1;
383 l = strlen(sp->eazmsn);
384 length[AT_MSN ] = l ? 6 + l + 1 + 1 : 0;
385 length[AT_BC ] = 5 + strlen(bc) + 1 + 1;
386 length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
387 length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
388 length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */
389 length[AT_HLC ] = 0;
390
391 for (i = 0; i < AT_NUM; ++i) {
392 kfree(bcs->commands[i]);
393 bcs->commands[i] = NULL;
394 if (length[i] &&
395 !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
396 dev_err(bcs->cs->dev, "out of memory\n");
397 return -ENOMEM;
398 }
399 }
400
401 /* type = 1: extern, 0: intern, 2: recall, 3: door, 4: centrex */
402 if (sp->phone[0] == '*' && sp->phone[1] == '*') {
403 /* internal call: translate ** prefix to CTP value */
404 snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
405 "D%s\r", sp->phone+2);
406 strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]);
407 } else {
408 snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
409 "D%s\r", sp->phone);
410 strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]);
411 }
412
413 if (bcs->commands[AT_MSN])
414 snprintf(bcs->commands[AT_MSN], length[AT_MSN],
415 "^SMSN=%s\r", sp->eazmsn);
416 snprintf(bcs->commands[AT_BC ], length[AT_BC ],
417 "^SBC=%s\r", bc);
418 snprintf(bcs->commands[AT_PROTO], length[AT_PROTO],
419 "^SBPR=%u\r", proto);
420 snprintf(bcs->commands[AT_ISO ], length[AT_ISO ],
421 "^SISO=%u\r", (unsigned)bcs->channel + 1);
422
423 return 0;
424}
425
426int gigaset_isdn_setup_accept(struct at_state_t *at_state)
427{
428 unsigned proto;
429 size_t length[AT_NUM];
430 int i;
431 struct bc_state *bcs = at_state->bcs;
432
433 switch (bcs->proto2) {
434 case ISDN_PROTO_L2_HDLC:
435 proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
436 break;
437 case ISDN_PROTO_L2_TRANS:
438 proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
439 break;
440 default:
441 dev_err(at_state->cs->dev, "%s: invalid protocol: %u\n",
442 __func__, bcs->proto2);
443 return -EINVAL;
444 }
445
446 length[AT_DIAL ] = 0;
447 length[AT_MSN ] = 0;
448 length[AT_BC ] = 0;
449 length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
450 length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
451 length[AT_TYPE ] = 0;
452 length[AT_HLC ] = 0;
453
454 for (i = 0; i < AT_NUM; ++i) {
455 kfree(bcs->commands[i]);
456 bcs->commands[i] = NULL;
457 if (length[i] &&
458 !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
459 dev_err(at_state->cs->dev, "out of memory\n");
460 return -ENOMEM;
461 }
462 }
463
464 snprintf(bcs->commands[AT_PROTO], length[AT_PROTO],
465 "^SBPR=%u\r", proto);
466 snprintf(bcs->commands[AT_ISO ], length[AT_ISO ],
467 "^SISO=%u\r", (unsigned) bcs->channel + 1);
468
469 return 0;
470} 464}
471 465
472/** 466/**
@@ -482,13 +476,14 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
482{ 476{
483 struct cardstate *cs = at_state->cs; 477 struct cardstate *cs = at_state->cs;
484 struct bc_state *bcs = at_state->bcs; 478 struct bc_state *bcs = at_state->bcs;
479 isdn_if *iif = cs->iif;
485 isdn_ctrl response; 480 isdn_ctrl response;
486 int retval; 481 int retval;
487 482
488 /* fill ICALL structure */ 483 /* fill ICALL structure */
489 response.parm.setup.si1 = 0; /* default: unknown */ 484 response.parm.setup.si1 = 0; /* default: unknown */
490 response.parm.setup.si2 = 0; 485 response.parm.setup.si2 = 0;
491 response.parm.setup.screen = 0; //FIXME how to set these? 486 response.parm.setup.screen = 0;
492 response.parm.setup.plan = 0; 487 response.parm.setup.plan = 0;
493 if (!at_state->str_var[STR_ZBC]) { 488 if (!at_state->str_var[STR_ZBC]) {
494 /* no BC (internal call): assume speech, A-law */ 489 /* no BC (internal call): assume speech, A-law */
@@ -509,29 +504,27 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
509 return ICALL_IGNORE; 504 return ICALL_IGNORE;
510 } 505 }
511 if (at_state->str_var[STR_NMBR]) { 506 if (at_state->str_var[STR_NMBR]) {
512 strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR], 507 strlcpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
513 sizeof response.parm.setup.phone - 1); 508 sizeof response.parm.setup.phone);
514 response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
515 } else 509 } else
516 response.parm.setup.phone[0] = 0; 510 response.parm.setup.phone[0] = 0;
517 if (at_state->str_var[STR_ZCPN]) { 511 if (at_state->str_var[STR_ZCPN]) {
518 strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN], 512 strlcpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
519 sizeof response.parm.setup.eazmsn - 1); 513 sizeof response.parm.setup.eazmsn);
520 response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
521 } else 514 } else
522 response.parm.setup.eazmsn[0] = 0; 515 response.parm.setup.eazmsn[0] = 0;
523 516
524 if (!bcs) { 517 if (!bcs) {
525 dev_notice(cs->dev, "no channel for incoming call\n"); 518 dev_notice(cs->dev, "no channel for incoming call\n");
526 response.command = ISDN_STAT_ICALLW; 519 response.command = ISDN_STAT_ICALLW;
527 response.arg = 0; //FIXME 520 response.arg = 0;
528 } else { 521 } else {
529 gig_dbg(DEBUG_CMD, "Sending ICALL"); 522 gig_dbg(DEBUG_CMD, "Sending ICALL");
530 response.command = ISDN_STAT_ICALL; 523 response.command = ISDN_STAT_ICALL;
531 response.arg = bcs->channel; //FIXME 524 response.arg = bcs->channel;
532 } 525 }
533 response.driver = cs->myid; 526 response.driver = cs->myid;
534 retval = cs->iif.statcallb(&response); 527 retval = iif->statcallb(&response);
535 gig_dbg(DEBUG_CMD, "Response: %d", retval); 528 gig_dbg(DEBUG_CMD, "Response: %d", retval);
536 switch (retval) { 529 switch (retval) {
537 case 0: /* no takers */ 530 case 0: /* no takers */
@@ -560,16 +553,109 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
560 } 553 }
561} 554}
562 555
563/* Set Callback function pointer */ 556/**
564int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid) 557 * gigaset_isdn_connD() - signal D channel connect
558 * @bcs: B channel descriptor structure.
559 *
560 * Called by main module to notify the LL that the D channel connection has
561 * been established.
562 */
563void gigaset_isdn_connD(struct bc_state *bcs)
565{ 564{
566 isdn_if *iif = &cs->iif; 565 gig_dbg(DEBUG_CMD, "sending DCONN");
566 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
567}
567 568
568 gig_dbg(DEBUG_ANY, "Register driver capabilities to LL"); 569/**
570 * gigaset_isdn_hupD() - signal D channel hangup
571 * @bcs: B channel descriptor structure.
572 *
573 * Called by main module to notify the LL that the D channel connection has
574 * been shut down.
575 */
576void gigaset_isdn_hupD(struct bc_state *bcs)
577{
578 gig_dbg(DEBUG_CMD, "sending DHUP");
579 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
580}
581
582/**
583 * gigaset_isdn_connB() - signal B channel connect
584 * @bcs: B channel descriptor structure.
585 *
586 * Called by main module to notify the LL that the B channel connection has
587 * been established.
588 */
589void gigaset_isdn_connB(struct bc_state *bcs)
590{
591 gig_dbg(DEBUG_CMD, "sending BCONN");
592 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
593}
594
595/**
596 * gigaset_isdn_hupB() - signal B channel hangup
597 * @bcs: B channel descriptor structure.
598 *
599 * Called by main module to notify the LL that the B channel connection has
600 * been shut down.
601 */
602void gigaset_isdn_hupB(struct bc_state *bcs)
603{
604 gig_dbg(DEBUG_CMD, "sending BHUP");
605 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
606}
607
608/**
609 * gigaset_isdn_start() - signal device availability
610 * @cs: device descriptor structure.
611 *
612 * Called by main module to notify the LL that the device is available for
613 * use.
614 */
615void gigaset_isdn_start(struct cardstate *cs)
616{
617 gig_dbg(DEBUG_CMD, "sending RUN");
618 gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
619}
620
621/**
622 * gigaset_isdn_stop() - signal device unavailability
623 * @cs: device descriptor structure.
624 *
625 * Called by main module to notify the LL that the device is no longer
626 * available for use.
627 */
628void gigaset_isdn_stop(struct cardstate *cs)
629{
630 gig_dbg(DEBUG_CMD, "sending STOP");
631 gigaset_i4l_cmd(cs, ISDN_STAT_STOP);
632}
633
634/**
635 * gigaset_isdn_register() - register to LL
636 * @cs: device descriptor structure.
637 * @isdnid: device name.
638 *
639 * Called by main module to register the device with the LL.
640 *
641 * Return value: 1 for success, 0 for failure
642 */
643int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
644{
645 isdn_if *iif;
646
647 pr_info("ISDN4Linux interface\n");
648
649 iif = kmalloc(sizeof *iif, GFP_KERNEL);
650 if (!iif) {
651 pr_err("out of memory\n");
652 return 0;
653 }
569 654
570 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index) 655 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
571 >= sizeof iif->id) { 656 >= sizeof iif->id) {
572 pr_err("ID too long: %s\n", isdnid); 657 pr_err("ID too long: %s\n", isdnid);
658 kfree(iif);
573 return 0; 659 return 0;
574 } 660 }
575 661
@@ -593,9 +679,26 @@ int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
593 679
594 if (!register_isdn(iif)) { 680 if (!register_isdn(iif)) {
595 pr_err("register_isdn failed\n"); 681 pr_err("register_isdn failed\n");
682 kfree(iif);
596 return 0; 683 return 0;
597 } 684 }
598 685
686 cs->iif = iif;
599 cs->myid = iif->channels; /* Set my device id */ 687 cs->myid = iif->channels; /* Set my device id */
688 cs->hw_hdr_len = HW_HDR_LEN;
600 return 1; 689 return 1;
601} 690}
691
692/**
693 * gigaset_isdn_unregister() - unregister from LL
694 * @cs: device descriptor structure.
695 *
696 * Called by main module to unregister the device from the LL.
697 */
698void gigaset_isdn_unregister(struct cardstate *cs)
699{
700 gig_dbg(DEBUG_CMD, "sending UNLOAD");
701 gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
702 kfree(cs->iif);
703 cs->iif = NULL;
704}
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 6a8e1384e7bd..d2260b0055fc 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -162,7 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
162 return -ENODEV; 162 return -ENODEV;
163 163
164 if (mutex_lock_interruptible(&cs->mutex)) 164 if (mutex_lock_interruptible(&cs->mutex))
165 return -ERESTARTSYS; // FIXME -EINTR? 165 return -ERESTARTSYS;
166 tty->driver_data = cs; 166 tty->driver_data = cs;
167 167
168 ++cs->open_count; 168 ++cs->open_count;
@@ -171,7 +171,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
171 spin_lock_irqsave(&cs->lock, flags); 171 spin_lock_irqsave(&cs->lock, flags);
172 cs->tty = tty; 172 cs->tty = tty;
173 spin_unlock_irqrestore(&cs->lock, flags); 173 spin_unlock_irqrestore(&cs->lock, flags);
174 tty->low_latency = 1; //FIXME test 174 tty->low_latency = 1;
175 } 175 }
176 176
177 mutex_unlock(&cs->mutex); 177 mutex_unlock(&cs->mutex);
@@ -228,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
228 gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd); 228 gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
229 229
230 if (mutex_lock_interruptible(&cs->mutex)) 230 if (mutex_lock_interruptible(&cs->mutex))
231 return -ERESTARTSYS; // FIXME -EINTR? 231 return -ERESTARTSYS;
232 232
233 if (!cs->connected) { 233 if (!cs->connected) {
234 gig_dbg(DEBUG_IF, "not connected"); 234 gig_dbg(DEBUG_IF, "not connected");
@@ -299,9 +299,8 @@ static int if_tiocmget(struct tty_struct *tty, struct file *file)
299 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 299 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
300 300
301 if (mutex_lock_interruptible(&cs->mutex)) 301 if (mutex_lock_interruptible(&cs->mutex))
302 return -ERESTARTSYS; // FIXME -EINTR? 302 return -ERESTARTSYS;
303 303
304 // FIXME read from device?
305 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR); 304 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
306 305
307 mutex_unlock(&cs->mutex); 306 mutex_unlock(&cs->mutex);
@@ -326,7 +325,7 @@ static int if_tiocmset(struct tty_struct *tty, struct file *file,
326 cs->minor_index, __func__, set, clear); 325 cs->minor_index, __func__, set, clear);
327 326
328 if (mutex_lock_interruptible(&cs->mutex)) 327 if (mutex_lock_interruptible(&cs->mutex))
329 return -ERESTARTSYS; // FIXME -EINTR? 328 return -ERESTARTSYS;
330 329
331 if (!cs->connected) { 330 if (!cs->connected) {
332 gig_dbg(DEBUG_IF, "not connected"); 331 gig_dbg(DEBUG_IF, "not connected");
@@ -356,7 +355,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
356 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 355 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
357 356
358 if (mutex_lock_interruptible(&cs->mutex)) 357 if (mutex_lock_interruptible(&cs->mutex))
359 return -ERESTARTSYS; // FIXME -EINTR? 358 return -ERESTARTSYS;
360 359
361 if (!cs->connected) { 360 if (!cs->connected) {
362 gig_dbg(DEBUG_IF, "not connected"); 361 gig_dbg(DEBUG_IF, "not connected");
@@ -390,7 +389,7 @@ static int if_write_room(struct tty_struct *tty)
390 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 389 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
391 390
392 if (mutex_lock_interruptible(&cs->mutex)) 391 if (mutex_lock_interruptible(&cs->mutex))
393 return -ERESTARTSYS; // FIXME -EINTR? 392 return -ERESTARTSYS;
394 393
395 if (!cs->connected) { 394 if (!cs->connected) {
396 gig_dbg(DEBUG_IF, "not connected"); 395 gig_dbg(DEBUG_IF, "not connected");
@@ -455,9 +454,8 @@ static void if_throttle(struct tty_struct *tty)
455 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ 454 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
456 else if (!cs->open_count) 455 else if (!cs->open_count)
457 dev_warn(cs->dev, "%s: device not opened\n", __func__); 456 dev_warn(cs->dev, "%s: device not opened\n", __func__);
458 else { 457 else
459 //FIXME 458 gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
460 }
461 459
462 mutex_unlock(&cs->mutex); 460 mutex_unlock(&cs->mutex);
463} 461}
@@ -480,9 +478,8 @@ static void if_unthrottle(struct tty_struct *tty)
480 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ 478 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
481 else if (!cs->open_count) 479 else if (!cs->open_count)
482 dev_warn(cs->dev, "%s: device not opened\n", __func__); 480 dev_warn(cs->dev, "%s: device not opened\n", __func__);
483 else { 481 else
484 //FIXME 482 gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
485 }
486 483
487 mutex_unlock(&cs->mutex); 484 mutex_unlock(&cs->mutex);
488} 485}
@@ -515,10 +512,9 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
515 goto out; 512 goto out;
516 } 513 }
517 514
518 // stolen from mct_u232.c
519 iflag = tty->termios->c_iflag; 515 iflag = tty->termios->c_iflag;
520 cflag = tty->termios->c_cflag; 516 cflag = tty->termios->c_cflag;
521 old_cflag = old ? old->c_cflag : cflag; //FIXME? 517 old_cflag = old ? old->c_cflag : cflag;
522 gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", 518 gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
523 cs->minor_index, iflag, cflag, old_cflag); 519 cs->minor_index, iflag, cflag, old_cflag);
524 520
@@ -588,7 +584,7 @@ void gigaset_if_init(struct cardstate *cs)
588 if (!drv->have_tty) 584 if (!drv->have_tty)
589 return; 585 return;
590 586
591 tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs); 587 tasklet_init(&cs->if_wake_tasklet, if_wake, (unsigned long) cs);
592 588
593 mutex_lock(&cs->mutex); 589 mutex_lock(&cs->mutex);
594 cs->tty_dev = tty_register_device(drv->tty, cs->minor_index, NULL); 590 cs->tty_dev = tty_register_device(drv->tty, cs->minor_index, NULL);
@@ -632,7 +628,8 @@ void gigaset_if_receive(struct cardstate *cs,
632 struct tty_struct *tty; 628 struct tty_struct *tty;
633 629
634 spin_lock_irqsave(&cs->lock, flags); 630 spin_lock_irqsave(&cs->lock, flags);
635 if ((tty = cs->tty) == NULL) 631 tty = cs->tty;
632 if (tty == NULL)
636 gig_dbg(DEBUG_ANY, "receive on closed device"); 633 gig_dbg(DEBUG_ANY, "receive on closed device");
637 else { 634 else {
638 tty_buffer_request_room(tty, len); 635 tty_buffer_request_room(tty, len);
@@ -659,9 +656,9 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
659 656
660 drv->have_tty = 0; 657 drv->have_tty = 0;
661 658
662 if ((drv->tty = alloc_tty_driver(minors)) == NULL) 659 drv->tty = tty = alloc_tty_driver(minors);
660 if (tty == NULL)
663 goto enomem; 661 goto enomem;
664 tty = drv->tty;
665 662
666 tty->magic = TTY_DRIVER_MAGIC, 663 tty->magic = TTY_DRIVER_MAGIC,
667 tty->major = GIG_MAJOR, 664 tty->major = GIG_MAJOR,
@@ -676,8 +673,8 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
676 673
677 tty->owner = THIS_MODULE; 674 tty->owner = THIS_MODULE;
678 675
679 tty->init_termios = tty_std_termios; //FIXME 676 tty->init_termios = tty_std_termios;
680 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME 677 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
681 tty_set_operations(tty, &if_ops); 678 tty_set_operations(tty, &if_ops);
682 679
683 ret = tty_register_driver(tty); 680 ret = tty_register_driver(tty);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 9f3ef7b4248c..85394a6ebae8 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -41,7 +41,8 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
41 41
42 read = iwb->read; 42 read = iwb->read;
43 write = iwb->write; 43 write = iwb->write;
44 if ((freebytes = read - write) > 0) { 44 freebytes = read - write;
45 if (freebytes > 0) {
45 /* no wraparound: need padding space within regular area */ 46 /* no wraparound: need padding space within regular area */
46 return freebytes - BAS_OUTBUFPAD; 47 return freebytes - BAS_OUTBUFPAD;
47 } else if (read < BAS_OUTBUFPAD) { 48 } else if (read < BAS_OUTBUFPAD) {
@@ -53,29 +54,6 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
53 } 54 }
54} 55}
55 56
56/* compare two offsets within the buffer
57 * The buffer is seen as circular, with the read position as start
58 * returns -1/0/1 if position a </=/> position b without crossing 'read'
59 */
60static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
61{
62 int read;
63 if (a == b)
64 return 0;
65 read = iwb->read;
66 if (a < b) {
67 if (a < read && read <= b)
68 return +1;
69 else
70 return -1;
71 } else {
72 if (b < read && read <= a)
73 return -1;
74 else
75 return +1;
76 }
77}
78
79/* start writing 57/* start writing
80 * acquire the write semaphore 58 * acquire the write semaphore
81 * return true if acquired, false if busy 59 * return true if acquired, false if busy
@@ -271,7 +249,7 @@ static inline void dump_bytes(enum debuglevel level, const char *tag,
271 * bit 14..13 = number of bits added by stuffing 249 * bit 14..13 = number of bits added by stuffing
272 */ 250 */
273static const u16 stufftab[5 * 256] = { 251static const u16 stufftab[5 * 256] = {
274// previous 1s = 0: 252/* previous 1s = 0: */
275 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, 253 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
276 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f, 254 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
277 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, 255 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
@@ -289,7 +267,7 @@ static const u16 stufftab[5 * 256] = {
289 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef, 267 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
290 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf, 268 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
291 269
292// previous 1s = 1: 270/* previous 1s = 1: */
293 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f, 271 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
294 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f, 272 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
295 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f, 273 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
@@ -307,7 +285,7 @@ static const u16 stufftab[5 * 256] = {
307 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf, 285 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
308 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef, 286 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
309 287
310// previous 1s = 2: 288/* previous 1s = 2: */
311 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017, 289 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
312 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037, 290 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
313 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057, 291 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
@@ -325,7 +303,7 @@ static const u16 stufftab[5 * 256] = {
325 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7, 303 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
326 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7, 304 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
327 305
328// previous 1s = 3: 306/* previous 1s = 3: */
329 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b, 307 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
330 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b, 308 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
331 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b, 309 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
@@ -343,7 +321,7 @@ static const u16 stufftab[5 * 256] = {
343 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb, 321 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
344 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb, 322 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
345 323
346// previous 1s = 4: 324/* previous 1s = 4: */
347 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d, 325 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
348 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d, 326 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
349 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d, 327 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
@@ -367,7 +345,8 @@ static const u16 stufftab[5 * 256] = {
367 * parameters: 345 * parameters:
368 * cin input byte 346 * cin input byte
369 * ones number of trailing '1' bits in result before this step 347 * ones number of trailing '1' bits in result before this step
370 * iwb pointer to output buffer structure (write semaphore must be held) 348 * iwb pointer to output buffer structure
349 * (write semaphore must be held)
371 * return value: 350 * return value:
372 * number of trailing '1' bits in result after this step 351 * number of trailing '1' bits in result after this step
373 */ 352 */
@@ -408,7 +387,8 @@ static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
408 * parameters: 387 * parameters:
409 * in input buffer 388 * in input buffer
410 * count number of bytes in input buffer 389 * count number of bytes in input buffer
411 * iwb pointer to output buffer structure (write semaphore must be held) 390 * iwb pointer to output buffer structure
391 * (write semaphore must be held)
412 * return value: 392 * return value:
413 * position of end of packet in output buffer on success, 393 * position of end of packet in output buffer on success,
414 * -EAGAIN if write semaphore busy or buffer full 394 * -EAGAIN if write semaphore busy or buffer full
@@ -440,7 +420,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
440 fcs = crc_ccitt_byte(fcs, c); 420 fcs = crc_ccitt_byte(fcs, c);
441 } 421 }
442 422
443 /* bitstuff and append FCS (complemented, least significant byte first) */ 423 /* bitstuff and append FCS
424 * (complemented, least significant byte first) */
444 fcs ^= 0xffff; 425 fcs ^= 0xffff;
445 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones); 426 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
446 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones); 427 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
@@ -459,7 +440,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
459 * parameters: 440 * parameters:
460 * in input buffer 441 * in input buffer
461 * count number of bytes in input buffer 442 * count number of bytes in input buffer
462 * iwb pointer to output buffer structure (write semaphore must be held) 443 * iwb pointer to output buffer structure
444 * (write semaphore must be held)
463 * return value: 445 * return value:
464 * position of end of packet in output buffer on success, 446 * position of end of packet in output buffer on success,
465 * -EAGAIN if write semaphore busy or buffer full 447 * -EAGAIN if write semaphore busy or buffer full
@@ -500,7 +482,7 @@ int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
500 int result; 482 int result;
501 483
502 switch (bcs->proto2) { 484 switch (bcs->proto2) {
503 case ISDN_PROTO_L2_HDLC: 485 case L2_HDLC:
504 result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len); 486 result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
505 gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d", 487 gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d",
506 __func__, len, result); 488 __func__, len, result);
@@ -542,8 +524,9 @@ static inline void hdlc_flush(struct bc_state *bcs)
542 if (likely(bcs->skb != NULL)) 524 if (likely(bcs->skb != NULL))
543 skb_trim(bcs->skb, 0); 525 skb_trim(bcs->skb, 0);
544 else if (!bcs->ignore) { 526 else if (!bcs->ignore) {
545 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) 527 bcs->skb = dev_alloc_skb(SBUFSIZE + bcs->cs->hw_hdr_len);
546 skb_reserve(bcs->skb, HW_HDR_LEN); 528 if (bcs->skb)
529 skb_reserve(bcs->skb, bcs->cs->hw_hdr_len);
547 else 530 else
548 dev_err(bcs->cs->dev, "could not allocate skb\n"); 531 dev_err(bcs->cs->dev, "could not allocate skb\n");
549 } 532 }
@@ -557,43 +540,46 @@ static inline void hdlc_flush(struct bc_state *bcs)
557 */ 540 */
558static inline void hdlc_done(struct bc_state *bcs) 541static inline void hdlc_done(struct bc_state *bcs)
559{ 542{
543 struct cardstate *cs = bcs->cs;
560 struct sk_buff *procskb; 544 struct sk_buff *procskb;
545 unsigned int len;
561 546
562 if (unlikely(bcs->ignore)) { 547 if (unlikely(bcs->ignore)) {
563 bcs->ignore--; 548 bcs->ignore--;
564 hdlc_flush(bcs); 549 hdlc_flush(bcs);
565 return; 550 return;
566 } 551 }
567 552 procskb = bcs->skb;
568 if ((procskb = bcs->skb) == NULL) { 553 if (procskb == NULL) {
569 /* previous error */ 554 /* previous error */
570 gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__); 555 gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
571 gigaset_rcv_error(NULL, bcs->cs, bcs); 556 gigaset_isdn_rcv_err(bcs);
572 } else if (procskb->len < 2) { 557 } else if (procskb->len < 2) {
573 dev_notice(bcs->cs->dev, "received short frame (%d octets)\n", 558 dev_notice(cs->dev, "received short frame (%d octets)\n",
574 procskb->len); 559 procskb->len);
575 bcs->hw.bas->runts++; 560 bcs->hw.bas->runts++;
576 gigaset_rcv_error(procskb, bcs->cs, bcs); 561 dev_kfree_skb_any(procskb);
562 gigaset_isdn_rcv_err(bcs);
577 } else if (bcs->fcs != PPP_GOODFCS) { 563 } else if (bcs->fcs != PPP_GOODFCS) {
578 dev_notice(bcs->cs->dev, "frame check error (0x%04x)\n", 564 dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs);
579 bcs->fcs);
580 bcs->hw.bas->fcserrs++; 565 bcs->hw.bas->fcserrs++;
581 gigaset_rcv_error(procskb, bcs->cs, bcs); 566 dev_kfree_skb_any(procskb);
567 gigaset_isdn_rcv_err(bcs);
582 } else { 568 } else {
583 procskb->len -= 2; /* subtract FCS */ 569 len = procskb->len;
584 procskb->tail -= 2; 570 __skb_trim(procskb, len -= 2); /* subtract FCS */
585 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", 571 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, len);
586 __func__, procskb->len);
587 dump_bytes(DEBUG_STREAM_DUMP, 572 dump_bytes(DEBUG_STREAM_DUMP,
588 "rcv data", procskb->data, procskb->len); 573 "rcv data", procskb->data, len);
589 bcs->hw.bas->goodbytes += procskb->len; 574 bcs->hw.bas->goodbytes += len;
590 gigaset_rcv_skb(procskb, bcs->cs, bcs); 575 gigaset_skb_rcvd(bcs, procskb);
591 } 576 }
592 577
593 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) 578 bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
594 skb_reserve(bcs->skb, HW_HDR_LEN); 579 if (bcs->skb)
580 skb_reserve(bcs->skb, cs->hw_hdr_len);
595 else 581 else
596 dev_err(bcs->cs->dev, "could not allocate skb\n"); 582 dev_err(cs->dev, "could not allocate skb\n");
597 bcs->fcs = PPP_INITFCS; 583 bcs->fcs = PPP_INITFCS;
598} 584}
599 585
@@ -610,12 +596,8 @@ static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
610 596
611 dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits); 597 dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
612 bcs->hw.bas->alignerrs++; 598 bcs->hw.bas->alignerrs++;
613 gigaset_rcv_error(bcs->skb, bcs->cs, bcs); 599 gigaset_isdn_rcv_err(bcs);
614 600 __skb_trim(bcs->skb, 0);
615 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
616 skb_reserve(bcs->skb, HW_HDR_LEN);
617 else
618 dev_err(bcs->cs->dev, "could not allocate skb\n");
619 bcs->fcs = PPP_INITFCS; 601 bcs->fcs = PPP_INITFCS;
620} 602}
621 603
@@ -646,10 +628,10 @@ static const unsigned char bitcounts[256] = {
646}; 628};
647 629
648/* hdlc_unpack 630/* hdlc_unpack
649 * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation) 631 * perform HDLC frame processing (bit unstuffing, flag detection, FCS
650 * on a sequence of received data bytes (8 bits each, LSB first) 632 * calculation) on a sequence of received data bytes (8 bits each, LSB first)
651 * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb 633 * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
652 * notify of errors via gigaset_rcv_error 634 * notify of errors via gigaset_isdn_rcv_err
653 * tally frames, errors etc. in BC structure counters 635 * tally frames, errors etc. in BC structure counters
654 * parameters: 636 * parameters:
655 * src received data 637 * src received data
@@ -665,9 +647,12 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
665 647
666 /* load previous state: 648 /* load previous state:
667 * inputstate = set of flag bits: 649 * inputstate = set of flag bits:
668 * - INS_flag_hunt: no complete opening flag received since connection setup or last abort 650 * - INS_flag_hunt: no complete opening flag received since connection
669 * - INS_have_data: at least one complete data byte received since last flag 651 * setup or last abort
670 * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7) 652 * - INS_have_data: at least one complete data byte received since last
653 * flag
654 * seqlen = number of consecutive '1' bits in last 7 input stream bits
655 * (0..7)
671 * inbyte = accumulated partial data byte (if !INS_flag_hunt) 656 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
672 * inbits = number of valid bits in inbyte, starting at LSB (0..6) 657 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
673 */ 658 */
@@ -701,9 +686,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
701 inbyte = c >> (lead1 + 1); 686 inbyte = c >> (lead1 + 1);
702 inbits = 7 - lead1; 687 inbits = 7 - lead1;
703 if (trail1 >= 8) { 688 if (trail1 >= 8) {
704 /* interior stuffing: omitting the MSB handles most cases */ 689 /* interior stuffing:
690 * omitting the MSB handles most cases,
691 * correct the incorrectly handled
692 * cases individually */
705 inbits--; 693 inbits--;
706 /* correct the incorrectly handled cases individually */
707 switch (c) { 694 switch (c) {
708 case 0xbe: 695 case 0xbe:
709 inbyte = 0x3f; 696 inbyte = 0x3f;
@@ -729,13 +716,14 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
729 hdlc_flush(bcs); 716 hdlc_flush(bcs);
730 inputstate |= INS_flag_hunt; 717 inputstate |= INS_flag_hunt;
731 } else if (seqlen == 6) { 718 } else if (seqlen == 6) {
732 /* closing flag, including (6 - lead1) '1's and one '0' from inbits */ 719 /* closing flag, including (6 - lead1) '1's
720 * and one '0' from inbits */
733 if (inbits > 7 - lead1) { 721 if (inbits > 7 - lead1) {
734 hdlc_frag(bcs, inbits + lead1 - 7); 722 hdlc_frag(bcs, inbits + lead1 - 7);
735 inputstate &= ~INS_have_data; 723 inputstate &= ~INS_have_data;
736 } else { 724 } else {
737 if (inbits < 7 - lead1) 725 if (inbits < 7 - lead1)
738 ubc->stolen0s ++; 726 ubc->stolen0s++;
739 if (inputstate & INS_have_data) { 727 if (inputstate & INS_have_data) {
740 hdlc_done(bcs); 728 hdlc_done(bcs);
741 inputstate &= ~INS_have_data; 729 inputstate &= ~INS_have_data;
@@ -744,7 +732,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
744 732
745 if (c == PPP_FLAG) { 733 if (c == PPP_FLAG) {
746 /* complete flag, LSB overlaps preceding flag */ 734 /* complete flag, LSB overlaps preceding flag */
747 ubc->shared0s ++; 735 ubc->shared0s++;
748 inbits = 0; 736 inbits = 0;
749 inbyte = 0; 737 inbyte = 0;
750 } else if (trail1 != 7) { 738 } else if (trail1 != 7) {
@@ -752,9 +740,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
752 inbyte = c >> (lead1 + 1); 740 inbyte = c >> (lead1 + 1);
753 inbits = 7 - lead1; 741 inbits = 7 - lead1;
754 if (trail1 >= 8) { 742 if (trail1 >= 8) {
755 /* interior stuffing: omitting the MSB handles most cases */ 743 /* interior stuffing:
744 * omitting the MSB handles most cases,
745 * correct the incorrectly handled
746 * cases individually */
756 inbits--; 747 inbits--;
757 /* correct the incorrectly handled cases individually */
758 switch (c) { 748 switch (c) {
759 case 0xbe: 749 case 0xbe:
760 inbyte = 0x3f; 750 inbyte = 0x3f;
@@ -762,7 +752,8 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
762 } 752 }
763 } 753 }
764 } else { 754 } else {
765 /* abort sequence follows, skb already empty anyway */ 755 /* abort sequence follows,
756 * skb already empty anyway */
766 ubc->aborts++; 757 ubc->aborts++;
767 inputstate |= INS_flag_hunt; 758 inputstate |= INS_flag_hunt;
768 } 759 }
@@ -787,14 +778,17 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
787 } else { 778 } else {
788 /* stuffed data */ 779 /* stuffed data */
789 if (trail1 < 7) { /* => seqlen == 5 */ 780 if (trail1 < 7) { /* => seqlen == 5 */
790 /* stuff bit at position lead1, no interior stuffing */ 781 /* stuff bit at position lead1,
782 * no interior stuffing */
791 unsigned char mask = (1 << lead1) - 1; 783 unsigned char mask = (1 << lead1) - 1;
792 c = (c & mask) | ((c & ~mask) >> 1); 784 c = (c & mask) | ((c & ~mask) >> 1);
793 inbyte |= c << inbits; 785 inbyte |= c << inbits;
794 inbits += 7; 786 inbits += 7;
795 } else if (seqlen < 5) { /* trail1 >= 8 */ 787 } else if (seqlen < 5) { /* trail1 >= 8 */
796 /* interior stuffing: omitting the MSB handles most cases */ 788 /* interior stuffing:
797 /* correct the incorrectly handled cases individually */ 789 * omitting the MSB handles most cases,
790 * correct the incorrectly handled
791 * cases individually */
798 switch (c) { 792 switch (c) {
799 case 0xbe: 793 case 0xbe:
800 c = 0x7e; 794 c = 0x7e;
@@ -804,8 +798,9 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
804 inbits += 7; 798 inbits += 7;
805 } else { /* seqlen == 5 && trail1 >= 8 */ 799 } else { /* seqlen == 5 && trail1 >= 8 */
806 800
807 /* stuff bit at lead1 *and* interior stuffing */ 801 /* stuff bit at lead1 *and* interior
808 switch (c) { /* unstuff individually */ 802 * stuffing -- unstuff individually */
803 switch (c) {
809 case 0x7d: 804 case 0x7d:
810 c = 0x3f; 805 c = 0x3f;
811 break; 806 break;
@@ -841,7 +836,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
841} 836}
842 837
843/* trans_receive 838/* trans_receive
844 * pass on received USB frame transparently as SKB via gigaset_rcv_skb 839 * pass on received USB frame transparently as SKB via gigaset_skb_rcvd
845 * invert bytes 840 * invert bytes
846 * tally frames, errors etc. in BC structure counters 841 * tally frames, errors etc. in BC structure counters
847 * parameters: 842 * parameters:
@@ -852,6 +847,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
852static inline void trans_receive(unsigned char *src, unsigned count, 847static inline void trans_receive(unsigned char *src, unsigned count,
853 struct bc_state *bcs) 848 struct bc_state *bcs)
854{ 849{
850 struct cardstate *cs = bcs->cs;
855 struct sk_buff *skb; 851 struct sk_buff *skb;
856 int dobytes; 852 int dobytes;
857 unsigned char *dst; 853 unsigned char *dst;
@@ -861,13 +857,14 @@ static inline void trans_receive(unsigned char *src, unsigned count,
861 hdlc_flush(bcs); 857 hdlc_flush(bcs);
862 return; 858 return;
863 } 859 }
864 if (unlikely((skb = bcs->skb) == NULL)) { 860 skb = bcs->skb;
865 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); 861 if (unlikely(skb == NULL)) {
862 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
866 if (!skb) { 863 if (!skb) {
867 dev_err(bcs->cs->dev, "could not allocate skb\n"); 864 dev_err(cs->dev, "could not allocate skb\n");
868 return; 865 return;
869 } 866 }
870 skb_reserve(skb, HW_HDR_LEN); 867 skb_reserve(skb, cs->hw_hdr_len);
871 } 868 }
872 bcs->hw.bas->goodbytes += skb->len; 869 bcs->hw.bas->goodbytes += skb->len;
873 dobytes = TRANSBUFSIZE - skb->len; 870 dobytes = TRANSBUFSIZE - skb->len;
@@ -881,23 +878,24 @@ static inline void trans_receive(unsigned char *src, unsigned count,
881 if (dobytes == 0) { 878 if (dobytes == 0) {
882 dump_bytes(DEBUG_STREAM_DUMP, 879 dump_bytes(DEBUG_STREAM_DUMP,
883 "rcv data", skb->data, skb->len); 880 "rcv data", skb->data, skb->len);
884 gigaset_rcv_skb(skb, bcs->cs, bcs); 881 gigaset_skb_rcvd(bcs, skb);
885 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); 882 bcs->skb = skb =
883 dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
886 if (!skb) { 884 if (!skb) {
887 dev_err(bcs->cs->dev, 885 dev_err(cs->dev, "could not allocate skb\n");
888 "could not allocate skb\n");
889 return; 886 return;
890 } 887 }
891 skb_reserve(bcs->skb, HW_HDR_LEN); 888 skb_reserve(skb, cs->hw_hdr_len);
892 dobytes = TRANSBUFSIZE; 889 dobytes = TRANSBUFSIZE;
893 } 890 }
894 } 891 }
895} 892}
896 893
897void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs) 894void gigaset_isoc_receive(unsigned char *src, unsigned count,
895 struct bc_state *bcs)
898{ 896{
899 switch (bcs->proto2) { 897 switch (bcs->proto2) {
900 case ISDN_PROTO_L2_HDLC: 898 case L2_HDLC:
901 hdlc_unpack(src, count, bcs); 899 hdlc_unpack(src, count, bcs);
902 break; 900 break;
903 default: /* assume transparent */ 901 default: /* assume transparent */
@@ -981,8 +979,10 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
981 * @bcs: B channel descriptor structure. 979 * @bcs: B channel descriptor structure.
982 * @skb: data to send. 980 * @skb: data to send.
983 * 981 *
984 * Called by i4l.c to queue an skb for sending, and start transmission if 982 * Called by LL to queue an skb for sending, and start transmission if
985 * necessary. 983 * necessary.
984 * Once the payload data has been transmitted completely, gigaset_skb_sent()
985 * will be called with the skb's link layer header preserved.
986 * 986 *
987 * Return value: 987 * Return value:
988 * number of bytes accepted for sending (skb->len) if ok, 988 * number of bytes accepted for sending (skb->len) if ok,
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index 9715aad9c3f0..758a00c1d2e2 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -39,7 +39,7 @@ static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
39 return -EINVAL; 39 return -EINVAL;
40 40
41 if (mutex_lock_interruptible(&cs->mutex)) 41 if (mutex_lock_interruptible(&cs->mutex))
42 return -ERESTARTSYS; // FIXME -EINTR? 42 return -ERESTARTSYS;
43 43
44 cs->waiting = 1; 44 cs->waiting = 1;
45 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE, 45 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 3071a52467ed..168d585d64d8 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -164,9 +164,15 @@ static void gigaset_modem_fill(unsigned long data)
164{ 164{
165 struct cardstate *cs = (struct cardstate *) data; 165 struct cardstate *cs = (struct cardstate *) data;
166 struct bc_state *bcs; 166 struct bc_state *bcs;
167 struct sk_buff *nextskb;
167 int sent = 0; 168 int sent = 0;
168 169
169 if (!cs || !(bcs = cs->bcs)) { 170 if (!cs) {
171 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
172 return;
173 }
174 bcs = cs->bcs;
175 if (!bcs) {
170 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__); 176 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
171 return; 177 return;
172 } 178 }
@@ -179,9 +185,11 @@ static void gigaset_modem_fill(unsigned long data)
179 return; 185 return;
180 186
181 /* no command to send; get skb */ 187 /* no command to send; get skb */
182 if (!(bcs->tx_skb = skb_dequeue(&bcs->squeue))) 188 nextskb = skb_dequeue(&bcs->squeue);
189 if (!nextskb)
183 /* no skb either, nothing to do */ 190 /* no skb either, nothing to do */
184 return; 191 return;
192 bcs->tx_skb = nextskb;
185 193
186 gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)", 194 gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
187 (unsigned long) bcs->tx_skb); 195 (unsigned long) bcs->tx_skb);
@@ -236,19 +244,20 @@ static void flush_send_queue(struct cardstate *cs)
236 * number of bytes queued, or error code < 0 244 * number of bytes queued, or error code < 0
237 */ 245 */
238static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, 246static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
239 int len, struct tasklet_struct *wake_tasklet) 247 int len, struct tasklet_struct *wake_tasklet)
240{ 248{
241 struct cmdbuf_t *cb; 249 struct cmdbuf_t *cb;
242 unsigned long flags; 250 unsigned long flags;
243 251
244 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? 252 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
245 DEBUG_TRANSCMD : DEBUG_LOCKCMD, 253 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
246 "CMD Transmit", len, buf); 254 "CMD Transmit", len, buf);
247 255
248 if (len <= 0) 256 if (len <= 0)
249 return 0; 257 return 0;
250 258
251 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 259 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
260 if (!cb) {
252 dev_err(cs->dev, "%s: out of memory!\n", __func__); 261 dev_err(cs->dev, "%s: out of memory!\n", __func__);
253 return -ENOMEM; 262 return -ENOMEM;
254 } 263 }
@@ -392,7 +401,6 @@ static void gigaset_device_release(struct device *dev)
392 struct platform_device *pdev = to_platform_device(dev); 401 struct platform_device *pdev = to_platform_device(dev);
393 402
394 /* adapted from platform_device_release() in drivers/base/platform.c */ 403 /* adapted from platform_device_release() in drivers/base/platform.c */
395 //FIXME is this actually necessary?
396 kfree(dev->platform_data); 404 kfree(dev->platform_data);
397 kfree(pdev->resource); 405 kfree(pdev->resource);
398} 406}
@@ -404,16 +412,20 @@ static void gigaset_device_release(struct device *dev)
404static int gigaset_initcshw(struct cardstate *cs) 412static int gigaset_initcshw(struct cardstate *cs)
405{ 413{
406 int rc; 414 int rc;
415 struct ser_cardstate *scs;
407 416
408 if (!(cs->hw.ser = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL))) { 417 scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
418 if (!scs) {
409 pr_err("out of memory\n"); 419 pr_err("out of memory\n");
410 return 0; 420 return 0;
411 } 421 }
422 cs->hw.ser = scs;
412 423
413 cs->hw.ser->dev.name = GIGASET_MODULENAME; 424 cs->hw.ser->dev.name = GIGASET_MODULENAME;
414 cs->hw.ser->dev.id = cs->minor_index; 425 cs->hw.ser->dev.id = cs->minor_index;
415 cs->hw.ser->dev.dev.release = gigaset_device_release; 426 cs->hw.ser->dev.dev.release = gigaset_device_release;
416 if ((rc = platform_device_register(&cs->hw.ser->dev)) != 0) { 427 rc = platform_device_register(&cs->hw.ser->dev);
428 if (rc != 0) {
417 pr_err("error %d registering platform device\n", rc); 429 pr_err("error %d registering platform device\n", rc);
418 kfree(cs->hw.ser); 430 kfree(cs->hw.ser);
419 cs->hw.ser = NULL; 431 cs->hw.ser = NULL;
@@ -422,7 +434,7 @@ static int gigaset_initcshw(struct cardstate *cs)
422 dev_set_drvdata(&cs->hw.ser->dev.dev, cs); 434 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
423 435
424 tasklet_init(&cs->write_tasklet, 436 tasklet_init(&cs->write_tasklet,
425 &gigaset_modem_fill, (unsigned long) cs); 437 gigaset_modem_fill, (unsigned long) cs);
426 return 1; 438 return 1;
427} 439}
428 440
@@ -434,7 +446,8 @@ static int gigaset_initcshw(struct cardstate *cs)
434 * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c 446 * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
435 * and by "if_lock" and "if_termios" in interface.c 447 * and by "if_lock" and "if_termios" in interface.c
436 */ 448 */
437static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsigned new_state) 449static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
450 unsigned new_state)
438{ 451{
439 struct tty_struct *tty = cs->hw.ser->tty; 452 struct tty_struct *tty = cs->hw.ser->tty;
440 unsigned int set, clear; 453 unsigned int set, clear;
@@ -520,8 +533,8 @@ gigaset_tty_open(struct tty_struct *tty)
520 } 533 }
521 534
522 /* allocate memory for our device state and intialize it */ 535 /* allocate memory for our device state and intialize it */
523 if (!(cs = gigaset_initcs(driver, 1, 1, 0, cidmode, 536 cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
524 GIGASET_MODULENAME))) 537 if (!cs)
525 goto error; 538 goto error;
526 539
527 cs->dev = &cs->hw.ser->dev.dev; 540 cs->dev = &cs->hw.ser->dev.dev;
@@ -690,7 +703,8 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
690 703
691 if (!cs) 704 if (!cs)
692 return; 705 return;
693 if (!(inbuf = cs->inbuf)) { 706 inbuf = cs->inbuf;
707 if (!inbuf) {
694 dev_err(cs->dev, "%s: no inbuf\n", __func__); 708 dev_err(cs->dev, "%s: no inbuf\n", __func__);
695 cs_put(cs); 709 cs_put(cs);
696 return; 710 return;
@@ -770,18 +784,21 @@ static int __init ser_gigaset_init(void)
770 int rc; 784 int rc;
771 785
772 gig_dbg(DEBUG_INIT, "%s", __func__); 786 gig_dbg(DEBUG_INIT, "%s", __func__);
773 if ((rc = platform_driver_register(&device_driver)) != 0) { 787 rc = platform_driver_register(&device_driver);
788 if (rc != 0) {
774 pr_err("error %d registering platform driver\n", rc); 789 pr_err("error %d registering platform driver\n", rc);
775 return rc; 790 return rc;
776 } 791 }
777 792
778 /* allocate memory for our driver state and intialize it */ 793 /* allocate memory for our driver state and intialize it */
779 if (!(driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 794 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
780 GIGASET_MODULENAME, GIGASET_DEVNAME, 795 GIGASET_MODULENAME, GIGASET_DEVNAME,
781 &ops, THIS_MODULE))) 796 &ops, THIS_MODULE);
797 if (!driver)
782 goto error; 798 goto error;
783 799
784 if ((rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc)) != 0) { 800 rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
801 if (rc != 0) {
785 pr_err("error %d registering line discipline\n", rc); 802 pr_err("error %d registering line discipline\n", rc);
786 goto error; 803 goto error;
787 } 804 }
@@ -808,7 +825,8 @@ static void __exit ser_gigaset_exit(void)
808 driver = NULL; 825 driver = NULL;
809 } 826 }
810 827
811 if ((rc = tty_unregister_ldisc(N_GIGASET_M101)) != 0) 828 rc = tty_unregister_ldisc(N_GIGASET_M101);
829 if (rc != 0)
812 pr_err("error %d unregistering line discipline\n", rc); 830 pr_err("error %d unregistering line discipline\n", rc);
813 831
814 platform_driver_unregister(&device_driver); 832 platform_driver_unregister(&device_driver);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4deb1ab0dbf8..3ab1daeb276b 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -43,14 +43,14 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
43#define GIGASET_MODULENAME "usb_gigaset" 43#define GIGASET_MODULENAME "usb_gigaset"
44#define GIGASET_DEVNAME "ttyGU" 44#define GIGASET_DEVNAME "ttyGU"
45 45
46#define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256 46#define IF_WRITEBUF 2000 /* arbitrary limit */
47 47
48/* Values for the Gigaset M105 Data */ 48/* Values for the Gigaset M105 Data */
49#define USB_M105_VENDOR_ID 0x0681 49#define USB_M105_VENDOR_ID 0x0681
50#define USB_M105_PRODUCT_ID 0x0009 50#define USB_M105_PRODUCT_ID 0x0009
51 51
52/* table of devices that work with this driver */ 52/* table of devices that work with this driver */
53static const struct usb_device_id gigaset_table [] = { 53static const struct usb_device_id gigaset_table[] = {
54 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) }, 54 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
55 { } /* Terminating entry */ 55 { } /* Terminating entry */
56}; 56};
@@ -97,8 +97,8 @@ MODULE_DEVICE_TABLE(usb, gigaset_table);
97 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 97 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
98 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13). 98 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
99 * xx is usually 0x00 but was 0x7e before starting data transfer 99 * xx is usually 0x00 but was 0x7e before starting data transfer
100 * in unimodem mode. So, this might be an array of characters that need 100 * in unimodem mode. So, this might be an array of characters that
101 * special treatment ("commit all bufferd data"?), 11=^Q, 13=^S. 101 * need special treatment ("commit all bufferd data"?), 11=^Q, 13=^S.
102 * 102 *
103 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two 103 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
104 * flags per packet. 104 * flags per packet.
@@ -114,7 +114,7 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
114static int gigaset_resume(struct usb_interface *intf); 114static int gigaset_resume(struct usb_interface *intf);
115static int gigaset_pre_reset(struct usb_interface *intf); 115static int gigaset_pre_reset(struct usb_interface *intf);
116 116
117static struct gigaset_driver *driver = NULL; 117static struct gigaset_driver *driver;
118 118
119/* usb specific object needed to register this driver with the usb subsystem */ 119/* usb specific object needed to register this driver with the usb subsystem */
120static struct usb_driver gigaset_usb_driver = { 120static struct usb_driver gigaset_usb_driver = {
@@ -141,6 +141,7 @@ struct usb_cardstate {
141 struct urb *bulk_out_urb; 141 struct urb *bulk_out_urb;
142 142
143 /* Input buffer */ 143 /* Input buffer */
144 unsigned char *rcvbuf;
144 int rcvbuf_size; 145 int rcvbuf_size;
145 struct urb *read_urb; 146 struct urb *read_urb;
146 __u8 int_in_endpointAddr; 147 __u8 int_in_endpointAddr;
@@ -164,13 +165,11 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
164 val = tiocm_to_gigaset(new_state); 165 val = tiocm_to_gigaset(new_state);
165 166
166 gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask); 167 gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
167 // don't use this in an interrupt/BH
168 r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41, 168 r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
169 (val & 0xff) | ((mask & 0xff) << 8), 0, 169 (val & 0xff) | ((mask & 0xff) << 8), 0,
170 NULL, 0, 2000 /* timeout? */); 170 NULL, 0, 2000 /* timeout? */);
171 if (r < 0) 171 if (r < 0)
172 return r; 172 return r;
173 //..
174 return 0; 173 return 0;
175} 174}
176 175
@@ -220,7 +219,6 @@ static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
220 cflag &= CBAUD; 219 cflag &= CBAUD;
221 220
222 switch (cflag) { 221 switch (cflag) {
223 //FIXME more values?
224 case B300: rate = 300; break; 222 case B300: rate = 300; break;
225 case B600: rate = 600; break; 223 case B600: rate = 600; break;
226 case B1200: rate = 1200; break; 224 case B1200: rate = 1200; break;
@@ -273,7 +271,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
273 /* set the number of stop bits */ 271 /* set the number of stop bits */
274 if (cflag & CSTOPB) { 272 if (cflag & CSTOPB) {
275 if ((cflag & CSIZE) == CS5) 273 if ((cflag & CSIZE) == CS5)
276 val |= 1; /* 1.5 stop bits */ //FIXME is this okay? 274 val |= 1; /* 1.5 stop bits */
277 else 275 else
278 val |= 2; /* 2 stop bits */ 276 val |= 2; /* 2 stop bits */
279 } 277 }
@@ -282,7 +280,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
282} 280}
283 281
284 282
285 /*================================================================================================================*/ 283/*============================================================================*/
286static int gigaset_init_bchannel(struct bc_state *bcs) 284static int gigaset_init_bchannel(struct bc_state *bcs)
287{ 285{
288 /* nothing to do for M10x */ 286 /* nothing to do for M10x */
@@ -344,7 +342,6 @@ static void gigaset_modem_fill(unsigned long data)
344 if (write_modem(cs) < 0) { 342 if (write_modem(cs) < 0) {
345 gig_dbg(DEBUG_OUTPUT, 343 gig_dbg(DEBUG_OUTPUT,
346 "modem_fill: write_modem failed"); 344 "modem_fill: write_modem failed");
347 // FIXME should we tell the LL?
348 again = 1; /* no callback will be called! */ 345 again = 1; /* no callback will be called! */
349 } 346 }
350 } 347 }
@@ -356,8 +353,8 @@ static void gigaset_modem_fill(unsigned long data)
356 */ 353 */
357static void gigaset_read_int_callback(struct urb *urb) 354static void gigaset_read_int_callback(struct urb *urb)
358{ 355{
359 struct inbuf_t *inbuf = urb->context; 356 struct cardstate *cs = urb->context;
360 struct cardstate *cs = inbuf->cs; 357 struct inbuf_t *inbuf = cs->inbuf;
361 int status = urb->status; 358 int status = urb->status;
362 int r; 359 int r;
363 unsigned numbytes; 360 unsigned numbytes;
@@ -368,7 +365,7 @@ static void gigaset_read_int_callback(struct urb *urb)
368 numbytes = urb->actual_length; 365 numbytes = urb->actual_length;
369 366
370 if (numbytes) { 367 if (numbytes) {
371 src = inbuf->rcvbuf; 368 src = cs->hw.usb->rcvbuf;
372 if (unlikely(*src)) 369 if (unlikely(*src))
373 dev_warn(cs->dev, 370 dev_warn(cs->dev,
374 "%s: There was no leading 0, but 0x%02x!\n", 371 "%s: There was no leading 0, but 0x%02x!\n",
@@ -440,7 +437,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
440 struct cmdbuf_t *tcb; 437 struct cmdbuf_t *tcb;
441 unsigned long flags; 438 unsigned long flags;
442 int count; 439 int count;
443 int status = -ENOENT; // FIXME 440 int status = -ENOENT;
444 struct usb_cardstate *ucs = cs->hw.usb; 441 struct usb_cardstate *ucs = cs->hw.usb;
445 442
446 do { 443 do {
@@ -480,7 +477,9 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
480 ucs->busy = 1; 477 ucs->busy = 1;
481 478
482 spin_lock_irqsave(&cs->lock, flags); 479 spin_lock_irqsave(&cs->lock, flags);
483 status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV; 480 status = cs->connected ?
481 usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
482 -ENODEV;
484 spin_unlock_irqrestore(&cs->lock, flags); 483 spin_unlock_irqrestore(&cs->lock, flags);
485 484
486 if (status) { 485 if (status) {
@@ -510,8 +509,8 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
510 509
511 if (len <= 0) 510 if (len <= 0)
512 return 0; 511 return 0;
513 512 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
514 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 513 if (!cb) {
515 dev_err(cs->dev, "%s: out of memory\n", __func__); 514 dev_err(cs->dev, "%s: out of memory\n", __func__);
516 return -ENOMEM; 515 return -ENOMEM;
517 } 516 }
@@ -615,7 +614,7 @@ static int gigaset_initcshw(struct cardstate *cs)
615 ucs->bulk_out_urb = NULL; 614 ucs->bulk_out_urb = NULL;
616 ucs->read_urb = NULL; 615 ucs->read_urb = NULL;
617 tasklet_init(&cs->write_tasklet, 616 tasklet_init(&cs->write_tasklet,
618 &gigaset_modem_fill, (unsigned long) cs); 617 gigaset_modem_fill, (unsigned long) cs);
619 618
620 return 1; 619 return 1;
621} 620}
@@ -637,9 +636,7 @@ static int write_modem(struct cardstate *cs)
637 return -EINVAL; 636 return -EINVAL;
638 } 637 }
639 638
640 /* Copy data to bulk out buffer and // FIXME copying not necessary 639 /* Copy data to bulk out buffer and transmit data */
641 * transmit data
642 */
643 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); 640 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
644 skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); 641 skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
645 skb_pull(bcs->tx_skb, count); 642 skb_pull(bcs->tx_skb, count);
@@ -650,7 +647,8 @@ static int write_modem(struct cardstate *cs)
650 if (cs->connected) { 647 if (cs->connected) {
651 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, 648 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
652 usb_sndbulkpipe(ucs->udev, 649 usb_sndbulkpipe(ucs->udev,
653 ucs->bulk_out_endpointAddr & 0x0f), 650 ucs->bulk_out_endpointAddr &
651 0x0f),
654 ucs->bulk_out_buffer, count, 652 ucs->bulk_out_buffer, count,
655 gigaset_write_bulk_callback, cs); 653 gigaset_write_bulk_callback, cs);
656 ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC); 654 ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
@@ -666,7 +664,7 @@ static int write_modem(struct cardstate *cs)
666 664
667 if (!bcs->tx_skb->len) { 665 if (!bcs->tx_skb->len) {
668 /* skb sent completely */ 666 /* skb sent completely */
669 gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0? 667 gigaset_skb_sent(bcs, bcs->tx_skb);
670 668
671 gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!", 669 gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
672 (unsigned long) bcs->tx_skb); 670 (unsigned long) bcs->tx_skb);
@@ -763,8 +761,8 @@ static int gigaset_probe(struct usb_interface *interface,
763 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); 761 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
764 ucs->rcvbuf_size = buffer_size; 762 ucs->rcvbuf_size = buffer_size;
765 ucs->int_in_endpointAddr = endpoint->bEndpointAddress; 763 ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
766 cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL); 764 ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
767 if (!cs->inbuf[0].rcvbuf) { 765 if (!ucs->rcvbuf) {
768 dev_err(cs->dev, "Couldn't allocate rcvbuf\n"); 766 dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
769 retval = -ENOMEM; 767 retval = -ENOMEM;
770 goto error; 768 goto error;
@@ -773,9 +771,9 @@ static int gigaset_probe(struct usb_interface *interface,
773 usb_fill_int_urb(ucs->read_urb, udev, 771 usb_fill_int_urb(ucs->read_urb, udev,
774 usb_rcvintpipe(udev, 772 usb_rcvintpipe(udev,
775 endpoint->bEndpointAddress & 0x0f), 773 endpoint->bEndpointAddress & 0x0f),
776 cs->inbuf[0].rcvbuf, buffer_size, 774 ucs->rcvbuf, buffer_size,
777 gigaset_read_int_callback, 775 gigaset_read_int_callback,
778 cs->inbuf + 0, endpoint->bInterval); 776 cs, endpoint->bInterval);
779 777
780 retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL); 778 retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
781 if (retval) { 779 if (retval) {
@@ -789,7 +787,7 @@ static int gigaset_probe(struct usb_interface *interface,
789 787
790 if (!gigaset_start(cs)) { 788 if (!gigaset_start(cs)) {
791 tasklet_kill(&cs->write_tasklet); 789 tasklet_kill(&cs->write_tasklet);
792 retval = -ENODEV; //FIXME 790 retval = -ENODEV;
793 goto error; 791 goto error;
794 } 792 }
795 return 0; 793 return 0;
@@ -798,11 +796,11 @@ error:
798 usb_kill_urb(ucs->read_urb); 796 usb_kill_urb(ucs->read_urb);
799 kfree(ucs->bulk_out_buffer); 797 kfree(ucs->bulk_out_buffer);
800 usb_free_urb(ucs->bulk_out_urb); 798 usb_free_urb(ucs->bulk_out_urb);
801 kfree(cs->inbuf[0].rcvbuf); 799 kfree(ucs->rcvbuf);
802 usb_free_urb(ucs->read_urb); 800 usb_free_urb(ucs->read_urb);
803 usb_set_intfdata(interface, NULL); 801 usb_set_intfdata(interface, NULL);
804 ucs->read_urb = ucs->bulk_out_urb = NULL; 802 ucs->read_urb = ucs->bulk_out_urb = NULL;
805 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; 803 ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
806 usb_put_dev(ucs->udev); 804 usb_put_dev(ucs->udev);
807 ucs->udev = NULL; 805 ucs->udev = NULL;
808 ucs->interface = NULL; 806 ucs->interface = NULL;
@@ -831,10 +829,10 @@ static void gigaset_disconnect(struct usb_interface *interface)
831 829
832 kfree(ucs->bulk_out_buffer); 830 kfree(ucs->bulk_out_buffer);
833 usb_free_urb(ucs->bulk_out_urb); 831 usb_free_urb(ucs->bulk_out_urb);
834 kfree(cs->inbuf[0].rcvbuf); 832 kfree(ucs->rcvbuf);
835 usb_free_urb(ucs->read_urb); 833 usb_free_urb(ucs->read_urb);
836 ucs->read_urb = ucs->bulk_out_urb = NULL; 834 ucs->read_urb = ucs->bulk_out_urb = NULL;
837 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; 835 ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
838 836
839 usb_put_dev(ucs->udev); 837 usb_put_dev(ucs->udev);
840 ucs->interface = NULL; 838 ucs->interface = NULL;
@@ -916,9 +914,10 @@ static int __init usb_gigaset_init(void)
916 int result; 914 int result;
917 915
918 /* allocate memory for our driver state and intialize it */ 916 /* allocate memory for our driver state and intialize it */
919 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 917 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
920 GIGASET_MODULENAME, GIGASET_DEVNAME, 918 GIGASET_MODULENAME, GIGASET_DEVNAME,
921 &ops, THIS_MODULE)) == NULL) 919 &ops, THIS_MODULE);
920 if (driver == NULL)
922 goto error; 921 goto error;
923 922
924 /* register this driver with the USB subsystem */ 923 /* register this driver with the USB subsystem */
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index ff3a4e290da3..7726afdbb40b 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -110,6 +110,7 @@ set_debug(const char *val, struct kernel_param *kp)
110MODULE_AUTHOR("Karsten Keil"); 110MODULE_AUTHOR("Karsten Keil");
111MODULE_LICENSE("GPL v2"); 111MODULE_LICENSE("GPL v2");
112MODULE_VERSION(SPEEDFAX_REV); 112MODULE_VERSION(SPEEDFAX_REV);
113MODULE_FIRMWARE("isdn/ISAR.BIN");
113module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR); 114module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
114MODULE_PARM_DESC(debug, "Speedfax debug mask"); 115MODULE_PARM_DESC(debug, "Speedfax debug mask");
115module_param(irqloops, uint, S_IRUGO | S_IWUSR); 116module_param(irqloops, uint, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index feb0fa45b664..fcfe17a19a61 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -779,7 +779,7 @@ base_sock_create(struct net *net, struct socket *sock, int protocol)
779} 779}
780 780
781static int 781static int
782mISDN_sock_create(struct net *net, struct socket *sock, int proto) 782mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
783{ 783{
784 int err = -EPROTONOSUPPORT; 784 int err = -EPROTONOSUPPORT;
785 785
@@ -808,8 +808,7 @@ mISDN_sock_create(struct net *net, struct socket *sock, int proto)
808 return err; 808 return err;
809} 809}
810 810
811static struct 811static const struct net_proto_family mISDN_sock_family_ops = {
812net_proto_family mISDN_sock_family_ops = {
813 .owner = THIS_MODULE, 812 .owner = THIS_MODULE,
814 .family = PF_ISDN, 813 .family = PF_ISDN,
815 .create = mISDN_sock_create, 814 .create = mISDN_sock_create,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index df1f86b5c83e..a2ea383105a6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -249,5 +249,6 @@ config EP93XX_PWM
249source "drivers/misc/c2port/Kconfig" 249source "drivers/misc/c2port/Kconfig"
250source "drivers/misc/eeprom/Kconfig" 250source "drivers/misc/eeprom/Kconfig"
251source "drivers/misc/cb710/Kconfig" 251source "drivers/misc/cb710/Kconfig"
252source "drivers/misc/iwmc3200top/Kconfig"
252 253
253endif # MISC_DEVICES 254endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f982d2ecfde7..e311267a355f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_HP_ILO) += hpilo.o
21obj-$(CONFIG_ISL29003) += isl29003.o 21obj-$(CONFIG_ISL29003) += isl29003.o
22obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o 22obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
23obj-$(CONFIG_C2PORT) += c2port/ 23obj-$(CONFIG_C2PORT) += c2port/
24obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
24obj-y += eeprom/ 25obj-y += eeprom/
25obj-y += cb710/ 26obj-y += cb710/
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
new file mode 100644
index 000000000000..9e4b88fb57f1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Kconfig
@@ -0,0 +1,20 @@
1config IWMC3200TOP
2 tristate "Intel Wireless MultiCom Top Driver"
3 depends on MMC && EXPERIMENTAL
4 select FW_LOADER
5 ---help---
6 Intel Wireless MultiCom 3200 Top driver is responsible for
7 for firmware load and enabled coms enumeration
8
9config IWMC3200TOP_DEBUG
10 bool "Enable full debug output of iwmc3200top Driver"
11 depends on IWMC3200TOP
12 ---help---
13 Enable full debug output of iwmc3200top Driver
14
15config IWMC3200TOP_DEBUGFS
16 bool "Enable Debugfs debugging interface for iwmc3200top"
17 depends on IWMC3200TOP
18 ---help---
19 Enable creation of debugfs files for iwmc3200top
20
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
new file mode 100644
index 000000000000..fbf53fb4634e
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Makefile
@@ -0,0 +1,29 @@
1# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
2# drivers/misc/iwmc3200top/Makefile
3#
4# Copyright (C) 2009 Intel Corporation. All rights reserved.
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License version
8# 2 as published by the Free Software Foundation.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18# 02110-1301, USA.
19#
20#
21# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
22# -
23#
24#
25
26obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
27iwmc3200top-objs := main.o fw-download.o
28iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
29iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
new file mode 100644
index 000000000000..0c8ea0a1c8a3
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.c
@@ -0,0 +1,133 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debufs.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/string.h>
29#include <linux/ctype.h>
30#include <linux/mmc/sdio_func.h>
31#include <linux/mmc/sdio.h>
32#include <linux/debugfs.h>
33
34#include "iwmc3200top.h"
35#include "fw-msg.h"
36#include "log.h"
37#include "debugfs.h"
38
39
40
41/* Constants definition */
42#define HEXADECIMAL_RADIX 16
43
44/* Functions definition */
45
46
47#define DEBUGFS_ADD(name, parent) do { \
48 dbgfs->dbgfs_##parent##_files.file_##name = \
49 debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
50 &iwmct_dbgfs_##name##_ops); \
51} while (0)
52
53#define DEBUGFS_RM(name) do { \
54 debugfs_remove(name); \
55 name = NULL; \
56} while (0)
57
58#define DEBUGFS_READ_FUNC(name) \
59ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
60 char __user *user_buf, \
61 size_t count, loff_t *ppos);
62
63#define DEBUGFS_WRITE_FUNC(name) \
64ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
65 const char __user *user_buf, \
66 size_t count, loff_t *ppos);
67
68#define DEBUGFS_READ_FILE_OPS(name) \
69 DEBUGFS_READ_FUNC(name) \
70 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
71 .read = iwmct_dbgfs_##name##_read, \
72 .open = iwmct_dbgfs_open_file_generic, \
73 };
74
75#define DEBUGFS_WRITE_FILE_OPS(name) \
76 DEBUGFS_WRITE_FUNC(name) \
77 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
78 .write = iwmct_dbgfs_##name##_write, \
79 .open = iwmct_dbgfs_open_file_generic, \
80 };
81
82#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
83 DEBUGFS_READ_FUNC(name) \
84 DEBUGFS_WRITE_FUNC(name) \
85 static const struct file_operations iwmct_dbgfs_##name##_ops = {\
86 .write = iwmct_dbgfs_##name##_write, \
87 .read = iwmct_dbgfs_##name##_read, \
88 .open = iwmct_dbgfs_open_file_generic, \
89 };
90
91
92/* Debugfs file ops definitions */
93
94/*
95 * Create the debugfs files and directories
96 *
97 */
98void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
99{
100 struct iwmct_debugfs *dbgfs;
101
102 dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
103 if (!dbgfs) {
104 LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
105 sizeof(struct iwmct_debugfs));
106 return;
107 }
108
109 priv->dbgfs = dbgfs;
110 dbgfs->name = name;
111 dbgfs->dir_drv = debugfs_create_dir(name, NULL);
112 if (!dbgfs->dir_drv) {
113 LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
114 return;
115 }
116
117 return;
118}
119
120/**
121 * Remove the debugfs files and directories
122 *
123 */
124void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
125{
126 if (!dbgfs)
127 return;
128
129 DEBUGFS_RM(dbgfs->dir_drv);
130 kfree(dbgfs);
131 dbgfs = NULL;
132}
133
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
new file mode 100644
index 000000000000..71d45759b40f
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.h
@@ -0,0 +1,58 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debufs.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __DEBUGFS_H__
28#define __DEBUGFS_H__
29
30
31#ifdef CONFIG_IWMC3200TOP_DEBUGFS
32
33struct iwmct_debugfs {
34 const char *name;
35 struct dentry *dir_drv;
36 struct dir_drv_files {
37 } dbgfs_drv_files;
38};
39
40void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
41void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
42
43#else /* CONFIG_IWMC3200TOP_DEBUGFS */
44
45struct iwmct_debugfs;
46
47static inline void
48iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
49{}
50
51static inline void
52iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
53{}
54
55#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
56
57#endif /* __DEBUGFS_H__ */
58
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
new file mode 100644
index 000000000000..50d431e469f5
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -0,0 +1,355 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-download.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/mmc/sdio_func.h>
29#include <asm/unaligned.h>
30
31#include "iwmc3200top.h"
32#include "log.h"
33#include "fw-msg.h"
34
35#define CHECKSUM_BYTES_NUM sizeof(u32)
36
37/**
38 init parser struct with file
39 */
40static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
41 size_t file_size, size_t block_size)
42{
43 struct iwmct_parser *parser = &priv->parser;
44 struct iwmct_fw_hdr *fw_hdr = &parser->versions;
45
46 LOG_INFOEX(priv, INIT, "-->\n");
47
48 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
49
50 parser->file = file;
51 parser->file_size = file_size;
52 parser->cur_pos = 0;
53 parser->buf = NULL;
54
55 parser->buf = kzalloc(block_size, GFP_KERNEL);
56 if (!parser->buf) {
57 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
58 return -ENOMEM;
59 }
60 parser->buf_size = block_size;
61
62 /* extract fw versions */
63 memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
64 LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
65 "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
66 fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
67 fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
68 fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
69 fw_hdr->tic_name);
70
71 parser->cur_pos += sizeof(struct iwmct_fw_hdr);
72
73 LOG_INFOEX(priv, INIT, "<--\n");
74 return 0;
75}
76
77static bool iwmct_checksum(struct iwmct_priv *priv)
78{
79 struct iwmct_parser *parser = &priv->parser;
80 __le32 *file = (__le32 *)parser->file;
81 int i, pad, steps;
82 u32 accum = 0;
83 u32 checksum;
84 u32 mask = 0xffffffff;
85
86 pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
87 steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
88
89 LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
90
91 for (i = 0; i < steps; i++)
92 accum += le32_to_cpu(file[i]);
93
94 if (pad) {
95 mask <<= 8 * (4 - pad);
96 accum += le32_to_cpu(file[steps]) & mask;
97 }
98
99 checksum = get_unaligned_le32((__le32 *)(parser->file +
100 parser->file_size - CHECKSUM_BYTES_NUM));
101
102 LOG_INFO(priv, FW_DOWNLOAD,
103 "compare checksum accum=0x%x to checksum=0x%x\n",
104 accum, checksum);
105
106 return checksum == accum;
107}
108
109static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
110 size_t *sec_size, __le32 *sec_addr)
111{
112 struct iwmct_parser *parser = &priv->parser;
113 struct iwmct_dbg *dbg = &priv->dbg;
114 struct iwmct_fw_sec_hdr *sec_hdr;
115
116 LOG_INFOEX(priv, INIT, "-->\n");
117
118 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
119 <= parser->file_size) {
120
121 sec_hdr = (struct iwmct_fw_sec_hdr *)
122 (parser->file + parser->cur_pos);
123 parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
124
125 LOG_INFO(priv, FW_DOWNLOAD,
126 "sec hdr: type=%s addr=0x%x size=%d\n",
127 sec_hdr->type, sec_hdr->target_addr,
128 sec_hdr->data_size);
129
130 if (strcmp(sec_hdr->type, "ENT") == 0)
131 parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
132 else if (strcmp(sec_hdr->type, "LBL") == 0)
133 strcpy(dbg->label_fw, parser->file + parser->cur_pos);
134 else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
135 (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
136 ((strcmp(sec_hdr->type, "GPS") == 0) &&
137 (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
138 ((strcmp(sec_hdr->type, "BTH") == 0) &&
139 (priv->barker & BARKER_DNLOAD_BT_MSK))) {
140 *sec_addr = sec_hdr->target_addr;
141 *sec_size = le32_to_cpu(sec_hdr->data_size);
142 *p_sec = parser->file + parser->cur_pos;
143 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
144 return 1;
145 } else if (strcmp(sec_hdr->type, "LOG") != 0)
146 LOG_WARNING(priv, FW_DOWNLOAD,
147 "skipping section type %s\n",
148 sec_hdr->type);
149
150 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
151 LOG_INFO(priv, FW_DOWNLOAD,
152 "finished with section cur_pos=%zd\n", parser->cur_pos);
153 }
154
155 LOG_INFOEX(priv, INIT, "<--\n");
156 return 0;
157}
158
159static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
160 size_t sec_size, __le32 addr)
161{
162 struct iwmct_parser *parser = &priv->parser;
163 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
164 const u8 *cur_block = p_sec;
165 size_t sent = 0;
166 int cnt = 0;
167 int ret = 0;
168 u32 cmd = 0;
169
170 LOG_INFOEX(priv, INIT, "-->\n");
171 LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
172 addr, sec_size);
173
174 while (sent < sec_size) {
175 int i;
176 u32 chksm = 0;
177 u32 reset = atomic_read(&priv->reset);
178 /* actual FW data */
179 u32 data_size = min(parser->buf_size - sizeof(*hdr),
180 sec_size - sent);
181 /* Pad to block size */
182 u32 trans_size = (data_size + sizeof(*hdr) +
183 IWMC_SDIO_BLK_SIZE - 1) &
184 ~(IWMC_SDIO_BLK_SIZE - 1);
185 ++cnt;
186
187 /* in case of reset, interrupt FW DOWNLAOD */
188 if (reset) {
189 LOG_INFO(priv, FW_DOWNLOAD,
190 "Reset detected. Abort FW download!!!");
191 ret = -ECANCELED;
192 goto exit;
193 }
194
195 memset(parser->buf, 0, parser->buf_size);
196 cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
197 cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
198 cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
199 cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
200 hdr->data_size = cpu_to_le32(data_size);
201 hdr->target_addr = addr;
202
203 /* checksum is allowed for sizes divisible by 4 */
204 if (data_size & 0x3)
205 cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
206
207 memcpy(hdr->data, cur_block, data_size);
208
209
210 if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
211
212 chksm = data_size + le32_to_cpu(addr) + cmd;
213 for (i = 0; i < data_size >> 2; i++)
214 chksm += ((u32 *)cur_block)[i];
215
216 hdr->block_chksm = cpu_to_le32(chksm);
217 LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
218 hdr->block_chksm);
219 }
220
221 LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
222 "sec_size=%zd, startAddress 0x%X\n",
223 cnt, trans_size, sent, sec_size, addr);
224
225 if (priv->dbg.dump)
226 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
227
228
229 hdr->cmd = cpu_to_le32(cmd);
230 /* send it down */
231 /* TODO: add more proper sending and error checking */
232 ret = iwmct_tx(priv, 0, parser->buf, trans_size);
233 if (ret != 0) {
234 LOG_INFO(priv, FW_DOWNLOAD,
235 "iwmct_tx returned %d\n", ret);
236 goto exit;
237 }
238
239 addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
240 sent += data_size;
241 cur_block = p_sec + sent;
242
243 if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
244 LOG_INFO(priv, FW_DOWNLOAD,
245 "Block number limit is reached [%d]\n",
246 priv->dbg.blocks);
247 break;
248 }
249 }
250
251 if (sent < sec_size)
252 ret = -EINVAL;
253exit:
254 LOG_INFOEX(priv, INIT, "<--\n");
255 return ret;
256}
257
258static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
259{
260 struct iwmct_parser *parser = &priv->parser;
261 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
262 int ret;
263 u32 cmd;
264
265 LOG_INFOEX(priv, INIT, "-->\n");
266
267 memset(parser->buf, 0, parser->buf_size);
268 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
269 if (jump) {
270 cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
271 hdr->target_addr = cpu_to_le32(parser->entry_point);
272 LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
273 parser->entry_point);
274 } else {
275 cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
276 LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
277 }
278
279 hdr->cmd = cpu_to_le32(cmd);
280
281 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
282 /* send it down */
283 /* TODO: add more proper sending and error checking */
284 ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
285 if (ret)
286 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
287
288 LOG_INFOEX(priv, INIT, "<--\n");
289 return 0;
290}
291
292int iwmct_fw_load(struct iwmct_priv *priv)
293{
294 const u8 *fw_name = FW_NAME(FW_API_VER);
295 const struct firmware *raw;
296 const u8 *pdata;
297 size_t len;
298 __le32 addr;
299 int ret;
300
301 /* clear parser struct */
302 memset(&priv->parser, 0, sizeof(struct iwmct_parser));
303
304 /* get the firmware */
305 ret = request_firmware(&raw, fw_name, &priv->func->dev);
306 if (ret < 0) {
307 LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
308 fw_name, ret);
309 goto exit;
310 }
311
312 if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
313 LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n",
314 fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
315 goto exit;
316 }
317
318 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
319
320 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
321 if (ret < 0) {
322 LOG_ERROR(priv, FW_DOWNLOAD,
323 "iwmct_parser_init failed: Reason %d\n", ret);
324 goto exit;
325 }
326
327 /* checksum */
328 if (!iwmct_checksum(priv)) {
329 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
330 ret = -EINVAL;
331 goto exit;
332 }
333
334 /* download firmware to device */
335 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
336 if (iwmct_download_section(priv, pdata, len, addr)) {
337 LOG_ERROR(priv, FW_DOWNLOAD,
338 "%s download section failed\n", fw_name);
339 ret = -EIO;
340 goto exit;
341 }
342 }
343
344 iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
345
346exit:
347 kfree(priv->parser.buf);
348
349 if (raw)
350 release_firmware(raw);
351
352 raw = NULL;
353
354 return ret;
355}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
new file mode 100644
index 000000000000..9e26b75bd482
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-msg.h
@@ -0,0 +1,113 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-msg.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __FWMSG_H__
28#define __FWMSG_H__
29
30#define COMM_TYPE_D2H 0xFF
31#define COMM_TYPE_H2D 0xEE
32
33#define COMM_CATEGORY_OPERATIONAL 0x00
34#define COMM_CATEGORY_DEBUG 0x01
35#define COMM_CATEGORY_TESTABILITY 0x02
36#define COMM_CATEGORY_DIAGNOSTICS 0x03
37
38#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
39
40#define FW_LOG_SRC_MAX 32
41#define FW_LOG_SRC_ALL 255
42
43#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
44
45#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
46#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
47#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
48#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
49#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
50#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
51#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
52#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
53#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
54#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
55
56#define OP_OPR_ALIVE cpu_to_le16(0x0010)
57#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
58#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
59#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
60
61#define CMD_FLAG_PADDING_256 0x80
62
63#define FW_HCMD_BLOCK_SIZE 256
64
/* Header prepended to every message exchanged with the TOP FW,
 * in both directions. */
struct msg_hdr {
	u8 type;	/* COMM_TYPE_D2H or COMM_TYPE_H2D */
	u8 category;	/* COMM_CATEGORY_* */
	__le16 opcode;
	u8 seqnum;
	u8 flags;	/* e.g. CMD_FLAG_PADDING_256 */
	__le16 length;	/* payload length, not including this header */
} __attribute__((__packed__));

/* Header of a D2H log message payload. */
struct log_hdr {
	__le32 timestamp;
	u8 severity;	/* used as a bit index into the filter masks */
	u8 logsource;	/* FW log source, index into the filter tables */
	__le16 reserved;
} __attribute__((__packed__));

/* Header of a memory-dump fragment. */
struct mdump_hdr {
	u8 dmpid;
	u8 frag;
	__le16 size;
	__le32 addr;
} __attribute__((__packed__));

/* Complete TOP message: common header plus per-opcode payload union. */
struct top_msg {
	struct msg_hdr hdr;
	union {
		/* D2H messages */
		struct {
			struct log_hdr log_hdr;
			u8 data[1];	/* variable-length log payload */
		} __attribute__((__packed__)) log;

		struct {
			struct log_hdr log_hdr;
			struct mdump_hdr md_hdr;
			u8 data[1];	/* variable-length dump payload */
		} __attribute__((__packed__)) mdump;

		/* H2D messages */
		struct {
			u8 logsource;
			u8 sevmask;	/* severity bitmask for logsource */
		} __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
		struct mdump_hdr mdump_req;
	} u;
} __attribute__((__packed__));
111
112
113#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
new file mode 100644
index 000000000000..43bd510e1872
--- /dev/null
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -0,0 +1,209 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/iwmc3200top.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __IWMC3200TOP_H__
28#define __IWMC3200TOP_H__
29
30#include <linux/workqueue.h>
31
32#define DRV_NAME "iwmc3200top"
33#define FW_API_VER 1
34#define _FW_NAME(api) DRV_NAME "." #api ".fw"
35#define FW_NAME(api) _FW_NAME(api)
36
37#define IWMC_SDIO_BLK_SIZE 256
38#define IWMC_DEFAULT_TR_BLK 64
39#define IWMC_SDIO_DATA_ADDR 0x0
40#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14
41#define IWMC_SDIO_INTR_STATUS_ADDR 0x13
42#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13
43#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C
44
45#define COMM_HUB_HEADER_LENGTH 16
46#define LOGGER_HEADER_LENGTH 10
47
48
49#define BARKER_DNLOAD_BT_POS 0
50#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS)
51#define BARKER_DNLOAD_GPS_POS 1
52#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS)
53#define BARKER_DNLOAD_TOP_POS 2
54#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS)
55#define BARKER_DNLOAD_RESERVED1_POS 3
56#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS)
57#define BARKER_DNLOAD_JUMP_POS 4
58#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS)
59#define BARKER_DNLOAD_SYNC_POS 5
60#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS)
61#define BARKER_DNLOAD_RESERVED2_POS 6
62#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS)
63#define BARKER_DNLOAD_BARKER_POS 8
64#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS)
65
66#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
67/* whole field barker */
68#define IWMC_BARKER_ACK 0xfeedbabe
69
70#define IWMC_CMD_SIGNATURE 0xcbbc
71
#define CMD_HDR_OPCODE_POS		0
/* bug fix: was CMD_HDR_OPCODE_MSK_MSK built from the undefined symbol
 * CMD_HDR_OPCODE_MSK_POS, so the macro could never be expanded */
#define CMD_HDR_OPCODE_MSK		(0xf << CMD_HDR_OPCODE_POS)
#define CMD_HDR_RESPONSE_CODE_POS	4
#define CMD_HDR_RESPONSE_CODE_MSK	(0xf << CMD_HDR_RESPONSE_CODE_POS)
#define CMD_HDR_USE_CHECKSUM_POS	8
#define CMD_HDR_USE_CHECKSUM_MSK	BIT(CMD_HDR_USE_CHECKSUM_POS)
#define CMD_HDR_RESPONSE_REQUIRED_POS	9
#define CMD_HDR_RESPONSE_REQUIRED_MSK	BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
#define CMD_HDR_DIRECT_ACCESS_POS	10
#define CMD_HDR_DIRECT_ACCESS_MSK	BIT(CMD_HDR_DIRECT_ACCESS_POS)
#define CMD_HDR_RESERVED_POS		11
/* bug fix: the two masks below were wrapped in BIT(), producing
 * BIT(0x1f << 11) / BIT(0xffff << 16) - a single (out-of-range,
 * undefined-shift) bit instead of a multi-bit field mask */
#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
#define CMD_HDR_SIGNATURE_POS		16
#define CMD_HDR_SIGNATURE_MSK		(0xffffU << CMD_HDR_SIGNATURE_POS)
86
/* Opcodes understood by the device bootloader during FW download.
 * NOTE(review): presumably packed into the CMD_HDR_OPCODE bits of
 * iwmct_fw_load_hdr.cmd - confirm against the fw-download code. */
enum {
	IWMC_OPCODE_PING = 0,
	IWMC_OPCODE_READ = 1,
	IWMC_OPCODE_WRITE = 2,
	IWMC_OPCODE_JUMP = 3,
	IWMC_OPCODE_REBOOT = 4,
	IWMC_OPCODE_PERSISTENT_WRITE = 5,
	IWMC_OPCODE_PERSISTENT_READ = 6,
	IWMC_OPCODE_READ_MODIFY_WRITE = 7,
	IWMC_OPCODE_LAST_COMMAND = 15
};
98
/**
 * struct iwmct_fw_load_hdr - header of one download command block
 * @cmd: command word (NOTE(review): presumably built from the
 *	IWMC_OPCODE_* / CMD_HDR_* definitions above - confirm)
 * @target_addr: device address the payload is written to
 * @data_size: payload size in bytes
 * @block_chksm: checksum of the block
 * @data: trailing payload bytes
 */
struct iwmct_fw_load_hdr {
	__le32 cmd;
	__le32 target_addr;
	__le32 data_size;
	__le32 block_chksm;
	u8 data[0];
};

/**
 * struct iwmct_fw_hdr
 * holds all sw components versions
 */
struct iwmct_fw_hdr {
	u8 top_major;
	u8 top_minor;
	u8 top_revision;
	u8 gps_major;
	u8 gps_minor;
	u8 gps_revision;
	u8 bt_major;
	u8 bt_minor;
	u8 bt_revision;
	u8 tic_name[31];
};

/**
 * struct iwmct_fw_sec_hdr - per-section header inside the FW image
 * @type: function type
 * @data_size: section's data size
 * @target_addr: download address
 */
struct iwmct_fw_sec_hdr {
	u8 type[4];
	__le32 data_size;
	__le32 target_addr;
};

/**
 * struct iwmct_parser - state of the FW image parser
 * @file: fw image
 * @file_size: fw size
 * @cur_pos: position in file
 * @buf: temp buf for download
 * @buf_size: size of buf
 * @entry_point: address to jump in fw kick-off
 * @versions: sw component versions read from the image header
 */
struct iwmct_parser {
	const u8 *file;
	size_t file_size;
	size_t cur_pos;
	u8 *buf;
	size_t buf_size;
	u32 entry_point;
	struct iwmct_fw_hdr versions;
};
154
155
/* Work item queued by the ISR for the read worker; carries the size of
 * the pending SDIO transaction. */
struct iwmct_work_struct {
	struct list_head list;
	ssize_t iosize;
};

/* Debug / debugfs-tunable knobs. */
struct iwmct_dbg {
	int blocks;
	bool dump;
	bool jump;
	bool direct;
	bool checksum;
	bool fw_download;	/* allow FW download on reboot barker */
	int block_size;
	int download_trans_blks;

	char label_fw[256];
};

struct iwmct_debugfs;

/* Per-device driver state. */
struct iwmct_priv {
	struct sdio_func *func;
	struct iwmct_debugfs *dbgfs;
	struct iwmct_parser parser;
	atomic_t reset;
	atomic_t dev_sync;	/* set while waiting for the ACK barker */
	u32 trans_len;
	u32 barker;		/* last reboot barker received */
	struct iwmct_dbg dbg;

	/* drivers work queue */
	struct workqueue_struct *wq;
	struct workqueue_struct *bus_rescan_wq;
	struct work_struct bus_rescan_worker;
	struct work_struct isr_worker;

	/* drivers wait queue */
	wait_queue_head_t wait_q;

	/* rx request list */
	struct list_head read_req_list;
};
198
199extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
200 void *src, int count);
201
202extern int iwmct_fw_load(struct iwmct_priv *priv);
203
204extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
205extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
206extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
207extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
208
209#endif /* __IWMC3200TOP_H__ */
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
new file mode 100644
index 000000000000..d569279698f6
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.c
@@ -0,0 +1,347 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/ctype.h>
30#include "fw-msg.h"
31#include "iwmc3200top.h"
32#include "log.h"
33
34/* Maximal hexadecimal string size of the FW memdump message */
35#define LOG_MSG_SIZE_MAX 12400
36
37/* iwmct_logdefs is a global used by log macros */
38u8 iwmct_logdefs[LOG_SRC_MAX];
39static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
40
41
42static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
43{
44 int i;
45
46 if (src < size)
47 logdefs[src] = logmask;
48 else if (src == LOG_SRC_ALL)
49 for (i = 0; i < size; i++)
50 logdefs[i] = logmask;
51 else
52 return -1;
53
54 return 0;
55}
56
57
/* Set the driver-side log filter for source @src (or all sources when
 * @src == LOG_SRC_ALL) to the severity bitmask @logmask. */
int iwmct_log_set_filter(u8 src, u8 logmask)
{
	return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
}


/* Same as iwmct_log_set_filter(), but for the cached firmware-side
 * filter table. */
int iwmct_log_set_fw_filter(u8 src, u8 logmask)
{
	return _log_set_log_filter(iwmct_fw_logdefs,
				   FW_LOG_SRC_MAX, src, logmask);
}
69
70
/*
 * log_msg_format_hex - render @ilen bytes of @ibuf into @str as " XX"
 * hex pairs, preceded by the prefix string @pref.
 *
 * Returns 0 when everything fit into @str (capacity @slen), -1 when
 * the input had to be truncated.
 * NOTE(review): when ilen == 0 the prefix is copied without a
 * terminating NUL - callers appear to always pass ilen > 0; confirm.
 */
static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
				int ilen, char *pref)
{
	int pos = 0;
	int i;
	int len;

	/* copy the prefix, leaving room for at least one hex pair */
	for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
		str[pos] = pref[i];

	/* snprintf NUL-terminates each step; the pos < slen - 2 guard
	 * keeps slen - pos - 1 >= 2, so the size argument stays valid */
	for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
		len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);

	if (i < ilen)
		return -1;

	return 0;
}
89
90/* NOTE: This function is not thread safe.
91 Currently it's called only from sdio rx worker - no race there
92*/
93void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
94{
95 struct top_msg *msg;
96 static char logbuf[LOG_MSG_SIZE_MAX];
97
98 msg = (struct top_msg *)buf;
99
100 if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
101 LOG_ERROR(priv, FW_MSG, "Log message from TOP "
102 "is too short %d (expected %zd)\n",
103 len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
104 return;
105 }
106
107 if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
108 BIT(msg->u.log.log_hdr.severity)) ||
109 !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
110 return;
111
112 switch (msg->hdr.category) {
113 case COMM_CATEGORY_TESTABILITY:
114 if (!(iwmct_logdefs[LOG_SRC_TST] &
115 BIT(msg->u.log.log_hdr.severity)))
116 return;
117 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
118 le16_to_cpu(msg->hdr.length) +
119 sizeof(msg->hdr), "<TST>"))
120 LOG_WARNING(priv, TST,
121 "TOP TST message is too long, truncating...");
122 LOG_WARNING(priv, TST, "%s\n", logbuf);
123 break;
124 case COMM_CATEGORY_DEBUG:
125 if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
126 LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
127 ((u8 *)msg) + sizeof(msg->hdr)
128 + sizeof(msg->u.log.log_hdr));
129 else {
130 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
131 le16_to_cpu(msg->hdr.length)
132 + sizeof(msg->hdr),
133 "<DBG>"))
134 LOG_WARNING(priv, FW_MSG,
135 "TOP DBG message is too long,"
136 "truncating...");
137 LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
138 }
139 break;
140 default:
141 break;
142 }
143}
144
145static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
146{
147 int i, pos, len;
148 for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
149 len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
150 i, logdefs[i]);
151 pos += len;
152 }
153 buf[pos-1] = '\n';
154 buf[pos] = '\0';
155
156 if (i < logdefsz)
157 return -1;
158 return 0;
159}
160
/* Format the driver-side log filter table into @buf; see
 * _log_get_filter_str() for the "0xSSMM," output format. */
int log_get_filter_str(char *buf, int size)
{
	return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
}

/* Same, for the cached firmware-side log filter table. */
int log_get_fw_filter_str(char *buf, int size)
{
	return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
}
170
171#define HEXADECIMAL_RADIX 16
172#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
173
174ssize_t show_iwmct_log_level(struct device *d,
175 struct device_attribute *attr, char *buf)
176{
177 struct iwmct_priv *priv = dev_get_drvdata(d);
178 char *str_buf;
179 int buf_size;
180 ssize_t ret;
181
182 buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
183 str_buf = kzalloc(buf_size, GFP_KERNEL);
184 if (!str_buf) {
185 LOG_ERROR(priv, DEBUGFS,
186 "failed to allocate %d bytes\n", buf_size);
187 ret = -ENOMEM;
188 goto exit;
189 }
190
191 if (log_get_filter_str(str_buf, buf_size) < 0) {
192 ret = -EINVAL;
193 goto exit;
194 }
195
196 ret = sprintf(buf, "%s", str_buf);
197
198exit:
199 kfree(str_buf);
200 return ret;
201}
202
203ssize_t store_iwmct_log_level(struct device *d,
204 struct device_attribute *attr,
205 const char *buf, size_t count)
206{
207 struct iwmct_priv *priv = dev_get_drvdata(d);
208 char *token, *str_buf = NULL;
209 long val;
210 ssize_t ret = count;
211 u8 src, mask;
212
213 if (!count)
214 goto exit;
215
216 str_buf = kzalloc(count, GFP_KERNEL);
217 if (!str_buf) {
218 LOG_ERROR(priv, DEBUGFS,
219 "failed to allocate %zd bytes\n", count);
220 ret = -ENOMEM;
221 goto exit;
222 }
223
224 memcpy(str_buf, buf, count);
225
226 while ((token = strsep(&str_buf, ",")) != NULL) {
227 while (isspace(*token))
228 ++token;
229 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
230 LOG_ERROR(priv, DEBUGFS,
231 "failed to convert string to long %s\n",
232 token);
233 ret = -EINVAL;
234 goto exit;
235 }
236
237 mask = val & 0xFF;
238 src = (val & 0XFF00) >> 8;
239 iwmct_log_set_filter(src, mask);
240 }
241
242exit:
243 kfree(str_buf);
244 return ret;
245}
246
247ssize_t show_iwmct_log_level_fw(struct device *d,
248 struct device_attribute *attr, char *buf)
249{
250 struct iwmct_priv *priv = dev_get_drvdata(d);
251 char *str_buf;
252 int buf_size;
253 ssize_t ret;
254
255 buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
256
257 str_buf = kzalloc(buf_size, GFP_KERNEL);
258 if (!str_buf) {
259 LOG_ERROR(priv, DEBUGFS,
260 "failed to allocate %d bytes\n", buf_size);
261 ret = -ENOMEM;
262 goto exit;
263 }
264
265 if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
266 ret = -EINVAL;
267 goto exit;
268 }
269
270 ret = sprintf(buf, "%s", str_buf);
271
272exit:
273 kfree(str_buf);
274 return ret;
275}
276
277ssize_t store_iwmct_log_level_fw(struct device *d,
278 struct device_attribute *attr,
279 const char *buf, size_t count)
280{
281 struct iwmct_priv *priv = dev_get_drvdata(d);
282 struct top_msg cmd;
283 char *token, *str_buf = NULL;
284 ssize_t ret = count;
285 u16 cmdlen = 0;
286 int i;
287 long val;
288 u8 src, mask;
289
290 if (!count)
291 goto exit;
292
293 str_buf = kzalloc(count, GFP_KERNEL);
294 if (!str_buf) {
295 LOG_ERROR(priv, DEBUGFS,
296 "failed to allocate %zd bytes\n", count);
297 ret = -ENOMEM;
298 goto exit;
299 }
300
301 memcpy(str_buf, buf, count);
302
303 cmd.hdr.type = COMM_TYPE_H2D;
304 cmd.hdr.category = COMM_CATEGORY_DEBUG;
305 cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
306
307 for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
308 (i < FW_LOG_SRC_MAX); i++) {
309
310 while (isspace(*token))
311 ++token;
312
313 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
314 LOG_ERROR(priv, DEBUGFS,
315 "failed to convert string to long %s\n",
316 token);
317 ret = -EINVAL;
318 goto exit;
319 }
320
321 mask = val & 0xFF; /* LSB */
322 src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
323 iwmct_log_set_fw_filter(src, mask);
324
325 cmd.u.logdefs[i].logsource = src;
326 cmd.u.logdefs[i].sevmask = mask;
327 }
328
329 cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
330 cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
331
332 ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
333 if (ret) {
334 LOG_ERROR(priv, DEBUGFS,
335 "Failed to send %d bytes of fwcmd, ret=%zd\n",
336 cmdlen, ret);
337 goto exit;
338 } else
339 LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
340
341 ret = count;
342
343exit:
344 kfree(str_buf);
345 return ret;
346}
347
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
new file mode 100644
index 000000000000..aba8121f978c
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.h
@@ -0,0 +1,158 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __LOG_H__
28#define __LOG_H__
29
30
31/* log severity:
32 * The log levels here match FW log levels
33 * so values need to stay as is */
34#define LOG_SEV_CRITICAL 0
35#define LOG_SEV_ERROR 1
36#define LOG_SEV_WARNING 2
37#define LOG_SEV_INFO 3
38#define LOG_SEV_INFOEX 4
39
40#define LOG_SEV_FILTER_ALL \
41 (BIT(LOG_SEV_CRITICAL) | \
42 BIT(LOG_SEV_ERROR) | \
43 BIT(LOG_SEV_WARNING) | \
44 BIT(LOG_SEV_INFO) | \
45 BIT(LOG_SEV_INFOEX))
46
47/* log source */
48#define LOG_SRC_INIT 0
49#define LOG_SRC_DEBUGFS 1
50#define LOG_SRC_FW_DOWNLOAD 2
51#define LOG_SRC_FW_MSG 3
52#define LOG_SRC_TST 4
53#define LOG_SRC_IRQ 5
54
55#define LOG_SRC_MAX 6
56#define LOG_SRC_ALL 0xFF
57
58/**
59 * Default intitialization runtime log level
60 */
61#ifndef LOG_SEV_FILTER_RUNTIME
62#define LOG_SEV_FILTER_RUNTIME \
63 (BIT(LOG_SEV_CRITICAL) | \
64 BIT(LOG_SEV_ERROR) | \
65 BIT(LOG_SEV_WARNING))
66#endif
67
68#ifndef FW_LOG_SEV_FILTER_RUNTIME
69#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
70#endif
71
72#ifdef CONFIG_IWMC3200TOP_DEBUG
73/**
74 * Log macros
75 */
76
77#define priv2dev(priv) (&(priv->func)->dev)
78
79#define LOG_CRITICAL(priv, src, fmt, args...) \
80do { \
81 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
82 dev_crit(priv2dev(priv), "%s %d: " fmt, \
83 __func__, __LINE__, ##args); \
84} while (0)
85
86#define LOG_ERROR(priv, src, fmt, args...) \
87do { \
88 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
89 dev_err(priv2dev(priv), "%s %d: " fmt, \
90 __func__, __LINE__, ##args); \
91} while (0)
92
93#define LOG_WARNING(priv, src, fmt, args...) \
94do { \
95 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
96 dev_warn(priv2dev(priv), "%s %d: " fmt, \
97 __func__, __LINE__, ##args); \
98} while (0)
99
100#define LOG_INFO(priv, src, fmt, args...) \
101do { \
102 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
103 dev_info(priv2dev(priv), "%s %d: " fmt, \
104 __func__, __LINE__, ##args); \
105} while (0)
106
107#define LOG_INFOEX(priv, src, fmt, args...) \
108do { \
109 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
110 dev_dbg(priv2dev(priv), "%s %d: " fmt, \
111 __func__, __LINE__, ##args); \
112} while (0)
113
114#define LOG_HEXDUMP(src, ptr, len) \
115do { \
116 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
117 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
118 16, 1, ptr, len, false); \
119} while (0)
120
121void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
122
123extern u8 iwmct_logdefs[];
124
125int iwmct_log_set_filter(u8 src, u8 logmask);
126int iwmct_log_set_fw_filter(u8 src, u8 logmask);
127
128ssize_t show_iwmct_log_level(struct device *d,
129 struct device_attribute *attr, char *buf);
130ssize_t store_iwmct_log_level(struct device *d,
131 struct device_attribute *attr,
132 const char *buf, size_t count);
133ssize_t show_iwmct_log_level_fw(struct device *d,
134 struct device_attribute *attr, char *buf);
135ssize_t store_iwmct_log_level_fw(struct device *d,
136 struct device_attribute *attr,
137 const char *buf, size_t count);
138
139#else
140
141#define LOG_CRITICAL(priv, src, fmt, args...)
142#define LOG_ERROR(priv, src, fmt, args...)
143#define LOG_WARNING(priv, src, fmt, args...)
144#define LOG_INFO(priv, src, fmt, args...)
145#define LOG_INFOEX(priv, src, fmt, args...)
146#define LOG_HEXDUMP(src, ptr, len)
147
148static inline void iwmct_log_top_message(struct iwmct_priv *priv,
149 u8 *buf, int len) {}
150static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
151static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
152
153#endif /* CONFIG_IWMC3200TOP_DEBUG */
154
155int log_get_filter_str(char *buf, int size);
156int log_get_fw_filter_str(char *buf, int size);
157
158#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
new file mode 100644
index 000000000000..fafcaa481d74
--- /dev/null
+++ b/drivers/misc/iwmc3200top/main.c
@@ -0,0 +1,678 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/main.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/kernel.h>
30#include <linux/debugfs.h>
31#include <linux/mmc/sdio_ids.h>
32#include <linux/mmc/sdio_func.h>
33#include <linux/mmc/sdio.h>
34
35#include "iwmc3200top.h"
36#include "log.h"
37#include "fw-msg.h"
38#include "debugfs.h"
39
40
41#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
42#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
43
44#define DRIVER_VERSION "0.1.62"
45
46MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
47MODULE_VERSION(DRIVER_VERSION);
48MODULE_LICENSE("GPL");
49MODULE_AUTHOR(DRIVER_COPYRIGHT);
50MODULE_FIRMWARE(FW_NAME(FW_API_VER));
51
52/*
53 * This workers main task is to wait for OP_OPR_ALIVE
54 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
55 * When OP_OPR_ALIVE received it will issue
56 * a call to "bus_rescan_devices".
57 */
58static void iwmct_rescan_worker(struct work_struct *ws)
59{
60 struct iwmct_priv *priv;
61 int ret;
62
63 priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
64
65 LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
66
67 ret = bus_rescan_devices(priv->func->dev.bus);
68 if (ret < 0)
69 LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
70}
71
72static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
73{
74 switch (msg->hdr.opcode) {
75 case OP_OPR_ALIVE:
76 LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
77 queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
78 break;
79 default:
80 LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
81 msg->hdr.opcode);
82 break;
83 }
84}
85
86
87static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
88{
89 struct top_msg *msg;
90
91 msg = (struct top_msg *)buf;
92
93 if (msg->hdr.type != COMM_TYPE_D2H) {
94 LOG_ERROR(priv, FW_MSG,
95 "Message from TOP with invalid message type 0x%X\n",
96 msg->hdr.type);
97 return;
98 }
99
100 if (len < sizeof(msg->hdr)) {
101 LOG_ERROR(priv, FW_MSG,
102 "Message from TOP is too short for message header "
103 "received %d bytes, expected at least %zd bytes\n",
104 len, sizeof(msg->hdr));
105 return;
106 }
107
108 if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
109 LOG_ERROR(priv, FW_MSG,
110 "Message length (%d bytes) is shorter than "
111 "in header (%d bytes)\n",
112 len, le16_to_cpu(msg->hdr.length));
113 return;
114 }
115
116 switch (msg->hdr.category) {
117 case COMM_CATEGORY_OPERATIONAL:
118 op_top_message(priv, (struct top_msg *)buf);
119 break;
120
121 case COMM_CATEGORY_DEBUG:
122 case COMM_CATEGORY_TESTABILITY:
123 case COMM_CATEGORY_DIAGNOSTICS:
124 iwmct_log_top_message(priv, buf, len);
125 break;
126
127 default:
128 LOG_ERROR(priv, FW_MSG,
129 "Message from TOP with unknown category 0x%X\n",
130 msg->hdr.category);
131 break;
132 }
133}
134
135int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
136{
137 int ret;
138 u8 *buf;
139
140 LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
141
142 /* add padding to 256 for IWMC */
143 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
144
145 LOG_HEXDUMP(FW_MSG, cmd, len);
146
147 if (len > FW_HCMD_BLOCK_SIZE) {
148 LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
149 len, FW_HCMD_BLOCK_SIZE);
150 return -1;
151 }
152
153 buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
154 if (!buf) {
155 LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
156 FW_HCMD_BLOCK_SIZE);
157 return -1;
158 }
159
160 memcpy(buf, cmd, len);
161
162 sdio_claim_host(priv->func);
163 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
164 FW_HCMD_BLOCK_SIZE);
165 sdio_release_host(priv->func);
166
167 kfree(buf);
168 return ret;
169}
170
/*
 * iwmct_tx - write @count bytes from @src to the device at SDIO
 * address @addr, claiming the host around the transfer.
 *
 * Returns the sdio_memcpy_toio() result (0 on success).
 */
int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
		void *src, int count)
{
	int ret;

	sdio_claim_host(priv->func);
	ret = sdio_memcpy_toio(priv->func, addr, src, count);
	sdio_release_host(priv->func);

	return ret;
}
182
/*
 * iwmct_irq_read_worker - services one read request queued by the ISR:
 * reads the pending data block from the device and either handles it
 * as a boot "barker" handshake word (ACK / REBOOT) or dispatches it as
 * a regular TOP CommHub message.
 */
static void iwmct_irq_read_worker(struct work_struct *ws)
{
	struct iwmct_priv *priv;
	struct iwmct_work_struct *read_req;
	__le32 *buf = NULL;
	int ret;
	int iosize;
	u32 barker;
	bool is_barker;

	priv = container_of(ws, struct iwmct_priv, isr_worker);

	LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);

	/* --------------------- Handshake with device -------------------- */
	sdio_claim_host(priv->func);

	/* all list manipulations have to be protected by
	 * sdio_claim_host/sdio_release_host */
	if (list_empty(&priv->read_req_list)) {
		LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
		goto exit_release;
	}

	/* pop the oldest request queued by the ISR; it only carries the
	 * transaction size */
	read_req = list_entry(priv->read_req_list.next,
			      struct iwmct_work_struct, list);

	list_del(&read_req->list);
	iosize = read_req->iosize;
	kfree(read_req);

	buf = kzalloc(iosize, GFP_KERNEL);
	if (!buf) {
		LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
		goto exit_release;
	}

	LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
		 iosize, buf, priv->func->num);

	/* read from device */
	ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
	if (ret) {
		LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
		goto exit_release;
	}

	LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);

	barker = le32_to_cpu(buf[0]);

	/* Verify whether it's a barker and if not - treat as regular Rx */
	if (barker == IWMC_BARKER_ACK ||
	    (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {

		/* Valid Barker is equal on first 4 dwords */
		is_barker = (buf[1] == buf[0]) &&
			    (buf[2] == buf[0]) &&
			    (buf[3] == buf[0]);

		if (!is_barker) {
			LOG_WARNING(priv, IRQ,
				    "Potentially inconsistent barker "
				    "%08X_%08X_%08X_%08X\n",
				    le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
				    le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
		}
	} else {
		is_barker = false;
	}

	/* Handle Top CommHub message */
	if (!is_barker) {
		/* release the host before the (potentially slow) message
		 * dispatch; buf is private to this worker */
		sdio_release_host(priv->func);
		handle_top_message(priv, (u8 *)buf, iosize);
		goto exit;
	} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
		if (atomic_read(&priv->dev_sync) == 0) {
			LOG_ERROR(priv, IRQ,
				  "ACK barker arrived out-of-sync\n");
			goto exit_release;
		}

		/* Continuing to FW download (after Sync is completed)*/
		atomic_set(&priv->dev_sync, 0);
		LOG_INFO(priv, IRQ, "ACK barker arrived "
			 "- starting FW download\n");
	} else { /* REBOOT barker */
		LOG_INFO(priv, IRQ, "Recieved reboot barker: %x\n", barker);
		priv->barker = barker;

		if (barker & BARKER_DNLOAD_SYNC_MSK) {
			/* Send the same barker back */
			ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
					       buf, iosize);
			if (ret) {
				LOG_ERROR(priv, IRQ,
					  "error %d echoing barker\n", ret);
				goto exit_release;
			}
			LOG_INFO(priv, IRQ, "Echoing barker to device\n");
			/* the download proceeds when the ACK barker lands */
			atomic_set(&priv->dev_sync, 1);
			goto exit_release;
		}

		/* Continuing to FW download (without Sync) */
		LOG_INFO(priv, IRQ, "No sync requested "
			 "- starting FW download\n");
	}

	sdio_release_host(priv->func);


	LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
	LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
		 (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
	LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
		 (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
	LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
		 (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");

	if (priv->dbg.fw_download)
		iwmct_fw_load(priv);
	else
		LOG_ERROR(priv, IRQ, "FW download not allowed\n");

	goto exit;

exit_release:
	sdio_release_host(priv->func);
exit:
	kfree(buf);
	LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
317
318static void iwmct_irq(struct sdio_func *func)
319{
320 struct iwmct_priv *priv;
321 int val, ret;
322 int iosize;
323 int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
324 struct iwmct_work_struct *read_req;
325
326 priv = sdio_get_drvdata(func);
327
328 LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
329
330 /* read the function's status register */
331 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
332
333 LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
334
335 if (!val) {
336 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
337 goto exit_clear_intr;
338 }
339
340
341 /*
342 * read 2 bytes of the transaction size
343 * IMPORTANT: sdio transaction size has to be read before clearing
344 * sdio interrupt!!!
345 */
346 val = sdio_readb(priv->func, addr++, &ret);
347 iosize = val;
348 val = sdio_readb(priv->func, addr++, &ret);
349 iosize += val << 8;
350
351 LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
352
353 if (iosize == 0) {
354 LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
355 goto exit_clear_intr;
356 }
357
358 /* allocate a work structure to pass iosize to the worker */
359 read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
360 if (!read_req) {
361 LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
362 goto exit_clear_intr;
363 }
364
365 INIT_LIST_HEAD(&read_req->list);
366 read_req->iosize = iosize;
367
368 list_add_tail(&priv->read_req_list, &read_req->list);
369
370 /* clear the function's interrupt request bit (write 1 to clear) */
371 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
372
373 queue_work(priv->wq, &priv->isr_worker);
374
375 LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
376
377 return;
378
379exit_clear_intr:
380 /* clear the function's interrupt request bit (write 1 to clear) */
381 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
382}
383
384
385static int blocks;
386module_param(blocks, int, 0604);
387MODULE_PARM_DESC(blocks, "max_blocks_to_send");
388
389static int dump;
390module_param(dump, bool, 0604);
391MODULE_PARM_DESC(dump, "dump_hex_content");
392
393static int jump = 1;
394module_param(jump, bool, 0604);
395
396static int direct = 1;
397module_param(direct, bool, 0604);
398
399static int checksum = 1;
400module_param(checksum, bool, 0604);
401
402static int fw_download = 1;
403module_param(fw_download, bool, 0604);
404
405static int block_size = IWMC_SDIO_BLK_SIZE;
406module_param(block_size, int, 0404);
407
408static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
409module_param(download_trans_blks, int, 0604);
410
411static int rubbish_barker;
412module_param(rubbish_barker, bool, 0604);
413
414#ifdef CONFIG_IWMC3200TOP_DEBUG
415static int log_level[LOG_SRC_MAX];
416static unsigned int log_level_argc;
417module_param_array(log_level, int, &log_level_argc, 0604);
418MODULE_PARM_DESC(log_level, "log_level");
419
420static int log_level_fw[FW_LOG_SRC_MAX];
421static unsigned int log_level_fw_argc;
422module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
423MODULE_PARM_DESC(log_level_fw, "log_level_fw");
424#endif
425
426void iwmct_dbg_init_params(struct iwmct_priv *priv)
427{
428#ifdef CONFIG_IWMC3200TOP_DEBUG
429 int i;
430
431 for (i = 0; i < log_level_argc; i++) {
432 dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
433 i, log_level[i]);
434 iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
435 log_level[i] & 0xFF);
436 }
437 for (i = 0; i < log_level_fw_argc; i++) {
438 dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
439 i, log_level_fw[i]);
440 iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
441 log_level_fw[i] & 0xFF);
442 }
443#endif
444
445 priv->dbg.blocks = blocks;
446 LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
447 priv->dbg.dump = (bool)dump;
448 LOG_INFO(priv, INIT, "dump=%d\n", dump);
449 priv->dbg.jump = (bool)jump;
450 LOG_INFO(priv, INIT, "jump=%d\n", jump);
451 priv->dbg.direct = (bool)direct;
452 LOG_INFO(priv, INIT, "direct=%d\n", direct);
453 priv->dbg.checksum = (bool)checksum;
454 LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
455 priv->dbg.fw_download = (bool)fw_download;
456 LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
457 priv->dbg.block_size = block_size;
458 LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
459 priv->dbg.download_trans_blks = download_trans_blks;
460 LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
461}
462
463/*****************************************************************************
464 *
465 * sysfs attributes
466 *
467 *****************************************************************************/
468static ssize_t show_iwmct_fw_version(struct device *d,
469 struct device_attribute *attr, char *buf)
470{
471 struct iwmct_priv *priv = dev_get_drvdata(d);
472 return sprintf(buf, "%s\n", priv->dbg.label_fw);
473}
474static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
475
476#ifdef CONFIG_IWMC3200TOP_DEBUG
477static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
478 show_iwmct_log_level, store_iwmct_log_level);
479static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
480 show_iwmct_log_level_fw, store_iwmct_log_level_fw);
481#endif
482
483static struct attribute *iwmct_sysfs_entries[] = {
484 &dev_attr_cc_label_fw.attr,
485#ifdef CONFIG_IWMC3200TOP_DEBUG
486 &dev_attr_log_level.attr,
487 &dev_attr_log_level_fw.attr,
488#endif
489 NULL
490};
491
492static struct attribute_group iwmct_attribute_group = {
493 .name = NULL, /* put in device directory */
494 .attrs = iwmct_sysfs_entries,
495};
496
497
498static int iwmct_probe(struct sdio_func *func,
499 const struct sdio_device_id *id)
500{
501 struct iwmct_priv *priv;
502 int ret;
503 int val = 1;
504 int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
505
506 dev_dbg(&func->dev, "enter iwmct_probe\n");
507
508 dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
509 jiffies_to_msecs(2147483647), HZ);
510
511 priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
512 if (!priv) {
513 dev_err(&func->dev, "kzalloc error\n");
514 return -ENOMEM;
515 }
516 priv->func = func;
517 sdio_set_drvdata(func, priv);
518
519
520 /* create drivers work queue */
521 priv->wq = create_workqueue(DRV_NAME "_wq");
522 priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
523 INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
524 INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
525
526 init_waitqueue_head(&priv->wait_q);
527
528 sdio_claim_host(func);
529 /* FIXME: Remove after it is fixed in the Boot ROM upgrade */
530 func->enable_timeout = 10;
531
532 /* In our HW, setting the block size also wakes up the boot rom. */
533 ret = sdio_set_block_size(func, priv->dbg.block_size);
534 if (ret) {
535 LOG_ERROR(priv, INIT,
536 "sdio_set_block_size() failure: %d\n", ret);
537 goto error_sdio_enable;
538 }
539
540 ret = sdio_enable_func(func);
541 if (ret) {
542 LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
543 goto error_sdio_enable;
544 }
545
546 /* init reset and dev_sync states */
547 atomic_set(&priv->reset, 0);
548 atomic_set(&priv->dev_sync, 0);
549
550 /* init read req queue */
551 INIT_LIST_HEAD(&priv->read_req_list);
552
553 /* process configurable parameters */
554 iwmct_dbg_init_params(priv);
555 ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
556 if (ret) {
557 LOG_ERROR(priv, INIT, "Failed to register attributes and "
558 "initialize module_params\n");
559 goto error_dev_attrs;
560 }
561
562 iwmct_dbgfs_register(priv, DRV_NAME);
563
564 if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
565 LOG_INFO(priv, INIT,
566 "Reducing transaction to 8 blocks = 2K (from %d)\n",
567 priv->dbg.download_trans_blks);
568 priv->dbg.download_trans_blks = 8;
569 }
570 priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
571 LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
572
573 ret = sdio_claim_irq(func, iwmct_irq);
574 if (ret) {
575 LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
576 goto error_claim_irq;
577 }
578
579
580 /* Enable function's interrupt */
581 sdio_writeb(priv->func, val, addr, &ret);
582 if (ret) {
583 LOG_ERROR(priv, INIT, "Failure writing to "
584 "Interrupt Enable Register (%d): %d\n", addr, ret);
585 goto error_enable_int;
586 }
587
588 sdio_release_host(func);
589
590 LOG_INFO(priv, INIT, "exit iwmct_probe\n");
591
592 return ret;
593
594error_enable_int:
595 sdio_release_irq(func);
596error_claim_irq:
597 sdio_disable_func(func);
598error_dev_attrs:
599 iwmct_dbgfs_unregister(priv->dbgfs);
600 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
601error_sdio_enable:
602 sdio_release_host(func);
603 return ret;
604}
605
606static void iwmct_remove(struct sdio_func *func)
607{
608 struct iwmct_work_struct *read_req;
609 struct iwmct_priv *priv = sdio_get_drvdata(func);
610
611 priv = sdio_get_drvdata(func);
612
613 LOG_INFO(priv, INIT, "enter\n");
614
615 sdio_claim_host(func);
616 sdio_release_irq(func);
617 sdio_release_host(func);
618
619 /* Safely destroy osc workqueue */
620 destroy_workqueue(priv->bus_rescan_wq);
621 destroy_workqueue(priv->wq);
622
623 sdio_claim_host(func);
624 sdio_disable_func(func);
625 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
626 iwmct_dbgfs_unregister(priv->dbgfs);
627 sdio_release_host(func);
628
629 /* free read requests */
630 while (!list_empty(&priv->read_req_list)) {
631 read_req = list_entry(priv->read_req_list.next,
632 struct iwmct_work_struct, list);
633
634 list_del(&read_req->list);
635 kfree(read_req);
636 }
637
638 kfree(priv);
639}
640
641
/* SDIO IDs handled by this driver; exported for module autoloading */
static const struct sdio_device_id iwmct_ids[] = {
	/* Intel Wireless MultiCom 3200 Top Driver */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
	{ },	/* Terminating entry */
};

MODULE_DEVICE_TABLE(sdio, iwmct_ids);
649
/* SDIO driver glue: binds iwmct_probe/iwmct_remove to the ID table above */
static struct sdio_driver iwmct_driver = {
	.probe = iwmct_probe,
	.remove = iwmct_remove,
	.name = DRV_NAME,
	.id_table = iwmct_ids,
};
656
657static int __init iwmct_init(void)
658{
659 int rc;
660
661 /* Default log filter settings */
662 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
663 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
664 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
665
666 rc = sdio_register_driver(&iwmct_driver);
667
668 return rc;
669}
670
/* Module exit: unregister the driver; remove() handles per-device teardown */
static void __exit iwmct_exit(void)
{
	sdio_unregister_driver(&iwmct_driver);
}

module_init(iwmct_init);
module_exit(iwmct_exit);
678
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index f60309175ef5..62ceb2b4820c 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -345,7 +345,7 @@ static int el_open(struct net_device *dev)
345 if (el_debug > 2) 345 if (el_debug > 2)
346 pr_debug("%s: Doing el_open()...\n", dev->name); 346 pr_debug("%s: Doing el_open()...\n", dev->name);
347 347
348 retval = request_irq(dev->irq, &el_interrupt, 0, dev->name, dev); 348 retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
349 if (retval) 349 if (retval)
350 return retval; 350 return retval;
351 351
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index a21c9d15ef8a..9257d7ce0378 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -886,7 +886,7 @@ static int elp_open(struct net_device *dev)
886 /* 886 /*
887 * install our interrupt service routine 887 * install our interrupt service routine
888 */ 888 */
889 if ((retval = request_irq(dev->irq, &elp_interrupt, 0, dev->name, dev))) { 889 if ((retval = request_irq(dev->irq, elp_interrupt, 0, dev->name, dev))) {
890 pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq); 890 pr_err("%s: could not allocate IRQ%d\n", dev->name, dev->irq);
891 return retval; 891 return retval;
892 } 892 }
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index a6dc8bcbc7df..605f1d17a8f3 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -399,7 +399,7 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
399 399
400 irq = inb(ioaddr + IRQ_CONFIG) & 0x0f; 400 irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
401 401
402 irqval = request_irq(irq, &el16_interrupt, 0, DRV_NAME, dev); 402 irqval = request_irq(irq, el16_interrupt, 0, DRV_NAME, dev);
403 if (irqval) { 403 if (irqval) {
404 pr_cont("\n"); 404 pr_cont("\n");
405 pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval); 405 pr_err("3c507: unable to get IRQ %d (irqval=%d).\n", irq, irqval);
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 3b00a4e927aa..8d4ce0964073 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -780,7 +780,7 @@ el3_open(struct net_device *dev)
780 outw(RxReset, ioaddr + EL3_CMD); 780 outw(RxReset, ioaddr + EL3_CMD);
781 outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); 781 outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
782 782
783 i = request_irq(dev->irq, &el3_interrupt, 0, dev->name, dev); 783 i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev);
784 if (i) 784 if (i)
785 return i; 785 return i;
786 786
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4adcb950f5f1..37faf36e2457 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -766,11 +766,11 @@ static int corkscrew_open(struct net_device *dev)
766 /* Corkscrew: Cannot share ISA resources. */ 766 /* Corkscrew: Cannot share ISA resources. */
767 if (dev->irq == 0 767 if (dev->irq == 0
768 || dev->dma == 0 768 || dev->dma == 0
769 || request_irq(dev->irq, &corkscrew_interrupt, 0, 769 || request_irq(dev->irq, corkscrew_interrupt, 0,
770 vp->product_name, dev)) return -EAGAIN; 770 vp->product_name, dev)) return -EAGAIN;
771 enable_dma(dev->dma); 771 enable_dma(dev->dma);
772 set_dma_mode(dev->dma, DMA_MODE_CASCADE); 772 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
773 } else if (request_irq(dev->irq, &corkscrew_interrupt, IRQF_SHARED, 773 } else if (request_irq(dev->irq, corkscrew_interrupt, IRQF_SHARED,
774 vp->product_name, dev)) { 774 vp->product_name, dev)) {
775 return -EAGAIN; 775 return -EAGAIN;
776 } 776 }
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index cb0b730799ba..27d80ca5e4c0 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -288,7 +288,7 @@ static int elmc_open(struct net_device *dev)
288 288
289 elmc_id_attn586(); /* disable interrupts */ 289 elmc_id_attn586(); /* disable interrupts */
290 290
291 ret = request_irq(dev->irq, &elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, 291 ret = request_irq(dev->irq, elmc_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM,
292 dev->name, dev); 292 dev->name, dev);
293 if (ret) { 293 if (ret) {
294 pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq); 294 pr_err("%s: couldn't get irq %d\n", dev->name, dev->irq);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 6021e6dded8f..d91c3464fe72 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -443,7 +443,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
443 * Grab the IRQ 443 * Grab the IRQ
444 */ 444 */
445 445
446 err = request_irq(dev->irq, &mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev); 446 err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED | IRQF_SAMPLE_RANDOM, DRV_NAME, dev);
447 if (err) { 447 if (err) {
448 release_region(dev->base_addr, MC32_IO_EXTENT); 448 release_region(dev->base_addr, MC32_IO_EXTENT);
449 pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq); 449 pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 975e25b19ebe..32031eaf4910 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2560,7 +2560,7 @@ boomerang_rx(struct net_device *dev)
2560 struct sk_buff *skb; 2560 struct sk_buff *skb;
2561 entry = vp->dirty_rx % RX_RING_SIZE; 2561 entry = vp->dirty_rx % RX_RING_SIZE;
2562 if (vp->rx_skbuff[entry] == NULL) { 2562 if (vp->rx_skbuff[entry] == NULL) {
2563 skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); 2563 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2564 if (skb == NULL) { 2564 if (skb == NULL) {
2565 static unsigned long last_jif; 2565 static unsigned long last_jif;
2566 if (time_after(jiffies, last_jif + 10 * HZ)) { 2566 if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2572,7 +2572,6 @@ boomerang_rx(struct net_device *dev)
2572 break; /* Bad news! */ 2572 break; /* Bad news! */
2573 } 2573 }
2574 2574
2575 skb_reserve(skb, NET_IP_ALIGN);
2576 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2575 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2577 vp->rx_skbuff[entry] = skb; 2576 vp->rx_skbuff[entry] = skb;
2578 } 2577 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 83a1922e68e0..ab451bb8995a 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -549,14 +549,12 @@ rx_status_loop:
549 pr_debug("%s: rx slot %d status 0x%x len %d\n", 549 pr_debug("%s: rx slot %d status 0x%x len %d\n",
550 dev->name, rx_tail, status, len); 550 dev->name, rx_tail, status, len);
551 551
552 new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN); 552 new_skb = netdev_alloc_skb_ip_align(dev, buflen);
553 if (!new_skb) { 553 if (!new_skb) {
554 dev->stats.rx_dropped++; 554 dev->stats.rx_dropped++;
555 goto rx_next; 555 goto rx_next;
556 } 556 }
557 557
558 skb_reserve(new_skb, NET_IP_ALIGN);
559
560 dma_unmap_single(&cp->pdev->dev, mapping, 558 dma_unmap_single(&cp->pdev->dev, mapping,
561 buflen, PCI_DMA_FROMDEVICE); 559 buflen, PCI_DMA_FROMDEVICE);
562 560
@@ -1057,12 +1055,10 @@ static int cp_refill_rx(struct cp_private *cp)
1057 struct sk_buff *skb; 1055 struct sk_buff *skb;
1058 dma_addr_t mapping; 1056 dma_addr_t mapping;
1059 1057
1060 skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN); 1058 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1061 if (!skb) 1059 if (!skb)
1062 goto err_out; 1060 goto err_out;
1063 1061
1064 skb_reserve(skb, NET_IP_ALIGN);
1065
1066 mapping = dma_map_single(&cp->pdev->dev, skb->data, 1062 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1067 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1063 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1068 cp->rx_skb[i] = skb; 1064 cp->rx_skb[i] = skb;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4a3628755026..7e333f73b228 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2004,9 +2004,8 @@ no_early_rx:
2004 /* Malloc up new buffer, compatible with net-2e. */ 2004 /* Malloc up new buffer, compatible with net-2e. */
2005 /* Omit the four octet CRC from the length. */ 2005 /* Omit the four octet CRC from the length. */
2006 2006
2007 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN); 2007 skb = netdev_alloc_skb_ip_align(dev, pkt_size);
2008 if (likely(skb)) { 2008 if (likely(skb)) {
2009 skb_reserve (skb, NET_IP_ALIGN); /* 16 byte align the IP fields. */
2010#if RX_BUF_IDX == 3 2009#if RX_BUF_IDX == 3
2011 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2010 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
2012#else 2011#else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b2f71f79baaf..0bbd5ae49862 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1001,7 +1001,7 @@ config SMC911X
1001 1001
1002config SMSC911X 1002config SMSC911X
1003 tristate "SMSC LAN911x/LAN921x families embedded ethernet support" 1003 tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
1004 depends on ARM || SUPERH || BLACKFIN 1004 depends on ARM || SUPERH || BLACKFIN || MIPS
1005 select CRC32 1005 select CRC32
1006 select MII 1006 select MII
1007 select PHYLIB 1007 select PHYLIB
@@ -3235,7 +3235,7 @@ config VIRTIO_NET
3235 3235
3236config VMXNET3 3236config VMXNET3
3237 tristate "VMware VMXNET3 ethernet driver" 3237 tristate "VMware VMXNET3 ethernet driver"
3238 depends on PCI && X86 && INET 3238 depends on PCI && INET
3239 help 3239 help
3240 This driver supports VMware's vmxnet3 virtual ethernet NIC. 3240 This driver supports VMware's vmxnet3 virtual ethernet NIC.
3241 To compile this driver as a module, choose M here: the 3241 To compile this driver as a module, choose M here: the
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index b5dc7f550725..50cecf417471 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -328,7 +328,7 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
328 328
329 /* Reserve any actual interrupt. */ 329 /* Reserve any actual interrupt. */
330 if (dev->irq) { 330 if (dev->irq) {
331 retval = request_irq(dev->irq, &cops_interrupt, 0, dev->name, dev); 331 retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
332 if (retval) 332 if (retval)
333 goto err_out; 333 goto err_out;
334 } 334 }
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 08760baece7a..dbfbd3b7ff86 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1158,7 +1158,7 @@ struct net_device * __init ltpc_probe(void)
1158 } 1158 }
1159 1159
1160 /* grab it and don't let go :-) */ 1160 /* grab it and don't let go :-) */
1161 if (irq && request_irq( irq, &ltpc_interrupt, 0, "ltpc", dev) >= 0) 1161 if (irq && request_irq( irq, ltpc_interrupt, 0, "ltpc", dev) >= 0)
1162 { 1162 {
1163 (void) inb_p(io+7); /* enable interrupts from board */ 1163 (void) inb_p(io+7); /* enable interrupts from board */
1164 (void) inb_p(io+7); /* and reset irq line */ 1164 (void) inb_p(io+7); /* and reset irq line */
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index e3082a9350fc..c5b988140a91 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -156,7 +156,7 @@ static int __init arcrimi_found(struct net_device *dev)
156 } 156 }
157 157
158 /* reserve the irq */ 158 /* reserve the irq */
159 if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { 159 if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
160 iounmap(p); 160 iounmap(p);
161 release_mem_region(dev->mem_start, MIRROR_SIZE); 161 release_mem_region(dev->mem_start, MIRROR_SIZE);
162 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); 162 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 651275a5f3d2..0a74f21409c5 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -200,7 +200,7 @@ int com20020_found(struct net_device *dev, int shared)
200 outb(dev->dev_addr[0], _XREG); 200 outb(dev->dev_addr[0], _XREG);
201 201
202 /* reserve the irq */ 202 /* reserve the irq */
203 if (request_irq(dev->irq, &arcnet_interrupt, shared, 203 if (request_irq(dev->irq, arcnet_interrupt, shared,
204 "arcnet (COM20020)", dev)) { 204 "arcnet (COM20020)", dev)) {
205 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); 205 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
206 return -ENODEV; 206 return -ENODEV;
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 89de29b3b1dc..28dea518d554 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -238,7 +238,7 @@ static int __init com90io_found(struct net_device *dev)
238 int err; 238 int err;
239 239
240 /* Reserve the irq */ 240 /* Reserve the irq */
241 if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) { 241 if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
242 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); 242 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
243 return -ENODEV; 243 return -ENODEV;
244 } 244 }
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index d762fe46251e..112e230cb13d 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -501,7 +501,7 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem, void __iomem
501 goto err_free_dev; 501 goto err_free_dev;
502 502
503 /* reserve the irq */ 503 /* reserve the irq */
504 if (request_irq(airq, &arcnet_interrupt, 0, "arcnet (90xx)", dev)) { 504 if (request_irq(airq, arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
505 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq); 505 BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq);
506 goto err_release_mem; 506 goto err_release_mem;
507 } 507 }
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 2a7b7745cc55..be256b34cea8 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -35,11 +35,13 @@
35 35
36#include <mach/regs-switch.h> 36#include <mach/regs-switch.h>
37#include <mach/regs-misc.h> 37#include <mach/regs-misc.h>
38#include <asm/mach/irq.h>
39#include <mach/regs-irq.h>
38 40
39#include "ks8695net.h" 41#include "ks8695net.h"
40 42
41#define MODULENAME "ks8695_ether" 43#define MODULENAME "ks8695_ether"
42#define MODULEVERSION "1.01" 44#define MODULEVERSION "1.02"
43 45
44/* 46/*
45 * Transmit and device reset timeout, default 5 seconds. 47 * Transmit and device reset timeout, default 5 seconds.
@@ -95,6 +97,9 @@ struct ks8695_skbuff {
95#define MAX_RX_DESC 16 97#define MAX_RX_DESC 16
96#define MAX_RX_DESC_MASK 0xf 98#define MAX_RX_DESC_MASK 0xf
97 99
100/*napi_weight have better more than rx DMA buffers*/
101#define NAPI_WEIGHT 64
102
98#define MAX_RXBUF_SIZE 0x700 103#define MAX_RXBUF_SIZE 0x700
99 104
100#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC) 105#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
@@ -120,6 +125,7 @@ enum ks8695_dtype {
120 * @dev: The platform device object for this interface 125 * @dev: The platform device object for this interface
121 * @dtype: The type of this device 126 * @dtype: The type of this device
122 * @io_regs: The ioremapped registers for this interface 127 * @io_regs: The ioremapped registers for this interface
128 * @napi : Add support NAPI for Rx
123 * @rx_irq_name: The textual name of the RX IRQ from the platform data 129 * @rx_irq_name: The textual name of the RX IRQ from the platform data
124 * @tx_irq_name: The textual name of the TX IRQ from the platform data 130 * @tx_irq_name: The textual name of the TX IRQ from the platform data
125 * @link_irq_name: The textual name of the link IRQ from the 131 * @link_irq_name: The textual name of the link IRQ from the
@@ -143,6 +149,7 @@ enum ks8695_dtype {
143 * @rx_ring_dma: The DMA mapped equivalent of rx_ring 149 * @rx_ring_dma: The DMA mapped equivalent of rx_ring
144 * @rx_buffers: The sk_buff mappings for the RX ring 150 * @rx_buffers: The sk_buff mappings for the RX ring
145 * @next_rx_desc_read: The next RX descriptor to read from on IRQ 151 * @next_rx_desc_read: The next RX descriptor to read from on IRQ
152 * @rx_lock: A lock to protect Rx irq function
146 * @msg_enable: The flags for which messages to emit 153 * @msg_enable: The flags for which messages to emit
147 */ 154 */
148struct ks8695_priv { 155struct ks8695_priv {
@@ -152,6 +159,8 @@ struct ks8695_priv {
152 enum ks8695_dtype dtype; 159 enum ks8695_dtype dtype;
153 void __iomem *io_regs; 160 void __iomem *io_regs;
154 161
162 struct napi_struct napi;
163
155 const char *rx_irq_name, *tx_irq_name, *link_irq_name; 164 const char *rx_irq_name, *tx_irq_name, *link_irq_name;
156 int rx_irq, tx_irq, link_irq; 165 int rx_irq, tx_irq, link_irq;
157 166
@@ -172,6 +181,7 @@ struct ks8695_priv {
172 dma_addr_t rx_ring_dma; 181 dma_addr_t rx_ring_dma;
173 struct ks8695_skbuff rx_buffers[MAX_RX_DESC]; 182 struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
174 int next_rx_desc_read; 183 int next_rx_desc_read;
184 spinlock_t rx_lock;
175 185
176 int msg_enable; 186 int msg_enable;
177}; 187};
@@ -392,29 +402,74 @@ ks8695_tx_irq(int irq, void *dev_id)
392} 402}
393 403
394/** 404/**
405 * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
406 * @ksp: Private data for the KS8695 Ethernet
407 *
408 * For KS8695 document:
409 * Interrupt Enable Register (offset 0xE204)
410 * Bit29 : WAN MAC Receive Interrupt Enable
411 * Bit16 : LAN MAC Receive Interrupt Enable
412 * Interrupt Status Register (Offset 0xF208)
413 * Bit29: WAN MAC Receive Status
414 * Bit16: LAN MAC Receive Status
415 * So, this Rx interrrupt enable/status bit number is equal
416 * as Rx IRQ number.
417 */
418static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
419{
420 return ksp->rx_irq;
421}
422
423/**
395 * ks8695_rx_irq - Receive IRQ handler 424 * ks8695_rx_irq - Receive IRQ handler
396 * @irq: The IRQ which went off (ignored) 425 * @irq: The IRQ which went off (ignored)
397 * @dev_id: The net_device for the interrupt 426 * @dev_id: The net_device for the interrupt
398 * 427 *
399 * Process the RX ring, passing any received packets up to the 428 * Inform NAPI that packet reception needs to be scheduled
400 * host. If we received anything other than errors, we then
401 * refill the ring.
402 */ 429 */
430
403static irqreturn_t 431static irqreturn_t
404ks8695_rx_irq(int irq, void *dev_id) 432ks8695_rx_irq(int irq, void *dev_id)
405{ 433{
406 struct net_device *ndev = (struct net_device *)dev_id; 434 struct net_device *ndev = (struct net_device *)dev_id;
407 struct ks8695_priv *ksp = netdev_priv(ndev); 435 struct ks8695_priv *ksp = netdev_priv(ndev);
436
437 spin_lock(&ksp->rx_lock);
438
439 if (napi_schedule_prep(&ksp->napi)) {
440 unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
441 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
442 /*disable rx interrupt*/
443 status &= ~mask_bit;
444 writel(status , KS8695_IRQ_VA + KS8695_INTEN);
445 __napi_schedule(&ksp->napi);
446 }
447
448 spin_unlock(&ksp->rx_lock);
449 return IRQ_HANDLED;
450}
451
452/**
453 * ks8695_rx - Receive packets called by NAPI poll method
454 * @ksp: Private data for the KS8695 Ethernet
455 * @budget: The max packets would be receive
456 */
457
458static int ks8695_rx(struct ks8695_priv *ksp, int budget)
459{
460 struct net_device *ndev = ksp->ndev;
408 struct sk_buff *skb; 461 struct sk_buff *skb;
409 int buff_n; 462 int buff_n;
410 u32 flags; 463 u32 flags;
411 int pktlen; 464 int pktlen;
412 int last_rx_processed = -1; 465 int last_rx_processed = -1;
466 int received = 0;
413 467
414 buff_n = ksp->next_rx_desc_read; 468 buff_n = ksp->next_rx_desc_read;
415 do { 469 while (received < budget
416 if (ksp->rx_buffers[buff_n].skb && 470 && ksp->rx_buffers[buff_n].skb
417 !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) { 471 && (!(ksp->rx_ring[buff_n].status &
472 cpu_to_le32(RDES_OWN)))) {
418 rmb(); 473 rmb();
419 flags = le32_to_cpu(ksp->rx_ring[buff_n].status); 474 flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
420 /* Found an SKB which we own, this means we 475 /* Found an SKB which we own, this means we
@@ -464,7 +519,7 @@ ks8695_rx_irq(int irq, void *dev_id)
464 /* Relinquish the SKB to the network layer */ 519 /* Relinquish the SKB to the network layer */
465 skb_put(skb, pktlen); 520 skb_put(skb, pktlen);
466 skb->protocol = eth_type_trans(skb, ndev); 521 skb->protocol = eth_type_trans(skb, ndev);
467 netif_rx(skb); 522 netif_receive_skb(skb);
468 523
469 /* Record stats */ 524 /* Record stats */
470 ndev->stats.rx_packets++; 525 ndev->stats.rx_packets++;
@@ -478,29 +533,55 @@ rx_failure:
478 /* Give the ring entry back to the hardware */ 533 /* Give the ring entry back to the hardware */
479 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); 534 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
480rx_finished: 535rx_finished:
536 received++;
481 /* And note this as processed so we can start 537 /* And note this as processed so we can start
482 * from here next time 538 * from here next time
483 */ 539 */
484 last_rx_processed = buff_n; 540 last_rx_processed = buff_n;
485 } else { 541 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
486 /* Ran out of things to process, stop now */ 542 /*And note which RX descriptor we last did */
487 break; 543 if (likely(last_rx_processed != -1))
488 } 544 ksp->next_rx_desc_read =
489 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; 545 (last_rx_processed + 1) &
490 } while (buff_n != ksp->next_rx_desc_read); 546 MAX_RX_DESC_MASK;
491 547 }
492 /* And note which RX descriptor we last did anything with */
493 if (likely(last_rx_processed != -1))
494 ksp->next_rx_desc_read =
495 (last_rx_processed + 1) & MAX_RX_DESC_MASK;
496
497 /* And refill the buffers */ 548 /* And refill the buffers */
498 ks8695_refill_rxbuffers(ksp); 549 ks8695_refill_rxbuffers(ksp);
499 550
500 /* Kick the RX DMA engine, in case it became suspended */ 551 /* Kick the RX DMA engine, in case it became
552 * suspended */
501 ks8695_writereg(ksp, KS8695_DRSC, 0); 553 ks8695_writereg(ksp, KS8695_DRSC, 0);
554 return received;
555}
502 556
503 return IRQ_HANDLED; 557
558/**
559 * ks8695_poll - Receive packet by NAPI poll method
560 * @ksp: Private data for the KS8695 Ethernet
561 * @budget: The remaining number packets for network subsystem
562 *
563 * Invoked by the network core when it requests for new
564 * packets from the driver
565 */
566static int ks8695_poll(struct napi_struct *napi, int budget)
567{
568 struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
569 unsigned long work_done;
570
571 unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
572 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
573
574 work_done = ks8695_rx(ksp, budget);
575
576 if (work_done < budget) {
577 unsigned long flags;
578 spin_lock_irqsave(&ksp->rx_lock, flags);
579 /*enable rx interrupt*/
580 writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
581 __napi_complete(napi);
582 spin_unlock_irqrestore(&ksp->rx_lock, flags);
583 }
584 return work_done;
504} 585}
505 586
506/** 587/**
@@ -1253,6 +1334,7 @@ ks8695_stop(struct net_device *ndev)
1253 struct ks8695_priv *ksp = netdev_priv(ndev); 1334 struct ks8695_priv *ksp = netdev_priv(ndev);
1254 1335
1255 netif_stop_queue(ndev); 1336 netif_stop_queue(ndev);
1337 napi_disable(&ksp->napi);
1256 netif_carrier_off(ndev); 1338 netif_carrier_off(ndev);
1257 1339
1258 ks8695_shutdown(ksp); 1340 ks8695_shutdown(ksp);
@@ -1287,6 +1369,7 @@ ks8695_open(struct net_device *ndev)
1287 return ret; 1369 return ret;
1288 } 1370 }
1289 1371
1372 napi_enable(&ksp->napi);
1290 netif_start_queue(ndev); 1373 netif_start_queue(ndev);
1291 1374
1292 return 0; 1375 return 0;
@@ -1472,6 +1555,8 @@ ks8695_probe(struct platform_device *pdev)
1472 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); 1555 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1473 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 1556 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1474 1557
1558 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
1559
1475 /* Retrieve the default MAC addr from the chip. */ 1560 /* Retrieve the default MAC addr from the chip. */
1476 /* The bootloader should have left it in there for us. */ 1561 /* The bootloader should have left it in there for us. */
1477 1562
@@ -1505,6 +1590,7 @@ ks8695_probe(struct platform_device *pdev)
1505 1590
1506 /* And initialise the queue's lock */ 1591 /* And initialise the queue's lock */
1507 spin_lock_init(&ksp->txq_lock); 1592 spin_lock_init(&ksp->txq_lock);
1593 spin_lock_init(&ksp->rx_lock);
1508 1594
1509 /* Specify the RX DMA ring buffer */ 1595 /* Specify the RX DMA ring buffer */
1510 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE; 1596 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
@@ -1626,6 +1712,7 @@ ks8695_drv_remove(struct platform_device *pdev)
1626 struct ks8695_priv *ksp = netdev_priv(ndev); 1712 struct ks8695_priv *ksp = netdev_priv(ndev);
1627 1713
1628 platform_set_drvdata(pdev, NULL); 1714 platform_set_drvdata(pdev, NULL);
1715 netif_napi_del(&ksp->napi);
1629 1716
1630 unregister_netdev(ndev); 1717 unregister_netdev(ndev);
1631 ks8695_release_device(ksp); 1718 ks8695_release_device(ksp);
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 25e2627eb118..b7f3866d546f 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -160,8 +160,8 @@ struct w90p910_ether {
160 struct mii_if_info mii; 160 struct mii_if_info mii;
161 struct timer_list check_timer; 161 struct timer_list check_timer;
162 void __iomem *reg; 162 void __iomem *reg;
163 unsigned int rxirq; 163 int rxirq;
164 unsigned int txirq; 164 int txirq;
165 unsigned int cur_tx; 165 unsigned int cur_tx;
166 unsigned int cur_rx; 166 unsigned int cur_rx;
167 unsigned int finish_tx; 167 unsigned int finish_tx;
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 544d5af6950e..d4ab69f032be 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -468,7 +468,7 @@ found:
468 lp->jumpered = is_fmv18x; 468 lp->jumpered = is_fmv18x;
469 lp->mca_slot = slot; 469 lp->mca_slot = slot;
470 /* Snarf the interrupt vector now. */ 470 /* Snarf the interrupt vector now. */
471 ret = request_irq(irq, &net_interrupt, 0, DRV_NAME, dev); 471 ret = request_irq(irq, net_interrupt, 0, DRV_NAME, dev);
472 if (ret) { 472 if (ret) {
473 printk(KERN_ERR "AT1700 at %#3x is unusable due to a " 473 printk(KERN_ERR "AT1700 at %#3x is unusable due to a "
474 "conflict on IRQ %d.\n", 474 "conflict on IRQ %d.\n",
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 2a1120ad2e74..a348a22551d9 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -470,12 +470,28 @@ struct atl1c_ring_header {
470struct atl1c_buffer { 470struct atl1c_buffer {
471 struct sk_buff *skb; /* socket buffer */ 471 struct sk_buff *skb; /* socket buffer */
472 u16 length; /* rx buffer length */ 472 u16 length; /* rx buffer length */
473 u16 state; /* state of buffer */ 473 u16 flags; /* information of buffer */
474#define ATL1_BUFFER_FREE 0 474#define ATL1C_BUFFER_FREE 0x0001
475#define ATL1_BUFFER_BUSY 1 475#define ATL1C_BUFFER_BUSY 0x0002
476#define ATL1C_BUFFER_STATE_MASK 0x0003
477
478#define ATL1C_PCIMAP_SINGLE 0x0004
479#define ATL1C_PCIMAP_PAGE 0x0008
480#define ATL1C_PCIMAP_TYPE_MASK 0x000C
481
476 dma_addr_t dma; 482 dma_addr_t dma;
477}; 483};
478 484
485#define ATL1C_SET_BUFFER_STATE(buff, state) do { \
486 ((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK; \
487 ((buff)->flags) |= (state); \
488 } while (0)
489
490#define ATL1C_SET_PCIMAP_TYPE(buff, type) do { \
491 ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \
492 ((buff)->flags) |= (type); \
493 } while (0)
494
479/* transimit packet descriptor (tpd) ring */ 495/* transimit packet descriptor (tpd) ring */
480struct atl1c_tpd_ring { 496struct atl1c_tpd_ring {
481 void *desc; /* descriptor ring virtual address */ 497 void *desc; /* descriptor ring virtual address */
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1372e9a99f5b..1e2f57d4c367 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -710,6 +710,29 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
710 return 0; 710 return 0;
711} 711}
712 712
713static inline void atl1c_clean_buffer(struct pci_dev *pdev,
714 struct atl1c_buffer *buffer_info, int in_irq)
715{
716 if (buffer_info->flags & ATL1C_BUFFER_FREE)
717 return;
718 if (buffer_info->dma) {
719 if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
720 pci_unmap_single(pdev, buffer_info->dma,
721 buffer_info->length, PCI_DMA_TODEVICE);
722 else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
723 pci_unmap_page(pdev, buffer_info->dma,
724 buffer_info->length, PCI_DMA_TODEVICE);
725 }
726 if (buffer_info->skb) {
727 if (in_irq)
728 dev_kfree_skb_irq(buffer_info->skb);
729 else
730 dev_kfree_skb(buffer_info->skb);
731 }
732 buffer_info->dma = 0;
733 buffer_info->skb = NULL;
734 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
735}
713/* 736/*
714 * atl1c_clean_tx_ring - Free Tx-skb 737 * atl1c_clean_tx_ring - Free Tx-skb
715 * @adapter: board private structure 738 * @adapter: board private structure
@@ -725,22 +748,12 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
725 ring_count = tpd_ring->count; 748 ring_count = tpd_ring->count;
726 for (index = 0; index < ring_count; index++) { 749 for (index = 0; index < ring_count; index++) {
727 buffer_info = &tpd_ring->buffer_info[index]; 750 buffer_info = &tpd_ring->buffer_info[index];
728 if (buffer_info->state == ATL1_BUFFER_FREE) 751 atl1c_clean_buffer(pdev, buffer_info, 0);
729 continue;
730 if (buffer_info->dma)
731 pci_unmap_single(pdev, buffer_info->dma,
732 buffer_info->length,
733 PCI_DMA_TODEVICE);
734 if (buffer_info->skb)
735 dev_kfree_skb(buffer_info->skb);
736 buffer_info->dma = 0;
737 buffer_info->skb = NULL;
738 buffer_info->state = ATL1_BUFFER_FREE;
739 } 752 }
740 753
741 /* Zero out Tx-buffers */ 754 /* Zero out Tx-buffers */
742 memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * 755 memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
743 ring_count); 756 ring_count);
744 atomic_set(&tpd_ring->next_to_clean, 0); 757 atomic_set(&tpd_ring->next_to_clean, 0);
745 tpd_ring->next_to_use = 0; 758 tpd_ring->next_to_use = 0;
746} 759}
@@ -760,16 +773,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
760 for (i = 0; i < adapter->num_rx_queues; i++) { 773 for (i = 0; i < adapter->num_rx_queues; i++) {
761 for (j = 0; j < rfd_ring[i].count; j++) { 774 for (j = 0; j < rfd_ring[i].count; j++) {
762 buffer_info = &rfd_ring[i].buffer_info[j]; 775 buffer_info = &rfd_ring[i].buffer_info[j];
763 if (buffer_info->state == ATL1_BUFFER_FREE) 776 atl1c_clean_buffer(pdev, buffer_info, 0);
764 continue;
765 if (buffer_info->dma)
766 pci_unmap_single(pdev, buffer_info->dma,
767 buffer_info->length,
768 PCI_DMA_FROMDEVICE);
769 if (buffer_info->skb)
770 dev_kfree_skb(buffer_info->skb);
771 buffer_info->state = ATL1_BUFFER_FREE;
772 buffer_info->skb = NULL;
773 } 777 }
774 /* zero out the descriptor ring */ 778 /* zero out the descriptor ring */
775 memset(rfd_ring[i].desc, 0, rfd_ring[i].size); 779 memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
@@ -796,7 +800,8 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
796 atomic_set(&tpd_ring[i].next_to_clean, 0); 800 atomic_set(&tpd_ring[i].next_to_clean, 0);
797 buffer_info = tpd_ring[i].buffer_info; 801 buffer_info = tpd_ring[i].buffer_info;
798 for (j = 0; j < tpd_ring->count; j++) 802 for (j = 0; j < tpd_ring->count; j++)
799 buffer_info[i].state = ATL1_BUFFER_FREE; 803 ATL1C_SET_BUFFER_STATE(&buffer_info[i],
804 ATL1C_BUFFER_FREE);
800 } 805 }
801 for (i = 0; i < adapter->num_rx_queues; i++) { 806 for (i = 0; i < adapter->num_rx_queues; i++) {
802 rfd_ring[i].next_to_use = 0; 807 rfd_ring[i].next_to_use = 0;
@@ -805,7 +810,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
805 rrd_ring[i].next_to_clean = 0; 810 rrd_ring[i].next_to_clean = 0;
806 for (j = 0; j < rfd_ring[i].count; j++) { 811 for (j = 0; j < rfd_ring[i].count; j++) {
807 buffer_info = &rfd_ring[i].buffer_info[j]; 812 buffer_info = &rfd_ring[i].buffer_info[j];
808 buffer_info->state = ATL1_BUFFER_FREE; 813 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
809 } 814 }
810 } 815 }
811} 816}
@@ -1447,6 +1452,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1447 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1452 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1448 &adapter->tpd_ring[type]; 1453 &adapter->tpd_ring[type];
1449 struct atl1c_buffer *buffer_info; 1454 struct atl1c_buffer *buffer_info;
1455 struct pci_dev *pdev = adapter->pdev;
1450 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1456 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1451 u16 hw_next_to_clean; 1457 u16 hw_next_to_clean;
1452 u16 shift; 1458 u16 shift;
@@ -1462,16 +1468,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1462 1468
1463 while (next_to_clean != hw_next_to_clean) { 1469 while (next_to_clean != hw_next_to_clean) {
1464 buffer_info = &tpd_ring->buffer_info[next_to_clean]; 1470 buffer_info = &tpd_ring->buffer_info[next_to_clean];
1465 if (buffer_info->state == ATL1_BUFFER_BUSY) { 1471 atl1c_clean_buffer(pdev, buffer_info, 1);
1466 pci_unmap_page(adapter->pdev, buffer_info->dma,
1467 buffer_info->length, PCI_DMA_TODEVICE);
1468 buffer_info->dma = 0;
1469 if (buffer_info->skb) {
1470 dev_kfree_skb_irq(buffer_info->skb);
1471 buffer_info->skb = NULL;
1472 }
1473 buffer_info->state = ATL1_BUFFER_FREE;
1474 }
1475 if (++next_to_clean == tpd_ring->count) 1472 if (++next_to_clean == tpd_ring->count)
1476 next_to_clean = 0; 1473 next_to_clean = 0;
1477 atomic_set(&tpd_ring->next_to_clean, next_to_clean); 1474 atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1587,7 +1584,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
1587 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; 1584 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1588 next_info = &rfd_ring->buffer_info[next_next]; 1585 next_info = &rfd_ring->buffer_info[next_next];
1589 1586
1590 while (next_info->state == ATL1_BUFFER_FREE) { 1587 while (next_info->flags & ATL1C_BUFFER_FREE) {
1591 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); 1588 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1592 1589
1593 skb = dev_alloc_skb(adapter->rx_buffer_len); 1590 skb = dev_alloc_skb(adapter->rx_buffer_len);
@@ -1603,12 +1600,13 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
1603 * the 14 byte MAC header is removed 1600 * the 14 byte MAC header is removed
1604 */ 1601 */
1605 vir_addr = skb->data; 1602 vir_addr = skb->data;
1606 buffer_info->state = ATL1_BUFFER_BUSY; 1603 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1607 buffer_info->skb = skb; 1604 buffer_info->skb = skb;
1608 buffer_info->length = adapter->rx_buffer_len; 1605 buffer_info->length = adapter->rx_buffer_len;
1609 buffer_info->dma = pci_map_single(pdev, vir_addr, 1606 buffer_info->dma = pci_map_single(pdev, vir_addr,
1610 buffer_info->length, 1607 buffer_info->length,
1611 PCI_DMA_FROMDEVICE); 1608 PCI_DMA_FROMDEVICE);
1609 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
1612 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 1610 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1613 rfd_next_to_use = next_next; 1611 rfd_next_to_use = next_next;
1614 if (++next_next == rfd_ring->count) 1612 if (++next_next == rfd_ring->count)
@@ -1653,7 +1651,8 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1653 RRS_RX_RFD_INDEX_MASK; 1651 RRS_RX_RFD_INDEX_MASK;
1654 for (i = 0; i < num; i++) { 1652 for (i = 0; i < num; i++) {
1655 buffer_info[rfd_index].skb = NULL; 1653 buffer_info[rfd_index].skb = NULL;
1656 buffer_info[rfd_index].state = ATL1_BUFFER_FREE; 1654 ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
1655 ATL1C_BUFFER_FREE);
1657 if (++rfd_index == rfd_ring->count) 1656 if (++rfd_index == rfd_ring->count)
1658 rfd_index = 0; 1657 rfd_index = 0;
1659 } 1658 }
@@ -1967,7 +1966,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
1967 buffer_info->length = map_len; 1966 buffer_info->length = map_len;
1968 buffer_info->dma = pci_map_single(adapter->pdev, 1967 buffer_info->dma = pci_map_single(adapter->pdev,
1969 skb->data, hdr_len, PCI_DMA_TODEVICE); 1968 skb->data, hdr_len, PCI_DMA_TODEVICE);
1970 buffer_info->state = ATL1_BUFFER_BUSY; 1969 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1970 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
1971 mapped_len += map_len; 1971 mapped_len += map_len;
1972 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 1972 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1973 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); 1973 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
@@ -1981,16 +1981,14 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
1981 else { 1981 else {
1982 use_tpd = atl1c_get_tpd(adapter, type); 1982 use_tpd = atl1c_get_tpd(adapter, type);
1983 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); 1983 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
1984 use_tpd = atl1c_get_tpd(adapter, type);
1985 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
1986 } 1984 }
1987 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); 1985 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
1988 buffer_info->length = buf_len - mapped_len; 1986 buffer_info->length = buf_len - mapped_len;
1989 buffer_info->dma = 1987 buffer_info->dma =
1990 pci_map_single(adapter->pdev, skb->data + mapped_len, 1988 pci_map_single(adapter->pdev, skb->data + mapped_len,
1991 buffer_info->length, PCI_DMA_TODEVICE); 1989 buffer_info->length, PCI_DMA_TODEVICE);
1992 buffer_info->state = ATL1_BUFFER_BUSY; 1990 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1993 1991 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
1994 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 1992 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1995 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); 1993 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
1996 } 1994 }
@@ -2010,8 +2008,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
2010 frag->page_offset, 2008 frag->page_offset,
2011 buffer_info->length, 2009 buffer_info->length,
2012 PCI_DMA_TODEVICE); 2010 PCI_DMA_TODEVICE);
2013 buffer_info->state = ATL1_BUFFER_BUSY; 2011 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2014 2012 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE);
2015 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 2013 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2016 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); 2014 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2017 } 2015 }
@@ -2137,7 +2135,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
2137 2135
2138 if (!adapter->have_msi) 2136 if (!adapter->have_msi)
2139 flags |= IRQF_SHARED; 2137 flags |= IRQF_SHARED;
2140 err = request_irq(adapter->pdev->irq, &atl1c_intr, flags, 2138 err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
2141 netdev->name, netdev); 2139 netdev->name, netdev);
2142 if (err) { 2140 if (err) {
2143 if (netif_msg_ifup(adapter)) 2141 if (netif_msg_ifup(adapter))
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 955da733c2ad..ad17e74e5662 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1433,14 +1433,12 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1433 1433
1434 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & 1434 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
1435 RRS_PKT_SIZE_MASK) - 4; /* CRC */ 1435 RRS_PKT_SIZE_MASK) - 4; /* CRC */
1436 skb = netdev_alloc_skb(netdev, 1436 skb = netdev_alloc_skb_ip_align(netdev, packet_size);
1437 packet_size + NET_IP_ALIGN);
1438 if (skb == NULL) { 1437 if (skb == NULL) {
1439 dev_warn(&pdev->dev, "%s: Memory squeeze," 1438 dev_warn(&pdev->dev, "%s: Memory squeeze,"
1440 "deferring packet.\n", netdev->name); 1439 "deferring packet.\n", netdev->name);
1441 goto skip_pkt; 1440 goto skip_pkt;
1442 } 1441 }
1443 skb_reserve(skb, NET_IP_ALIGN);
1444 skb->dev = netdev; 1442 skb->dev = netdev;
1445 memcpy(skb->data, (u8 *)(prrs + 1), packet_size); 1443 memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
1446 skb_put(skb, packet_size); 1444 skb_put(skb, packet_size);
@@ -1932,7 +1930,7 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
1932 1930
1933 if (!adapter->have_msi) 1931 if (!adapter->have_msi)
1934 flags |= IRQF_SHARED; 1932 flags |= IRQF_SHARED;
1935 err = request_irq(adapter->pdev->irq, &atl1e_intr, flags, 1933 err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
1936 netdev->name, netdev); 1934 netdev->name, netdev);
1937 if (err) { 1935 if (err) {
1938 dev_dbg(&pdev->dev, 1936 dev_dbg(&pdev->dev,
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 00569dc1313c..e547f788a266 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1864,21 +1864,14 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
1864 1864
1865 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); 1865 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
1866 1866
1867 skb = netdev_alloc_skb(adapter->netdev, 1867 skb = netdev_alloc_skb_ip_align(adapter->netdev,
1868 adapter->rx_buffer_len + NET_IP_ALIGN); 1868 adapter->rx_buffer_len);
1869 if (unlikely(!skb)) { 1869 if (unlikely(!skb)) {
1870 /* Better luck next round */ 1870 /* Better luck next round */
1871 adapter->netdev->stats.rx_dropped++; 1871 adapter->netdev->stats.rx_dropped++;
1872 break; 1872 break;
1873 } 1873 }
1874 1874
1875 /*
1876 * Make buffer alignment 2 beyond a 16 byte boundary
1877 * this will result in a 16 byte aligned IP header after
1878 * the 14 byte MAC header is removed
1879 */
1880 skb_reserve(skb, NET_IP_ALIGN);
1881
1882 buffer_info->alloced = 1; 1875 buffer_info->alloced = 1;
1883 buffer_info->skb = skb; 1876 buffer_info->skb = skb;
1884 buffer_info->length = (u16) adapter->rx_buffer_len; 1877 buffer_info->length = (u16) adapter->rx_buffer_len;
@@ -2596,7 +2589,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
2596 irq_flags |= IRQF_SHARED; 2589 irq_flags |= IRQF_SHARED;
2597 } 2590 }
2598 2591
2599 err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags, 2592 err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
2600 netdev->name, netdev); 2593 netdev->name, netdev);
2601 if (unlikely(err)) 2594 if (unlikely(err))
2602 goto err_up; 2595 goto err_up;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ab688862093f..c0451d75cdcf 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -409,7 +409,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
409 if (rxd->status.ok && rxd->status.pkt_size >= 60) { 409 if (rxd->status.ok && rxd->status.pkt_size >= 60) {
410 int rx_size = (int)(rxd->status.pkt_size - 4); 410 int rx_size = (int)(rxd->status.pkt_size - 4);
411 /* alloc new buffer */ 411 /* alloc new buffer */
412 skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN); 412 skb = netdev_alloc_skb_ip_align(netdev, rx_size);
413 if (NULL == skb) { 413 if (NULL == skb) {
414 printk(KERN_WARNING 414 printk(KERN_WARNING
415 "%s: Mem squeeze, deferring packet.\n", 415 "%s: Mem squeeze, deferring packet.\n",
@@ -421,7 +421,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
421 netdev->stats.rx_dropped++; 421 netdev->stats.rx_dropped++;
422 break; 422 break;
423 } 423 }
424 skb_reserve(skb, NET_IP_ALIGN);
425 skb->dev = netdev; 424 skb->dev = netdev;
426 memcpy(skb->data, rxd->packet, rx_size); 425 memcpy(skb->data, rxd->packet, rx_size);
427 skb_put(skb, rx_size); 426 skb_put(skb, rx_size);
@@ -652,7 +651,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
652 if (adapter->have_msi) 651 if (adapter->have_msi)
653 flags &= ~IRQF_SHARED; 652 flags &= ~IRQF_SHARED;
654 653
655 return request_irq(adapter->pdev->irq, &atl2_intr, flags, netdev->name, 654 return request_irq(adapter->pdev->irq, atl2_intr, flags, netdev->name,
656 netdev); 655 netdev);
657} 656}
658 657
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 9043294fe617..001791775be3 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -437,7 +437,7 @@ static int net_open(struct net_device *dev)
437 /* The interrupt line is turned off (tri-stated) when the device isn't in 437 /* The interrupt line is turned off (tri-stated) when the device isn't in
438 use. That's especially important for "attached" interfaces where the 438 use. That's especially important for "attached" interfaces where the
439 port or interrupt may be shared. */ 439 port or interrupt may be shared. */
440 ret = request_irq(dev->irq, &atp_interrupt, 0, dev->name, dev); 440 ret = request_irq(dev->irq, atp_interrupt, 0, dev->name, dev);
441 if (ret) 441 if (ret)
442 return ret; 442 return ret;
443 443
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 3f4b4300f533..6bac04603a88 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -881,7 +881,7 @@ static int au1000_open(struct net_device *dev)
881 if (au1000_debug > 4) 881 if (au1000_debug > 4)
882 printk("%s: open: dev=%p\n", dev->name, dev); 882 printk("%s: open: dev=%p\n", dev->name, dev);
883 883
884 if ((retval = request_irq(dev->irq, &au1000_interrupt, 0, 884 if ((retval = request_irq(dev->irq, au1000_interrupt, 0,
885 dev->name, dev))) { 885 dev->name, dev))) {
886 printk(KERN_ERR "%s: unable to get IRQ %d\n", 886 printk(KERN_ERR "%s: unable to get IRQ %d\n",
887 dev->name, dev->irq); 887 dev->name, dev->irq);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ba29dc319b34..1f6c5486d715 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -320,16 +320,13 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
320 if (len < copybreak) { 320 if (len < copybreak) {
321 struct sk_buff *nskb; 321 struct sk_buff *nskb;
322 322
323 nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN); 323 nskb = netdev_alloc_skb_ip_align(dev, len);
324 if (!nskb) { 324 if (!nskb) {
325 /* forget packet, just rearm desc */ 325 /* forget packet, just rearm desc */
326 priv->stats.rx_dropped++; 326 priv->stats.rx_dropped++;
327 continue; 327 continue;
328 } 328 }
329 329
330 /* since we're copying the data, we can align
331 * them properly */
332 skb_reserve(nskb, NET_IP_ALIGN);
333 dma_sync_single_for_cpu(kdev, desc->address, 330 dma_sync_single_for_cpu(kdev, desc->address,
334 len, DMA_FROM_DEVICE); 331 len, DMA_FROM_DEVICE);
335 memcpy(nskb->data, skb->data, len); 332 memcpy(nskb->data, skb->data, len);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 3b79a225628a..dc7c19e49111 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -35,20 +35,31 @@
35#define DRV_VER "2.101.205" 35#define DRV_VER "2.101.205"
36#define DRV_NAME "be2net" 36#define DRV_NAME "be2net"
37#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 37#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
38#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
38#define OC_NAME "Emulex OneConnect 10Gbps NIC" 39#define OC_NAME "Emulex OneConnect 10Gbps NIC"
40#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
39#define DRV_DESC BE_NAME "Driver" 41#define DRV_DESC BE_NAME "Driver"
40 42
41#define BE_VENDOR_ID 0x19a2 43#define BE_VENDOR_ID 0x19a2
42#define BE_DEVICE_ID1 0x211 44#define BE_DEVICE_ID1 0x211
45#define BE_DEVICE_ID2 0x221
43#define OC_DEVICE_ID1 0x700 46#define OC_DEVICE_ID1 0x700
44#define OC_DEVICE_ID2 0x701 47#define OC_DEVICE_ID2 0x701
48#define OC_DEVICE_ID3 0x710
45 49
46static inline char *nic_name(struct pci_dev *pdev) 50static inline char *nic_name(struct pci_dev *pdev)
47{ 51{
48 if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) 52 switch (pdev->device) {
53 case OC_DEVICE_ID1:
54 case OC_DEVICE_ID2:
49 return OC_NAME; 55 return OC_NAME;
50 else 56 case OC_DEVICE_ID3:
57 return OC_NAME1;
58 case BE_DEVICE_ID2:
59 return BE3_NAME;
60 default:
51 return BE_NAME; 61 return BE_NAME;
62 }
52} 63}
53 64
54/* Number of bytes of an RX frame that are copied to skb->data */ 65/* Number of bytes of an RX frame that are copied to skb->data */
@@ -181,7 +192,6 @@ struct be_drvr_stats {
181 192
182struct be_stats_obj { 193struct be_stats_obj {
183 struct be_drvr_stats drvr_stats; 194 struct be_drvr_stats drvr_stats;
184 struct net_device_stats net_stats;
185 struct be_dma_mem cmd; 195 struct be_dma_mem cmd;
186}; 196};
187 197
@@ -244,6 +254,7 @@ struct be_adapter {
244 struct vlan_group *vlan_grp; 254 struct vlan_group *vlan_grp;
245 u16 num_vlans; 255 u16 num_vlans;
246 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN]; 256 u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
257 struct be_dma_mem mc_cmd_mem;
247 258
248 struct be_stats_obj stats; 259 struct be_stats_obj stats;
249 /* Work queue used to perform periodic tasks like getting statistics */ 260 /* Work queue used to perform periodic tasks like getting statistics */
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 28a0eda92680..8bd531560043 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -349,7 +349,11 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
349 struct be_queue_info *mccq = &adapter->mcc_obj.q; 349 struct be_queue_info *mccq = &adapter->mcc_obj.q;
350 struct be_mcc_wrb *wrb; 350 struct be_mcc_wrb *wrb;
351 351
352 BUG_ON(atomic_read(&mccq->used) >= mccq->len); 352 if (atomic_read(&mccq->used) >= mccq->len) {
353 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
354 return NULL;
355 }
356
353 wrb = queue_head_node(mccq); 357 wrb = queue_head_node(mccq);
354 queue_head_inc(mccq); 358 queue_head_inc(mccq);
355 atomic_inc(&mccq->used); 359 atomic_inc(&mccq->used);
@@ -357,6 +361,57 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
357 return wrb; 361 return wrb;
358} 362}
359 363
364/* Tell fw we're about to start firing cmds by writing a
365 * special pattern across the wrb hdr; uses mbox
366 */
367int be_cmd_fw_init(struct be_adapter *adapter)
368{
369 u8 *wrb;
370 int status;
371
372 spin_lock(&adapter->mbox_lock);
373
374 wrb = (u8 *)wrb_from_mbox(adapter);
375 *wrb++ = 0xFF;
376 *wrb++ = 0x12;
377 *wrb++ = 0x34;
378 *wrb++ = 0xFF;
379 *wrb++ = 0xFF;
380 *wrb++ = 0x56;
381 *wrb++ = 0x78;
382 *wrb = 0xFF;
383
384 status = be_mbox_notify_wait(adapter);
385
386 spin_unlock(&adapter->mbox_lock);
387 return status;
388}
389
390/* Tell fw we're done with firing cmds by writing a
391 * special pattern across the wrb hdr; uses mbox
392 */
393int be_cmd_fw_clean(struct be_adapter *adapter)
394{
395 u8 *wrb;
396 int status;
397
398 spin_lock(&adapter->mbox_lock);
399
400 wrb = (u8 *)wrb_from_mbox(adapter);
401 *wrb++ = 0xFF;
402 *wrb++ = 0xAA;
403 *wrb++ = 0xBB;
404 *wrb++ = 0xFF;
405 *wrb++ = 0xFF;
406 *wrb++ = 0xCC;
407 *wrb++ = 0xDD;
408 *wrb = 0xFF;
409
410 status = be_mbox_notify_wait(adapter);
411
412 spin_unlock(&adapter->mbox_lock);
413 return status;
414}
360int be_cmd_eq_create(struct be_adapter *adapter, 415int be_cmd_eq_create(struct be_adapter *adapter,
361 struct be_queue_info *eq, int eq_delay) 416 struct be_queue_info *eq, int eq_delay)
362{ 417{
@@ -448,6 +503,10 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
448 spin_lock_bh(&adapter->mcc_lock); 503 spin_lock_bh(&adapter->mcc_lock);
449 504
450 wrb = wrb_from_mccq(adapter); 505 wrb = wrb_from_mccq(adapter);
506 if (!wrb) {
507 status = -EBUSY;
508 goto err;
509 }
451 req = embedded_payload(wrb); 510 req = embedded_payload(wrb);
452 511
453 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 512 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -464,6 +523,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
464 *pmac_id = le32_to_cpu(resp->pmac_id); 523 *pmac_id = le32_to_cpu(resp->pmac_id);
465 } 524 }
466 525
526err:
467 spin_unlock_bh(&adapter->mcc_lock); 527 spin_unlock_bh(&adapter->mcc_lock);
468 return status; 528 return status;
469} 529}
@@ -478,6 +538,10 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
478 spin_lock_bh(&adapter->mcc_lock); 538 spin_lock_bh(&adapter->mcc_lock);
479 539
480 wrb = wrb_from_mccq(adapter); 540 wrb = wrb_from_mccq(adapter);
541 if (!wrb) {
542 status = -EBUSY;
543 goto err;
544 }
481 req = embedded_payload(wrb); 545 req = embedded_payload(wrb);
482 546
483 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 547 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -490,8 +554,8 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
490 554
491 status = be_mcc_notify_wait(adapter); 555 status = be_mcc_notify_wait(adapter);
492 556
557err:
493 spin_unlock_bh(&adapter->mcc_lock); 558 spin_unlock_bh(&adapter->mcc_lock);
494
495 return status; 559 return status;
496} 560}
497 561
@@ -810,10 +874,15 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
810 struct be_mcc_wrb *wrb; 874 struct be_mcc_wrb *wrb;
811 struct be_cmd_req_get_stats *req; 875 struct be_cmd_req_get_stats *req;
812 struct be_sge *sge; 876 struct be_sge *sge;
877 int status = 0;
813 878
814 spin_lock_bh(&adapter->mcc_lock); 879 spin_lock_bh(&adapter->mcc_lock);
815 880
816 wrb = wrb_from_mccq(adapter); 881 wrb = wrb_from_mccq(adapter);
882 if (!wrb) {
883 status = -EBUSY;
884 goto err;
885 }
817 req = nonemb_cmd->va; 886 req = nonemb_cmd->va;
818 sge = nonembedded_sgl(wrb); 887 sge = nonembedded_sgl(wrb);
819 888
@@ -828,13 +897,14 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
828 897
829 be_mcc_notify(adapter); 898 be_mcc_notify(adapter);
830 899
900err:
831 spin_unlock_bh(&adapter->mcc_lock); 901 spin_unlock_bh(&adapter->mcc_lock);
832 return 0; 902 return status;
833} 903}
834 904
835/* Uses synchronous mcc */ 905/* Uses synchronous mcc */
836int be_cmd_link_status_query(struct be_adapter *adapter, 906int be_cmd_link_status_query(struct be_adapter *adapter,
837 bool *link_up) 907 bool *link_up, u8 *mac_speed, u16 *link_speed)
838{ 908{
839 struct be_mcc_wrb *wrb; 909 struct be_mcc_wrb *wrb;
840 struct be_cmd_req_link_status *req; 910 struct be_cmd_req_link_status *req;
@@ -843,6 +913,10 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
843 spin_lock_bh(&adapter->mcc_lock); 913 spin_lock_bh(&adapter->mcc_lock);
844 914
845 wrb = wrb_from_mccq(adapter); 915 wrb = wrb_from_mccq(adapter);
916 if (!wrb) {
917 status = -EBUSY;
918 goto err;
919 }
846 req = embedded_payload(wrb); 920 req = embedded_payload(wrb);
847 921
848 *link_up = false; 922 *link_up = false;
@@ -855,10 +929,14 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
855 status = be_mcc_notify_wait(adapter); 929 status = be_mcc_notify_wait(adapter);
856 if (!status) { 930 if (!status) {
857 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 931 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
858 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) 932 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
859 *link_up = true; 933 *link_up = true;
934 *link_speed = le16_to_cpu(resp->link_speed);
935 *mac_speed = resp->mac_speed;
936 }
860 } 937 }
861 938
939err:
862 spin_unlock_bh(&adapter->mcc_lock); 940 spin_unlock_bh(&adapter->mcc_lock);
863 return status; 941 return status;
864} 942}
@@ -897,10 +975,15 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
897{ 975{
898 struct be_mcc_wrb *wrb; 976 struct be_mcc_wrb *wrb;
899 struct be_cmd_req_modify_eq_delay *req; 977 struct be_cmd_req_modify_eq_delay *req;
978 int status = 0;
900 979
901 spin_lock_bh(&adapter->mcc_lock); 980 spin_lock_bh(&adapter->mcc_lock);
902 981
903 wrb = wrb_from_mccq(adapter); 982 wrb = wrb_from_mccq(adapter);
983 if (!wrb) {
984 status = -EBUSY;
985 goto err;
986 }
904 req = embedded_payload(wrb); 987 req = embedded_payload(wrb);
905 988
906 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 989 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -915,8 +998,9 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
915 998
916 be_mcc_notify(adapter); 999 be_mcc_notify(adapter);
917 1000
1001err:
918 spin_unlock_bh(&adapter->mcc_lock); 1002 spin_unlock_bh(&adapter->mcc_lock);
919 return 0; 1003 return status;
920} 1004}
921 1005
922/* Uses sycnhronous mcc */ 1006/* Uses sycnhronous mcc */
@@ -930,6 +1014,10 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
930 spin_lock_bh(&adapter->mcc_lock); 1014 spin_lock_bh(&adapter->mcc_lock);
931 1015
932 wrb = wrb_from_mccq(adapter); 1016 wrb = wrb_from_mccq(adapter);
1017 if (!wrb) {
1018 status = -EBUSY;
1019 goto err;
1020 }
933 req = embedded_payload(wrb); 1021 req = embedded_payload(wrb);
934 1022
935 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 1023 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -948,6 +1036,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
948 1036
949 status = be_mcc_notify_wait(adapter); 1037 status = be_mcc_notify_wait(adapter);
950 1038
1039err:
951 spin_unlock_bh(&adapter->mcc_lock); 1040 spin_unlock_bh(&adapter->mcc_lock);
952 return status; 1041 return status;
953} 1042}
@@ -964,6 +1053,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
964 spin_lock_bh(&adapter->mcc_lock); 1053 spin_lock_bh(&adapter->mcc_lock);
965 1054
966 wrb = wrb_from_mccq(adapter); 1055 wrb = wrb_from_mccq(adapter);
1056 if (!wrb) {
1057 status = -EBUSY;
1058 goto err;
1059 }
967 req = embedded_payload(wrb); 1060 req = embedded_payload(wrb);
968 1061
969 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 1062 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -978,6 +1071,7 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
978 1071
979 status = be_mcc_notify_wait(adapter); 1072 status = be_mcc_notify_wait(adapter);
980 1073
1074err:
981 spin_unlock_bh(&adapter->mcc_lock); 1075 spin_unlock_bh(&adapter->mcc_lock);
982 return status; 1076 return status;
983} 1077}
@@ -987,24 +1081,34 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
987 * (mc == NULL) => multicast promiscous 1081 * (mc == NULL) => multicast promiscous
988 */ 1082 */
989int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, 1083int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
990 struct dev_mc_list *mc_list, u32 mc_count) 1084 struct dev_mc_list *mc_list, u32 mc_count,
1085 struct be_dma_mem *mem)
991{ 1086{
992#define BE_MAX_MC 32 /* set mcast promisc if > 32 */
993 struct be_mcc_wrb *wrb; 1087 struct be_mcc_wrb *wrb;
994 struct be_cmd_req_mcast_mac_config *req; 1088 struct be_cmd_req_mcast_mac_config *req = mem->va;
1089 struct be_sge *sge;
1090 int status;
995 1091
996 spin_lock_bh(&adapter->mcc_lock); 1092 spin_lock_bh(&adapter->mcc_lock);
997 1093
998 wrb = wrb_from_mccq(adapter); 1094 wrb = wrb_from_mccq(adapter);
999 req = embedded_payload(wrb); 1095 if (!wrb) {
1096 status = -EBUSY;
1097 goto err;
1098 }
1099 sge = nonembedded_sgl(wrb);
1100 memset(req, 0, sizeof(*req));
1000 1101
1001 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 1102 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1103 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
1104 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
1105 sge->len = cpu_to_le32(mem->size);
1002 1106
1003 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1107 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1004 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req)); 1108 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
1005 1109
1006 req->interface_id = if_id; 1110 req->interface_id = if_id;
1007 if (mc_list && mc_count <= BE_MAX_MC) { 1111 if (mc_list) {
1008 int i; 1112 int i;
1009 struct dev_mc_list *mc; 1113 struct dev_mc_list *mc;
1010 1114
@@ -1016,11 +1120,11 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1016 req->promiscuous = 1; 1120 req->promiscuous = 1;
1017 } 1121 }
1018 1122
1019 be_mcc_notify_wait(adapter); 1123 status = be_mcc_notify_wait(adapter);
1020 1124
1125err:
1021 spin_unlock_bh(&adapter->mcc_lock); 1126 spin_unlock_bh(&adapter->mcc_lock);
1022 1127 return status;
1023 return 0;
1024} 1128}
1025 1129
1026/* Uses synchrounous mcc */ 1130/* Uses synchrounous mcc */
@@ -1033,6 +1137,10 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1033 spin_lock_bh(&adapter->mcc_lock); 1137 spin_lock_bh(&adapter->mcc_lock);
1034 1138
1035 wrb = wrb_from_mccq(adapter); 1139 wrb = wrb_from_mccq(adapter);
1140 if (!wrb) {
1141 status = -EBUSY;
1142 goto err;
1143 }
1036 req = embedded_payload(wrb); 1144 req = embedded_payload(wrb);
1037 1145
1038 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 1146 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1045,6 +1153,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1045 1153
1046 status = be_mcc_notify_wait(adapter); 1154 status = be_mcc_notify_wait(adapter);
1047 1155
1156err:
1048 spin_unlock_bh(&adapter->mcc_lock); 1157 spin_unlock_bh(&adapter->mcc_lock);
1049 return status; 1158 return status;
1050} 1159}
@@ -1059,6 +1168,10 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1059 spin_lock_bh(&adapter->mcc_lock); 1168 spin_lock_bh(&adapter->mcc_lock);
1060 1169
1061 wrb = wrb_from_mccq(adapter); 1170 wrb = wrb_from_mccq(adapter);
1171 if (!wrb) {
1172 status = -EBUSY;
1173 goto err;
1174 }
1062 req = embedded_payload(wrb); 1175 req = embedded_payload(wrb);
1063 1176
1064 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 1177 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
@@ -1074,6 +1187,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1074 *rx_fc = le16_to_cpu(resp->rx_flow_control); 1187 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1075 } 1188 }
1076 1189
1190err:
1077 spin_unlock_bh(&adapter->mcc_lock); 1191 spin_unlock_bh(&adapter->mcc_lock);
1078 return status; 1192 return status;
1079} 1193}
@@ -1129,6 +1243,110 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1129 return status; 1243 return status;
1130} 1244}
1131 1245
1246/* Uses sync mcc */
1247int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1248 u8 bcn, u8 sts, u8 state)
1249{
1250 struct be_mcc_wrb *wrb;
1251 struct be_cmd_req_enable_disable_beacon *req;
1252 int status;
1253
1254 spin_lock_bh(&adapter->mcc_lock);
1255
1256 wrb = wrb_from_mccq(adapter);
1257 if (!wrb) {
1258 status = -EBUSY;
1259 goto err;
1260 }
1261 req = embedded_payload(wrb);
1262
1263 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1264
1265 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1266 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1267
1268 req->port_num = port_num;
1269 req->beacon_state = state;
1270 req->beacon_duration = bcn;
1271 req->status_duration = sts;
1272
1273 status = be_mcc_notify_wait(adapter);
1274
1275err:
1276 spin_unlock_bh(&adapter->mcc_lock);
1277 return status;
1278}
1279
1280/* Uses sync mcc */
1281int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1282{
1283 struct be_mcc_wrb *wrb;
1284 struct be_cmd_req_get_beacon_state *req;
1285 int status;
1286
1287 spin_lock_bh(&adapter->mcc_lock);
1288
1289 wrb = wrb_from_mccq(adapter);
1290 if (!wrb) {
1291 status = -EBUSY;
1292 goto err;
1293 }
1294 req = embedded_payload(wrb);
1295
1296 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1297
1298 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1299 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1300
1301 req->port_num = port_num;
1302
1303 status = be_mcc_notify_wait(adapter);
1304 if (!status) {
1305 struct be_cmd_resp_get_beacon_state *resp =
1306 embedded_payload(wrb);
1307 *state = resp->beacon_state;
1308 }
1309
1310err:
1311 spin_unlock_bh(&adapter->mcc_lock);
1312 return status;
1313}
1314
1315/* Uses sync mcc */
1316int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
1317 u8 *connector)
1318{
1319 struct be_mcc_wrb *wrb;
1320 struct be_cmd_req_port_type *req;
1321 int status;
1322
1323 spin_lock_bh(&adapter->mcc_lock);
1324
1325 wrb = wrb_from_mccq(adapter);
1326 if (!wrb) {
1327 status = -EBUSY;
1328 goto err;
1329 }
1330 req = embedded_payload(wrb);
1331
1332 be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0);
1333
1334 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1335 OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
1336
1337 req->port = cpu_to_le32(port);
1338 req->page_num = cpu_to_le32(TR_PAGE_A0);
1339 status = be_mcc_notify_wait(adapter);
1340 if (!status) {
1341 struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
1342 *connector = resp->data.connector;
1343 }
1344
1345err:
1346 spin_unlock_bh(&adapter->mcc_lock);
1347 return status;
1348}
1349
1132int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 1350int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1133 u32 flash_type, u32 flash_opcode, u32 buf_size) 1351 u32 flash_type, u32 flash_opcode, u32 buf_size)
1134{ 1352{
@@ -1140,6 +1358,11 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1140 spin_lock_bh(&adapter->mcc_lock); 1358 spin_lock_bh(&adapter->mcc_lock);
1141 1359
1142 wrb = wrb_from_mccq(adapter); 1360 wrb = wrb_from_mccq(adapter);
1361 if (!wrb) {
1362 status = -EBUSY;
1363 goto err;
1364 }
1365 req = cmd->va;
1143 sge = nonembedded_sgl(wrb); 1366 sge = nonembedded_sgl(wrb);
1144 1367
1145 be_wrb_hdr_prepare(wrb, cmd->size, false, 1); 1368 be_wrb_hdr_prepare(wrb, cmd->size, false, 1);
@@ -1156,6 +1379,41 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1156 1379
1157 status = be_mcc_notify_wait(adapter); 1380 status = be_mcc_notify_wait(adapter);
1158 1381
1382err:
1383 spin_unlock_bh(&adapter->mcc_lock);
1384 return status;
1385}
1386
1387int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
1388{
1389 struct be_mcc_wrb *wrb;
1390 struct be_cmd_write_flashrom *req;
1391 int status;
1392
1393 spin_lock_bh(&adapter->mcc_lock);
1394
1395 wrb = wrb_from_mccq(adapter);
1396 if (!wrb) {
1397 status = -EBUSY;
1398 goto err;
1399 }
1400 req = embedded_payload(wrb);
1401
1402 be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0);
1403
1404 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1405 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1406
1407 req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
1408 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1409 req->params.offset = 0x3FFFC;
1410 req->params.data_buf_size = 0x4;
1411
1412 status = be_mcc_notify_wait(adapter);
1413 if (!status)
1414 memcpy(flashed_crc, req->params.data_buf, 4);
1415
1416err:
1159 spin_unlock_bh(&adapter->mcc_lock); 1417 spin_unlock_bh(&adapter->mcc_lock);
1160 return status; 1418 return status;
1161} 1419}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e5f9676cf1bc..8ec6528cb054 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -118,6 +118,7 @@ struct be_mcc_mailbox {
118#define OPCODE_COMMON_NTWK_MULTICAST_SET 3 118#define OPCODE_COMMON_NTWK_MULTICAST_SET 3
119#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4 119#define OPCODE_COMMON_NTWK_VLAN_CONFIG 4
120#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5 120#define OPCODE_COMMON_NTWK_LINK_STATUS_QUERY 5
121#define OPCODE_COMMON_READ_FLASHROM 6
121#define OPCODE_COMMON_WRITE_FLASHROM 7 122#define OPCODE_COMMON_WRITE_FLASHROM 7
122#define OPCODE_COMMON_CQ_CREATE 12 123#define OPCODE_COMMON_CQ_CREATE 12
123#define OPCODE_COMMON_EQ_CREATE 13 124#define OPCODE_COMMON_EQ_CREATE 13
@@ -138,6 +139,9 @@ struct be_mcc_mailbox {
138#define OPCODE_COMMON_NTWK_PMAC_ADD 59 139#define OPCODE_COMMON_NTWK_PMAC_ADD 59
139#define OPCODE_COMMON_NTWK_PMAC_DEL 60 140#define OPCODE_COMMON_NTWK_PMAC_DEL 60
140#define OPCODE_COMMON_FUNCTION_RESET 61 141#define OPCODE_COMMON_FUNCTION_RESET 61
142#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
143#define OPCODE_COMMON_GET_BEACON_STATE 70
144#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
141 145
142#define OPCODE_ETH_ACPI_CONFIG 2 146#define OPCODE_ETH_ACPI_CONFIG 2
143#define OPCODE_ETH_PROMISCUOUS 3 147#define OPCODE_ETH_PROMISCUOUS 3
@@ -587,6 +591,8 @@ struct be_cmd_req_promiscuous_config {
587 u16 rsvd0; 591 u16 rsvd0;
588} __packed; 592} __packed;
589 593
594/******************** Multicast MAC Config *******************/
595#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
590struct macaddr { 596struct macaddr {
591 u8 byte[ETH_ALEN]; 597 u8 byte[ETH_ALEN];
592}; 598};
@@ -596,7 +602,7 @@ struct be_cmd_req_mcast_mac_config {
596 u16 num_mac; 602 u16 num_mac;
597 u8 promiscuous; 603 u8 promiscuous;
598 u8 interface_id; 604 u8 interface_id;
599 struct macaddr mac[32]; 605 struct macaddr mac[BE_MAX_MC];
600} __packed; 606} __packed;
601 607
602static inline struct be_hw_stats * 608static inline struct be_hw_stats *
@@ -633,9 +639,47 @@ struct be_cmd_resp_link_status {
633 u8 mac_fault; 639 u8 mac_fault;
634 u8 mgmt_mac_duplex; 640 u8 mgmt_mac_duplex;
635 u8 mgmt_mac_speed; 641 u8 mgmt_mac_speed;
636 u16 rsvd0; 642 u16 link_speed;
643 u32 rsvd0;
637} __packed; 644} __packed;
638 645
646/******************** Port Identification ***************************/
647/* Identifies the type of port attached to NIC */
648struct be_cmd_req_port_type {
649 struct be_cmd_req_hdr hdr;
650 u32 page_num;
651 u32 port;
652};
653
654enum {
655 TR_PAGE_A0 = 0xa0,
656 TR_PAGE_A2 = 0xa2
657};
658
659struct be_cmd_resp_port_type {
660 struct be_cmd_resp_hdr hdr;
661 u32 page_num;
662 u32 port;
663 struct data {
664 u8 identifier;
665 u8 identifier_ext;
666 u8 connector;
667 u8 transceiver[8];
668 u8 rsvd0[3];
669 u8 length_km;
670 u8 length_hm;
671 u8 length_om1;
672 u8 length_om2;
673 u8 length_cu;
674 u8 length_cu_m;
675 u8 vendor_name[16];
676 u8 rsvd;
677 u8 vendor_oui[3];
678 u8 vendor_pn[16];
679 u8 vendor_rev[4];
680 } data;
681};
682
639/******************** Get FW Version *******************/ 683/******************** Get FW Version *******************/
640struct be_cmd_req_get_fw_version { 684struct be_cmd_req_get_fw_version {
641 struct be_cmd_req_hdr hdr; 685 struct be_cmd_req_hdr hdr;
@@ -699,6 +743,37 @@ struct be_cmd_resp_query_fw_cfg {
699 u32 rsvd[26]; 743 u32 rsvd[26];
700}; 744};
701 745
746/******************** Port Beacon ***************************/
747
748#define BEACON_STATE_ENABLED 0x1
749#define BEACON_STATE_DISABLED 0x0
750
751struct be_cmd_req_enable_disable_beacon {
752 struct be_cmd_req_hdr hdr;
753 u8 port_num;
754 u8 beacon_state;
755 u8 beacon_duration;
756 u8 status_duration;
757} __packed;
758
759struct be_cmd_resp_enable_disable_beacon {
760 struct be_cmd_resp_hdr resp_hdr;
761 u32 rsvd0;
762} __packed;
763
764struct be_cmd_req_get_beacon_state {
765 struct be_cmd_req_hdr hdr;
766 u8 port_num;
767 u8 rsvd0;
768 u16 rsvd1;
769} __packed;
770
771struct be_cmd_resp_get_beacon_state {
772 struct be_cmd_resp_hdr resp_hdr;
773 u8 beacon_state;
774 u8 rsvd0[3];
775} __packed;
776
702/****************** Firmware Flash ******************/ 777/****************** Firmware Flash ******************/
703struct flashrom_params { 778struct flashrom_params {
704 u32 op_code; 779 u32 op_code;
@@ -743,7 +818,7 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
743extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 818extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
744 int type); 819 int type);
745extern int be_cmd_link_status_query(struct be_adapter *adapter, 820extern int be_cmd_link_status_query(struct be_adapter *adapter,
746 bool *link_up); 821 bool *link_up, u8 *mac_speed, u16 *link_speed);
747extern int be_cmd_reset(struct be_adapter *adapter); 822extern int be_cmd_reset(struct be_adapter *adapter);
748extern int be_cmd_get_stats(struct be_adapter *adapter, 823extern int be_cmd_get_stats(struct be_adapter *adapter,
749 struct be_dma_mem *nonemb_cmd); 824 struct be_dma_mem *nonemb_cmd);
@@ -756,7 +831,8 @@ extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
756extern int be_cmd_promiscuous_config(struct be_adapter *adapter, 831extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
757 u8 port_num, bool en); 832 u8 port_num, bool en);
758extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, 833extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
759 struct dev_mc_list *mc_list, u32 mc_count); 834 struct dev_mc_list *mc_list, u32 mc_count,
835 struct be_dma_mem *mem);
760extern int be_cmd_set_flow_control(struct be_adapter *adapter, 836extern int be_cmd_set_flow_control(struct be_adapter *adapter,
761 u32 tx_fc, u32 rx_fc); 837 u32 tx_fc, u32 rx_fc);
762extern int be_cmd_get_flow_control(struct be_adapter *adapter, 838extern int be_cmd_get_flow_control(struct be_adapter *adapter,
@@ -765,6 +841,15 @@ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
765 u32 *port_num, u32 *cap); 841 u32 *port_num, u32 *cap);
766extern int be_cmd_reset_function(struct be_adapter *adapter); 842extern int be_cmd_reset_function(struct be_adapter *adapter);
767extern int be_process_mcc(struct be_adapter *adapter); 843extern int be_process_mcc(struct be_adapter *adapter);
844extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
845 u8 port_num, u8 beacon, u8 status, u8 state);
846extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
847 u8 port_num, u32 *state);
848extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
849 u8 *connector);
768extern int be_cmd_write_flashrom(struct be_adapter *adapter, 850extern int be_cmd_write_flashrom(struct be_adapter *adapter,
769 struct be_dma_mem *cmd, u32 flash_oper, 851 struct be_dma_mem *cmd, u32 flash_oper,
770 u32 flash_opcode, u32 buf_size); 852 u32 flash_opcode, u32 buf_size);
853extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc);
854extern int be_cmd_fw_init(struct be_adapter *adapter);
855extern int be_cmd_fw_clean(struct be_adapter *adapter);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index f0fd95b43c07..e8f92831021a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -234,7 +234,7 @@ be_get_ethtool_stats(struct net_device *netdev,
234 struct be_rxf_stats *rxf_stats = &hw_stats->rxf; 234 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
235 struct be_port_rxf_stats *port_stats = 235 struct be_port_rxf_stats *port_stats =
236 &rxf_stats->port[adapter->port_num]; 236 &rxf_stats->port[adapter->port_num];
237 struct net_device_stats *net_stats = &adapter->stats.net_stats; 237 struct net_device_stats *net_stats = &netdev->stats;
238 struct be_erx_stats *erx_stats = &hw_stats->erx; 238 struct be_erx_stats *erx_stats = &hw_stats->erx;
239 void *p = NULL; 239 void *p = NULL;
240 int i; 240 int i;
@@ -281,16 +281,55 @@ be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
281 } 281 }
282} 282}
283 283
284static int be_get_stats_count(struct net_device *netdev) 284static int be_get_sset_count(struct net_device *netdev, int stringset)
285{ 285{
286 return ETHTOOL_STATS_NUM; 286 switch (stringset) {
287 case ETH_SS_STATS:
288 return ETHTOOL_STATS_NUM;
289 default:
290 return -EINVAL;
291 }
287} 292}
288 293
289static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 294static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
290{ 295{
291 ecmd->speed = SPEED_10000; 296 struct be_adapter *adapter = netdev_priv(netdev);
297 u8 mac_speed = 0, connector = 0;
298 u16 link_speed = 0;
299 bool link_up = false;
300
301 be_cmd_link_status_query(adapter, &link_up, &mac_speed, &link_speed);
302
303 /* link_speed is in units of 10 Mbps */
304 if (link_speed) {
305 ecmd->speed = link_speed*10;
306 } else {
307 switch (mac_speed) {
308 case PHY_LINK_SPEED_1GBPS:
309 ecmd->speed = SPEED_1000;
310 break;
311 case PHY_LINK_SPEED_10GBPS:
312 ecmd->speed = SPEED_10000;
313 break;
314 }
315 }
292 ecmd->duplex = DUPLEX_FULL; 316 ecmd->duplex = DUPLEX_FULL;
293 ecmd->autoneg = AUTONEG_DISABLE; 317 ecmd->autoneg = AUTONEG_DISABLE;
318 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
319
320 be_cmd_read_port_type(adapter, adapter->port_num, &connector);
321 switch (connector) {
322 case 7:
323 ecmd->port = PORT_FIBRE;
324 break;
325 default:
326 ecmd->port = PORT_TP;
327 break;
328 }
329
330 ecmd->phy_address = adapter->port_num;
331 ecmd->transceiver = XCVR_INTERNAL;
332
294 return 0; 333 return 0;
295} 334}
296 335
@@ -335,6 +374,35 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
335} 374}
336 375
337static int 376static int
377be_phys_id(struct net_device *netdev, u32 data)
378{
379 struct be_adapter *adapter = netdev_priv(netdev);
380 int status;
381 u32 cur;
382
383 if (!netif_running(netdev))
384 return 0;
385
386 be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
387
388 if (cur == BEACON_STATE_ENABLED)
389 return 0;
390
391 if (data < 2)
392 data = 2;
393
394 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
395 BEACON_STATE_ENABLED);
396 set_current_state(TASK_INTERRUPTIBLE);
397 schedule_timeout(data*HZ);
398
399 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
400 BEACON_STATE_DISABLED);
401
402 return status;
403}
404
405static int
338be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) 406be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
339{ 407{
340 struct be_adapter *adapter = netdev_priv(netdev); 408 struct be_adapter *adapter = netdev_priv(netdev);
@@ -366,7 +434,8 @@ const struct ethtool_ops be_ethtool_ops = {
366 .get_tso = ethtool_op_get_tso, 434 .get_tso = ethtool_op_get_tso,
367 .set_tso = ethtool_op_set_tso, 435 .set_tso = ethtool_op_set_tso,
368 .get_strings = be_get_stat_strings, 436 .get_strings = be_get_stat_strings,
369 .get_stats_count = be_get_stats_count, 437 .phys_id = be_phys_id,
438 .get_sset_count = be_get_sset_count,
370 .get_ethtool_stats = be_get_ethtool_stats, 439 .get_ethtool_stats = be_get_ethtool_stats,
371 .flash_device = be_do_flash, 440 .flash_device = be_do_flash,
372}; 441};
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a3394b4aa14a..f53d5ca2da9e 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -225,6 +225,7 @@ struct be_eth_rx_compl {
225#define NUM_FLASHDIR_ENTRIES 32 225#define NUM_FLASHDIR_ENTRIES 32
226 226
227#define FLASHROM_TYPE_ISCSI_ACTIVE 0 227#define FLASHROM_TYPE_ISCSI_ACTIVE 0
228#define FLASHROM_TYPE_REDBOOT 1
228#define FLASHROM_TYPE_BIOS 2 229#define FLASHROM_TYPE_BIOS 2
229#define FLASHROM_TYPE_PXE_BIOS 3 230#define FLASHROM_TYPE_PXE_BIOS 3
230#define FLASHROM_TYPE_FCOE_BIOS 8 231#define FLASHROM_TYPE_FCOE_BIOS 8
@@ -234,9 +235,11 @@ struct be_eth_rx_compl {
234 235
235#define FLASHROM_OPER_FLASH 1 236#define FLASHROM_OPER_FLASH 1
236#define FLASHROM_OPER_SAVE 2 237#define FLASHROM_OPER_SAVE 2
238#define FLASHROM_OPER_REPORT 4
237 239
238#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */ 240#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
239#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */ 241#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
242#define FLASH_REDBOOT_IMAGE_MAX_SIZE (262144) /* Max redboot image sz */
240 243
241/* Offsets for components on Flash. */ 244/* Offsets for components on Flash. */
242#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576) 245#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
@@ -246,6 +249,8 @@ struct be_eth_rx_compl {
246#define FLASH_iSCSI_BIOS_START (7340032) 249#define FLASH_iSCSI_BIOS_START (7340032)
247#define FLASH_PXE_BIOS_START (7864320) 250#define FLASH_PXE_BIOS_START (7864320)
248#define FLASH_FCoE_BIOS_START (524288) 251#define FLASH_FCoE_BIOS_START (524288)
252#define FLASH_REDBOOT_START (32768)
253#define FLASH_REDBOOT_ISM_START (0)
249 254
250struct controller_id { 255struct controller_id {
251 u32 vendor; 256 u32 vendor;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 876b357101fa..795936439498 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -31,8 +31,10 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
31 31
32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
34 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
34 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 36 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
37 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
36 { 0 } 38 { 0 }
37}; 39};
38MODULE_DEVICE_TABLE(pci, be_dev_ids); 40MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -141,7 +143,7 @@ void netdev_stats_update(struct be_adapter *adapter)
141 struct be_rxf_stats *rxf_stats = &hw_stats->rxf; 143 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
142 struct be_port_rxf_stats *port_stats = 144 struct be_port_rxf_stats *port_stats =
143 &rxf_stats->port[adapter->port_num]; 145 &rxf_stats->port[adapter->port_num];
144 struct net_device_stats *dev_stats = &adapter->stats.net_stats; 146 struct net_device_stats *dev_stats = &adapter->netdev->stats;
145 struct be_erx_stats *erx_stats = &hw_stats->erx; 147 struct be_erx_stats *erx_stats = &hw_stats->erx;
146 148
147 dev_stats->rx_packets = port_stats->rx_total_frames; 149 dev_stats->rx_packets = port_stats->rx_total_frames;
@@ -168,7 +170,8 @@ void netdev_stats_update(struct be_adapter *adapter)
168 port_stats->rx_udp_checksum_errs; 170 port_stats->rx_udp_checksum_errs;
169 171
170 /* no space in linux buffers: best possible approximation */ 172 /* no space in linux buffers: best possible approximation */
171 dev_stats->rx_dropped = erx_stats->rx_drops_no_fragments[0]; 173 dev_stats->rx_dropped =
174 erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];
172 175
173 /* detailed rx errors */ 176 /* detailed rx errors */
174 dev_stats->rx_length_errors = port_stats->rx_in_range_errors + 177 dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
@@ -269,9 +272,7 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
269 272
270static struct net_device_stats *be_get_stats(struct net_device *dev) 273static struct net_device_stats *be_get_stats(struct net_device *dev)
271{ 274{
272 struct be_adapter *adapter = netdev_priv(dev); 275 return &dev->stats;
273
274 return &adapter->stats.net_stats;
275} 276}
276 277
277static u32 be_calc_rate(u64 bytes, unsigned long ticks) 278static u32 be_calc_rate(u64 bytes, unsigned long ticks)
@@ -562,13 +563,15 @@ static void be_set_multicast_list(struct net_device *netdev)
562 be_cmd_promiscuous_config(adapter, adapter->port_num, 0); 563 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
563 } 564 }
564 565
565 if (netdev->flags & IFF_ALLMULTI) { 566 /* Enable multicast promisc if num configured exceeds what we support */
566 be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0); 567 if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
568 be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
569 &adapter->mc_cmd_mem);
567 goto done; 570 goto done;
568 } 571 }
569 572
570 be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list, 573 be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
571 netdev->mc_count); 574 netdev->mc_count, &adapter->mc_cmd_mem);
572done: 575done:
573 return; 576 return;
574} 577}
@@ -758,7 +761,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
758 if ((adapter->cap == 0x400) && !vtm) 761 if ((adapter->cap == 0x400) && !vtm)
759 vlanf = 0; 762 vlanf = 0;
760 763
761 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); 764 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
762 if (!skb) { 765 if (!skb) {
763 if (net_ratelimit()) 766 if (net_ratelimit())
764 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 767 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -766,8 +769,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
766 return; 769 return;
767 } 770 }
768 771
769 skb_reserve(skb, NET_IP_ALIGN);
770
771 skb_fill_rx_data(adapter, skb, rxcp); 772 skb_fill_rx_data(adapter, skb, rxcp);
772 773
773 if (do_pkt_csum(rxcp, adapter->rx_csum)) 774 if (do_pkt_csum(rxcp, adapter->rx_csum))
@@ -1475,6 +1476,14 @@ static void be_worker(struct work_struct *work)
1475 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 1476 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
1476} 1477}
1477 1478
1479static void be_msix_disable(struct be_adapter *adapter)
1480{
1481 if (adapter->msix_enabled) {
1482 pci_disable_msix(adapter->pdev);
1483 adapter->msix_enabled = false;
1484 }
1485}
1486
1478static void be_msix_enable(struct be_adapter *adapter) 1487static void be_msix_enable(struct be_adapter *adapter)
1479{ 1488{
1480 int i, status; 1489 int i, status;
@@ -1590,6 +1599,8 @@ static int be_open(struct net_device *netdev)
1590 struct be_eq_obj *tx_eq = &adapter->tx_eq; 1599 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1591 bool link_up; 1600 bool link_up;
1592 int status; 1601 int status;
1602 u8 mac_speed;
1603 u16 link_speed;
1593 1604
1594 /* First time posting */ 1605 /* First time posting */
1595 be_post_rx_frags(adapter); 1606 be_post_rx_frags(adapter);
@@ -1608,7 +1619,8 @@ static int be_open(struct net_device *netdev)
1608 /* Rx compl queue may be in unarmed state; rearm it */ 1619 /* Rx compl queue may be in unarmed state; rearm it */
1609 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0); 1620 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
1610 1621
1611 status = be_cmd_link_status_query(adapter, &link_up); 1622 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1623 &link_speed);
1612 if (status) 1624 if (status)
1613 goto ret_sts; 1625 goto ret_sts;
1614 be_link_status_update(adapter, link_up); 1626 be_link_status_update(adapter, link_up);
@@ -1678,6 +1690,8 @@ static int be_clear(struct be_adapter *adapter)
1678 1690
1679 be_cmd_if_destroy(adapter, adapter->if_handle); 1691 be_cmd_if_destroy(adapter, adapter->if_handle);
1680 1692
1693 /* tell fw we're done with firing cmds */
1694 be_cmd_fw_clean(adapter);
1681 return 0; 1695 return 0;
1682} 1696}
1683 1697
@@ -1720,6 +1734,31 @@ static int be_close(struct net_device *netdev)
1720#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 1734#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
1721char flash_cookie[2][16] = {"*** SE FLAS", 1735char flash_cookie[2][16] = {"*** SE FLAS",
1722 "H DIRECTORY *** "}; 1736 "H DIRECTORY *** "};
1737
1738static bool be_flash_redboot(struct be_adapter *adapter,
1739 const u8 *p)
1740{
1741 u32 crc_offset;
1742 u8 flashed_crc[4];
1743 int status;
1744 crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
1745 + sizeof(struct flash_file_hdr) - 32*1024;
1746 p += crc_offset;
1747 status = be_cmd_get_flash_crc(adapter, flashed_crc);
1748 if (status) {
1749 dev_err(&adapter->pdev->dev,
1750 "could not get crc from flash, not flashing redboot\n");
1751 return false;
1752 }
1753
1754 /*update redboot only if crc does not match*/
1755 if (!memcmp(flashed_crc, p, 4))
1756 return false;
1757 else
1758 return true;
1759
1760}
1761
1723static int be_flash_image(struct be_adapter *adapter, 1762static int be_flash_image(struct be_adapter *adapter,
1724 const struct firmware *fw, 1763 const struct firmware *fw,
1725 struct be_dma_mem *flash_cmd, u32 flash_type) 1764 struct be_dma_mem *flash_cmd, u32 flash_type)
@@ -1759,6 +1798,12 @@ static int be_flash_image(struct be_adapter *adapter,
1759 image_offset = FLASH_PXE_BIOS_START; 1798 image_offset = FLASH_PXE_BIOS_START;
1760 image_size = FLASH_BIOS_IMAGE_MAX_SIZE; 1799 image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
1761 break; 1800 break;
1801 case FLASHROM_TYPE_REDBOOT:
1802 if (!be_flash_redboot(adapter, fw->data))
1803 return 0;
1804 image_offset = FLASH_REDBOOT_ISM_START;
1805 image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
1806 break;
1762 default: 1807 default:
1763 return 0; 1808 return 0;
1764 } 1809 }
@@ -1977,34 +2022,61 @@ static void be_ctrl_cleanup(struct be_adapter *adapter)
1977 if (mem->va) 2022 if (mem->va)
1978 pci_free_consistent(adapter->pdev, mem->size, 2023 pci_free_consistent(adapter->pdev, mem->size,
1979 mem->va, mem->dma); 2024 mem->va, mem->dma);
2025
2026 mem = &adapter->mc_cmd_mem;
2027 if (mem->va)
2028 pci_free_consistent(adapter->pdev, mem->size,
2029 mem->va, mem->dma);
1980} 2030}
1981 2031
1982static int be_ctrl_init(struct be_adapter *adapter) 2032static int be_ctrl_init(struct be_adapter *adapter)
1983{ 2033{
1984 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced; 2034 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
1985 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem; 2035 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
2036 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
1986 int status; 2037 int status;
1987 2038
1988 status = be_map_pci_bars(adapter); 2039 status = be_map_pci_bars(adapter);
1989 if (status) 2040 if (status)
1990 return status; 2041 goto done;
1991 2042
1992 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 2043 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
1993 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev, 2044 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
1994 mbox_mem_alloc->size, &mbox_mem_alloc->dma); 2045 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
1995 if (!mbox_mem_alloc->va) { 2046 if (!mbox_mem_alloc->va) {
1996 be_unmap_pci_bars(adapter); 2047 status = -ENOMEM;
1997 return -1; 2048 goto unmap_pci_bars;
1998 } 2049 }
2050
1999 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 2051 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2000 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 2052 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2001 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 2053 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2002 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 2054 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
2055
2056 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2057 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2058 &mc_cmd_mem->dma);
2059 if (mc_cmd_mem->va == NULL) {
2060 status = -ENOMEM;
2061 goto free_mbox;
2062 }
2063 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2064
2003 spin_lock_init(&adapter->mbox_lock); 2065 spin_lock_init(&adapter->mbox_lock);
2004 spin_lock_init(&adapter->mcc_lock); 2066 spin_lock_init(&adapter->mcc_lock);
2005 spin_lock_init(&adapter->mcc_cq_lock); 2067 spin_lock_init(&adapter->mcc_cq_lock);
2006 2068
2007 return 0; 2069 return 0;
2070
2071free_mbox:
2072 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2073 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2074
2075unmap_pci_bars:
2076 be_unmap_pci_bars(adapter);
2077
2078done:
2079 return status;
2008} 2080}
2009 2081
2010static void be_stats_cleanup(struct be_adapter *adapter) 2082static void be_stats_cleanup(struct be_adapter *adapter)
@@ -2032,6 +2104,7 @@ static int be_stats_init(struct be_adapter *adapter)
2032static void __devexit be_remove(struct pci_dev *pdev) 2104static void __devexit be_remove(struct pci_dev *pdev)
2033{ 2105{
2034 struct be_adapter *adapter = pci_get_drvdata(pdev); 2106 struct be_adapter *adapter = pci_get_drvdata(pdev);
2107
2035 if (!adapter) 2108 if (!adapter)
2036 return; 2109 return;
2037 2110
@@ -2043,10 +2116,7 @@ static void __devexit be_remove(struct pci_dev *pdev)
2043 2116
2044 be_ctrl_cleanup(adapter); 2117 be_ctrl_cleanup(adapter);
2045 2118
2046 if (adapter->msix_enabled) { 2119 be_msix_disable(adapter);
2047 pci_disable_msix(adapter->pdev);
2048 adapter->msix_enabled = false;
2049 }
2050 2120
2051 pci_set_drvdata(pdev, NULL); 2121 pci_set_drvdata(pdev, NULL);
2052 pci_release_regions(pdev); 2122 pci_release_regions(pdev);
@@ -2055,25 +2125,28 @@ static void __devexit be_remove(struct pci_dev *pdev)
2055 free_netdev(adapter->netdev); 2125 free_netdev(adapter->netdev);
2056} 2126}
2057 2127
2058static int be_hw_up(struct be_adapter *adapter) 2128static int be_get_config(struct be_adapter *adapter)
2059{ 2129{
2060 int status; 2130 int status;
2131 u8 mac[ETH_ALEN];
2061 2132
2062 status = be_cmd_POST(adapter); 2133 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
2063 if (status) 2134 if (status)
2064 return status; 2135 return status;
2065 2136
2066 status = be_cmd_reset_function(adapter); 2137 status = be_cmd_query_fw_cfg(adapter,
2138 &adapter->port_num, &adapter->cap);
2067 if (status) 2139 if (status)
2068 return status; 2140 return status;
2069 2141
2070 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); 2142 memset(mac, 0, ETH_ALEN);
2143 status = be_cmd_mac_addr_query(adapter, mac,
2144 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2071 if (status) 2145 if (status)
2072 return status; 2146 return status;
2147 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2073 2148
2074 status = be_cmd_query_fw_cfg(adapter, 2149 return 0;
2075 &adapter->port_num, &adapter->cap);
2076 return status;
2077} 2150}
2078 2151
2079static int __devinit be_probe(struct pci_dev *pdev, 2152static int __devinit be_probe(struct pci_dev *pdev,
@@ -2082,7 +2155,6 @@ static int __devinit be_probe(struct pci_dev *pdev,
2082 int status = 0; 2155 int status = 0;
2083 struct be_adapter *adapter; 2156 struct be_adapter *adapter;
2084 struct net_device *netdev; 2157 struct net_device *netdev;
2085 u8 mac[ETH_ALEN];
2086 2158
2087 status = pci_enable_device(pdev); 2159 status = pci_enable_device(pdev);
2088 if (status) 2160 if (status)
@@ -2102,6 +2174,8 @@ static int __devinit be_probe(struct pci_dev *pdev,
2102 adapter->pdev = pdev; 2174 adapter->pdev = pdev;
2103 pci_set_drvdata(pdev, adapter); 2175 pci_set_drvdata(pdev, adapter);
2104 adapter->netdev = netdev; 2176 adapter->netdev = netdev;
2177 be_netdev_init(netdev);
2178 SET_NETDEV_DEV(netdev, &pdev->dev);
2105 2179
2106 be_msix_enable(adapter); 2180 be_msix_enable(adapter);
2107 2181
@@ -2120,27 +2194,34 @@ static int __devinit be_probe(struct pci_dev *pdev,
2120 if (status) 2194 if (status)
2121 goto free_netdev; 2195 goto free_netdev;
2122 2196
2123 status = be_stats_init(adapter); 2197 /* sync up with fw's ready state */
2198 status = be_cmd_POST(adapter);
2124 if (status) 2199 if (status)
2125 goto ctrl_clean; 2200 goto ctrl_clean;
2126 2201
2127 status = be_hw_up(adapter); 2202 /* tell fw we're ready to fire cmds */
2203 status = be_cmd_fw_init(adapter);
2128 if (status) 2204 if (status)
2129 goto stats_clean; 2205 goto ctrl_clean;
2130 2206
2131 status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK, 2207 status = be_cmd_reset_function(adapter);
2132 true /* permanent */, 0); 2208 if (status)
2209 goto ctrl_clean;
2210
2211 status = be_stats_init(adapter);
2212 if (status)
2213 goto ctrl_clean;
2214
2215 status = be_get_config(adapter);
2133 if (status) 2216 if (status)
2134 goto stats_clean; 2217 goto stats_clean;
2135 memcpy(netdev->dev_addr, mac, ETH_ALEN);
2136 2218
2137 INIT_DELAYED_WORK(&adapter->work, be_worker); 2219 INIT_DELAYED_WORK(&adapter->work, be_worker);
2138 be_netdev_init(netdev);
2139 SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
2140 2220
2141 status = be_setup(adapter); 2221 status = be_setup(adapter);
2142 if (status) 2222 if (status)
2143 goto stats_clean; 2223 goto stats_clean;
2224
2144 status = register_netdev(netdev); 2225 status = register_netdev(netdev);
2145 if (status != 0) 2226 if (status != 0)
2146 goto unsetup; 2227 goto unsetup;
@@ -2155,7 +2236,9 @@ stats_clean:
2155ctrl_clean: 2236ctrl_clean:
2156 be_ctrl_cleanup(adapter); 2237 be_ctrl_cleanup(adapter);
2157free_netdev: 2238free_netdev:
2239 be_msix_disable(adapter);
2158 free_netdev(adapter->netdev); 2240 free_netdev(adapter->netdev);
2241 pci_set_drvdata(pdev, NULL);
2159rel_reg: 2242rel_reg:
2160 pci_release_regions(pdev); 2243 pci_release_regions(pdev);
2161disable_dev: 2244disable_dev:
@@ -2200,6 +2283,11 @@ static int be_resume(struct pci_dev *pdev)
2200 pci_set_power_state(pdev, 0); 2283 pci_set_power_state(pdev, 0);
2201 pci_restore_state(pdev); 2284 pci_restore_state(pdev);
2202 2285
2286 /* tell fw we're ready to fire cmds */
2287 status = be_cmd_fw_init(adapter);
2288 if (status)
2289 return status;
2290
2203 be_setup(adapter); 2291 be_setup(adapter);
2204 if (netif_running(netdev)) { 2292 if (netif_running(netdev)) {
2205 rtnl_lock(); 2293 rtnl_lock();
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 08cddb6ff740..539d23b594ce 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1466,6 +1466,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1466 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1466 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1467 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1467 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1468 bmcr |= BCM5708S_BMCR_FORCE_2500; 1468 bmcr |= BCM5708S_BMCR_FORCE_2500;
1469 } else {
1470 return;
1469 } 1471 }
1470 1472
1471 if (bp->autoneg & AUTONEG_SPEED) { 1473 if (bp->autoneg & AUTONEG_SPEED) {
@@ -1500,6 +1502,8 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
1500 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1502 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1501 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1503 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1502 bmcr &= ~BCM5708S_BMCR_FORCE_2500; 1504 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1505 } else {
1506 return;
1503 } 1507 }
1504 1508
1505 if (bp->autoneg & AUTONEG_SPEED) 1509 if (bp->autoneg & AUTONEG_SPEED)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index bbf842284ebb..602ab86b6392 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,6 +24,10 @@
24#define BCM_VLAN 1 24#define BCM_VLAN 1
25#endif 25#endif
26 26
27#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
28#define BCM_CNIC 1
29#include "cnic_if.h"
30#endif
27 31
28#define BNX2X_MULTI_QUEUE 32#define BNX2X_MULTI_QUEUE
29 33
@@ -255,9 +259,6 @@ struct bnx2x_eth_q_stats {
255struct bnx2x_fastpath { 259struct bnx2x_fastpath {
256 260
257 struct napi_struct napi; 261 struct napi_struct napi;
258
259 u8 is_rx_queue;
260
261 struct host_status_block *status_blk; 262 struct host_status_block *status_blk;
262 dma_addr_t status_blk_mapping; 263 dma_addr_t status_blk_mapping;
263 264
@@ -762,7 +763,11 @@ struct bnx2x_eth_stats {
762 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 763 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
763 764
764 765
766#ifdef BCM_CNIC
767#define MAX_CONTEXT 15
768#else
765#define MAX_CONTEXT 16 769#define MAX_CONTEXT 16
770#endif
766 771
767union cdu_context { 772union cdu_context {
768 struct eth_context eth; 773 struct eth_context eth;
@@ -811,13 +816,21 @@ struct bnx2x {
811 struct bnx2x_fastpath fp[MAX_CONTEXT]; 816 struct bnx2x_fastpath fp[MAX_CONTEXT];
812 void __iomem *regview; 817 void __iomem *regview;
813 void __iomem *doorbells; 818 void __iomem *doorbells;
819#ifdef BCM_CNIC
820#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
821#else
814#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE) 822#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
823#endif
815 824
816 struct net_device *dev; 825 struct net_device *dev;
817 struct pci_dev *pdev; 826 struct pci_dev *pdev;
818 827
819 atomic_t intr_sem; 828 atomic_t intr_sem;
829#ifdef BCM_CNIC
830 struct msix_entry msix_table[MAX_CONTEXT+2];
831#else
820 struct msix_entry msix_table[MAX_CONTEXT+1]; 832 struct msix_entry msix_table[MAX_CONTEXT+1];
833#endif
821#define INT_MODE_INTx 1 834#define INT_MODE_INTx 1
822#define INT_MODE_MSI 2 835#define INT_MODE_MSI 2
823#define INT_MODE_MSIX 3 836#define INT_MODE_MSIX 3
@@ -863,8 +876,8 @@ struct bnx2x {
863 876
864 /* Flags for marking that there is a STAT_QUERY or 877 /* Flags for marking that there is a STAT_QUERY or
865 SET_MAC ramrod pending */ 878 SET_MAC ramrod pending */
866 u8 stats_pending; 879 int stats_pending;
867 u8 set_mac_pending; 880 int set_mac_pending;
868 881
869 /* End of fields used in the performance code paths */ 882 /* End of fields used in the performance code paths */
870 883
@@ -884,6 +897,7 @@ struct bnx2x {
884#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) 897#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
885#define HW_VLAN_TX_FLAG 0x400 898#define HW_VLAN_TX_FLAG 0x400
886#define HW_VLAN_RX_FLAG 0x800 899#define HW_VLAN_RX_FLAG 0x800
900#define MF_FUNC_DIS 0x1000
887 901
888 int func; 902 int func;
889#define BP_PORT(bp) (bp->func % PORT_MAX) 903#define BP_PORT(bp) (bp->func % PORT_MAX)
@@ -891,6 +905,11 @@ struct bnx2x {
891#define BP_E1HVN(bp) (bp->func >> 1) 905#define BP_E1HVN(bp) (bp->func >> 1)
892#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 906#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
893 907
908#ifdef BCM_CNIC
909#define BCM_CNIC_CID_START 16
910#define BCM_ISCSI_ETH_CL_ID 17
911#endif
912
894 int pm_cap; 913 int pm_cap;
895 int pcie_cap; 914 int pcie_cap;
896 int mrrs; 915 int mrrs;
@@ -944,13 +963,11 @@ struct bnx2x {
944#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 963#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
945#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 964#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
946#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 965#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
947#define BNX2X_STATE_DISABLED 0xd000
948#define BNX2X_STATE_DIAG 0xe000 966#define BNX2X_STATE_DIAG 0xe000
949#define BNX2X_STATE_ERROR 0xf000 967#define BNX2X_STATE_ERROR 0xf000
950 968
951 int multi_mode; 969 int multi_mode;
952 int num_rx_queues; 970 int num_queues;
953 int num_tx_queues;
954 971
955 u32 rx_mode; 972 u32 rx_mode;
956#define BNX2X_RX_MODE_NONE 0 973#define BNX2X_RX_MODE_NONE 0
@@ -960,28 +977,51 @@ struct bnx2x {
960#define BNX2X_MAX_MULTICAST 64 977#define BNX2X_MAX_MULTICAST 64
961#define BNX2X_MAX_EMUL_MULTI 16 978#define BNX2X_MAX_EMUL_MULTI 16
962 979
980 u32 rx_mode_cl_mask;
981
963 dma_addr_t def_status_blk_mapping; 982 dma_addr_t def_status_blk_mapping;
964 983
965 struct bnx2x_slowpath *slowpath; 984 struct bnx2x_slowpath *slowpath;
966 dma_addr_t slowpath_mapping; 985 dma_addr_t slowpath_mapping;
967 986
968#ifdef BCM_ISCSI
969 void *t1;
970 dma_addr_t t1_mapping;
971 void *t2;
972 dma_addr_t t2_mapping;
973 void *timers;
974 dma_addr_t timers_mapping;
975 void *qm;
976 dma_addr_t qm_mapping;
977#endif
978
979 int dropless_fc; 987 int dropless_fc;
980 988
989#ifdef BCM_CNIC
990 u32 cnic_flags;
991#define BNX2X_CNIC_FLAG_MAC_SET 1
992
993 void *t1;
994 dma_addr_t t1_mapping;
995 void *t2;
996 dma_addr_t t2_mapping;
997 void *timers;
998 dma_addr_t timers_mapping;
999 void *qm;
1000 dma_addr_t qm_mapping;
1001 struct cnic_ops *cnic_ops;
1002 void *cnic_data;
1003 u32 cnic_tag;
1004 struct cnic_eth_dev cnic_eth_dev;
1005 struct host_status_block *cnic_sb;
1006 dma_addr_t cnic_sb_mapping;
1007#define CNIC_SB_ID(bp) BP_L_ID(bp)
1008 struct eth_spe *cnic_kwq;
1009 struct eth_spe *cnic_kwq_prod;
1010 struct eth_spe *cnic_kwq_cons;
1011 struct eth_spe *cnic_kwq_last;
1012 u16 cnic_kwq_pending;
1013 u16 cnic_spq_pending;
1014 struct mutex cnic_mutex;
1015 u8 iscsi_mac[6];
1016#endif
1017
981 int dmae_ready; 1018 int dmae_ready;
982 /* used to synchronize dmae accesses */ 1019 /* used to synchronize dmae accesses */
983 struct mutex dmae_mutex; 1020 struct mutex dmae_mutex;
984 1021
1022 /* used to protect the FW mail box */
1023 struct mutex fw_mb_mutex;
1024
985 /* used to synchronize stats collecting */ 1025 /* used to synchronize stats collecting */
986 int stats_state; 1026 int stats_state;
987 /* used by dmae command loader */ 1027 /* used by dmae command loader */
@@ -1030,20 +1070,15 @@ struct bnx2x {
1030}; 1070};
1031 1071
1032 1072
1033#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/(2 * E1HVN_MAX)) \ 1073#define BNX2X_MAX_QUEUES(bp) (IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
1034 : (MAX_CONTEXT/2)) 1074 : MAX_CONTEXT)
1035#define BNX2X_NUM_QUEUES(bp) (bp->num_rx_queues + bp->num_tx_queues) 1075#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1036#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 2) 1076#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1037 1077
1038#define for_each_rx_queue(bp, var) \
1039 for (var = 0; var < bp->num_rx_queues; var++)
1040#define for_each_tx_queue(bp, var) \
1041 for (var = bp->num_rx_queues; \
1042 var < BNX2X_NUM_QUEUES(bp); var++)
1043#define for_each_queue(bp, var) \ 1078#define for_each_queue(bp, var) \
1044 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) 1079 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
1045#define for_each_nondefault_queue(bp, var) \ 1080#define for_each_nondefault_queue(bp, var) \
1046 for (var = 1; var < bp->num_rx_queues; var++) 1081 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)
1047 1082
1048 1083
1049void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32); 1084void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
@@ -1147,7 +1182,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1147#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) 1182#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1148 1183
1149 1184
1150#define BNX2X_BTR 3 1185#define BNX2X_BTR 1
1151#define MAX_SPQ_PENDING 8 1186#define MAX_SPQ_PENDING 8
1152 1187
1153 1188
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 8e2261fad485..52585338ada8 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -7,6 +7,20 @@
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9 9
10struct license_key {
11 u32 reserved[6];
12
13#if defined(__BIG_ENDIAN)
14 u16 max_iscsi_init_conn;
15 u16 max_iscsi_trgt_conn;
16#elif defined(__LITTLE_ENDIAN)
17 u16 max_iscsi_trgt_conn;
18 u16 max_iscsi_init_conn;
19#endif
20
21 u32 reserved_a[6];
22};
23
10 24
11#define PORT_0 0 25#define PORT_0 0
12#define PORT_1 1 26#define PORT_1 1
@@ -250,6 +264,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
250#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 264#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
251#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
252#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
267#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
253#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 268#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
254#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 269#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
255 270
@@ -881,7 +896,7 @@ struct shmem_region { /* SharedMem Offset (size) */
881 896
882 struct shm_dev_info dev_info; /* 0x8 (0x438) */ 897 struct shm_dev_info dev_info; /* 0x8 (0x438) */
883 898
884 u8 reserved[52*PORT_MAX]; 899 struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
885 900
886 /* FW information (for internal FW use) */ 901 /* FW information (for internal FW use) */
887 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ 902 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
@@ -1245,8 +1260,8 @@ struct host_func_stats {
1245 1260
1246 1261
1247#define BCM_5710_FW_MAJOR_VERSION 5 1262#define BCM_5710_FW_MAJOR_VERSION 5
1248#define BCM_5710_FW_MINOR_VERSION 0 1263#define BCM_5710_FW_MINOR_VERSION 2
1249#define BCM_5710_FW_REVISION_VERSION 21 1264#define BCM_5710_FW_REVISION_VERSION 7
1250#define BCM_5710_FW_ENGINEERING_VERSION 0 1265#define BCM_5710_FW_ENGINEERING_VERSION 0
1251#define BCM_5710_FW_COMPILE_FLAGS 1 1266#define BCM_5710_FW_COMPILE_FLAGS 1
1252 1267
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index e32d3370862e..41b9b7bd3d8e 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1107,18 +1107,21 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
1107 MDIO_REG_BANK_SERDES_DIGITAL, 1107 MDIO_REG_BANK_SERDES_DIGITAL,
1108 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1108 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1109 &control2); 1109 &control2);
1110 1110 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1111 1111 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1112 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1112 else
1113 1113 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 1114 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1115 params->speed_cap_mask, control2);
1115 CL45_WR_OVER_CL22(bp, params->port, 1116 CL45_WR_OVER_CL22(bp, params->port,
1116 params->phy_addr, 1117 params->phy_addr,
1117 MDIO_REG_BANK_SERDES_DIGITAL, 1118 MDIO_REG_BANK_SERDES_DIGITAL,
1118 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1119 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1119 control2); 1120 control2);
1120 1121
1121 if (phy_flags & PHY_XGXS_FLAG) { 1122 if ((phy_flags & PHY_XGXS_FLAG) &&
1123 (params->speed_cap_mask &
1124 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1122 DP(NETIF_MSG_LINK, "XGXS\n"); 1125 DP(NETIF_MSG_LINK, "XGXS\n");
1123 1126
1124 CL45_WR_OVER_CL22(bp, params->port, 1127 CL45_WR_OVER_CL22(bp, params->port,
@@ -1225,7 +1228,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1225 params->phy_addr, 1228 params->phy_addr,
1226 MDIO_REG_BANK_CL73_USERB0, 1229 MDIO_REG_BANK_CL73_USERB0,
1227 MDIO_CL73_USERB0_CL73_UCTRL, 1230 MDIO_CL73_USERB0_CL73_UCTRL,
1228 MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL); 1231 0xe);
1229 1232
1230 /* Enable BAM Station Manager*/ 1233 /* Enable BAM Station Manager*/
1231 CL45_WR_OVER_CL22(bp, params->port, 1234 CL45_WR_OVER_CL22(bp, params->port,
@@ -1236,29 +1239,25 @@ static void bnx2x_set_autoneg(struct link_params *params,
1236 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN | 1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
1237 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1238 1241
1239 /* Merge CL73 and CL37 aneg resolution */ 1242 /* Advertise CL73 link speeds */
1240 CL45_RD_OVER_CL22(bp, params->port,
1241 params->phy_addr,
1242 MDIO_REG_BANK_CL73_USERB0,
1243 MDIO_CL73_USERB0_CL73_BAM_CTRL3,
1244 &reg_val);
1245
1246 if (params->speed_cap_mask &
1247 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
1248 /* Set the CL73 AN speed */
1249 CL45_RD_OVER_CL22(bp, params->port, 1243 CL45_RD_OVER_CL22(bp, params->port,
1250 params->phy_addr, 1244 params->phy_addr,
1251 MDIO_REG_BANK_CL73_IEEEB1, 1245 MDIO_REG_BANK_CL73_IEEEB1,
1252 MDIO_CL73_IEEEB1_AN_ADV2, 1246 MDIO_CL73_IEEEB1_AN_ADV2,
1253 &reg_val); 1247 &reg_val);
1248 if (params->speed_cap_mask &
1249 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1250 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1251 if (params->speed_cap_mask &
1252 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1253 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1254 1254
1255 CL45_WR_OVER_CL22(bp, params->port, 1255 CL45_WR_OVER_CL22(bp, params->port,
1256 params->phy_addr, 1256 params->phy_addr,
1257 MDIO_REG_BANK_CL73_IEEEB1, 1257 MDIO_REG_BANK_CL73_IEEEB1,
1258 MDIO_CL73_IEEEB1_AN_ADV2, 1258 MDIO_CL73_IEEEB1_AN_ADV2,
1259 reg_val | MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4); 1259 reg_val);
1260 1260
1261 }
1262 /* CL73 Autoneg Enabled */ 1261 /* CL73 Autoneg Enabled */
1263 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 1262 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
1264 1263
@@ -1351,6 +1350,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1351 1350
1352static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc) 1351static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1353{ 1352{
1353 struct bnx2x *bp = params->bp;
1354 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 1354 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1355 /* resolve pause mode and advertisement 1355 /* resolve pause mode and advertisement
1356 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1356 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
@@ -1380,18 +1380,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1380 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 1380 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1381 break; 1381 break;
1382 } 1382 }
1383 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1383} 1384}
1384 1385
1385static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1386static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1386 u16 ieee_fc) 1387 u16 ieee_fc)
1387{ 1388{
1388 struct bnx2x *bp = params->bp; 1389 struct bnx2x *bp = params->bp;
1390 u16 val;
1389 /* for AN, we are always publishing full duplex */ 1391 /* for AN, we are always publishing full duplex */
1390 1392
1391 CL45_WR_OVER_CL22(bp, params->port, 1393 CL45_WR_OVER_CL22(bp, params->port,
1392 params->phy_addr, 1394 params->phy_addr,
1393 MDIO_REG_BANK_COMBO_IEEE0, 1395 MDIO_REG_BANK_COMBO_IEEE0,
1394 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 1396 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1397 CL45_RD_OVER_CL22(bp, params->port,
1398 params->phy_addr,
1399 MDIO_REG_BANK_CL73_IEEEB1,
1400 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1401 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1402 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1403 CL45_WR_OVER_CL22(bp, params->port,
1404 params->phy_addr,
1405 MDIO_REG_BANK_CL73_IEEEB1,
1406 MDIO_CL73_IEEEB1_AN_ADV1, val);
1395} 1407}
1396 1408
1397static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73) 1409static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
@@ -1609,6 +1621,39 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1609 return ret; 1621 return ret;
1610} 1622}
1611 1623
1624static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1625{
1626 struct bnx2x *bp = params->bp;
1627 u16 pd_10g, status2_1000x;
1628 CL45_RD_OVER_CL22(bp, params->port,
1629 params->phy_addr,
1630 MDIO_REG_BANK_SERDES_DIGITAL,
1631 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1632 &status2_1000x);
1633 CL45_RD_OVER_CL22(bp, params->port,
1634 params->phy_addr,
1635 MDIO_REG_BANK_SERDES_DIGITAL,
1636 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1637 &status2_1000x);
1638 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
1639 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
1640 params->port);
1641 return 1;
1642 }
1643
1644 CL45_RD_OVER_CL22(bp, params->port,
1645 params->phy_addr,
1646 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1647 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1648 &pd_10g);
1649
1650 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
1651 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
1652 params->port);
1653 return 1;
1654 }
1655 return 0;
1656}
1612 1657
1613static void bnx2x_flow_ctrl_resolve(struct link_params *params, 1658static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1614 struct link_vars *vars, 1659 struct link_vars *vars,
@@ -1627,21 +1672,53 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1627 (!(vars->phy_flags & PHY_SGMII_FLAG)) && 1672 (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
1628 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1673 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1629 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) { 1674 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1630 CL45_RD_OVER_CL22(bp, params->port, 1675 if (bnx2x_direct_parallel_detect_used(params)) {
1631 params->phy_addr, 1676 vars->flow_ctrl = params->req_fc_auto_adv;
1632 MDIO_REG_BANK_COMBO_IEEE0, 1677 return;
1633 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 1678 }
1634 &ld_pause); 1679 if ((gp_status &
1635 CL45_RD_OVER_CL22(bp, params->port, 1680 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1636 params->phy_addr, 1681 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
1637 MDIO_REG_BANK_COMBO_IEEE0, 1682 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1638 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 1683 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1639 &lp_pause); 1684
1640 pause_result = (ld_pause & 1685 CL45_RD_OVER_CL22(bp, params->port,
1686 params->phy_addr,
1687 MDIO_REG_BANK_CL73_IEEEB1,
1688 MDIO_CL73_IEEEB1_AN_ADV1,
1689 &ld_pause);
1690 CL45_RD_OVER_CL22(bp, params->port,
1691 params->phy_addr,
1692 MDIO_REG_BANK_CL73_IEEEB1,
1693 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1694 &lp_pause);
1695 pause_result = (ld_pause &
1696 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
1697 >> 8;
1698 pause_result |= (lp_pause &
1699 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
1700 >> 10;
1701 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1702 pause_result);
1703 } else {
1704
1705 CL45_RD_OVER_CL22(bp, params->port,
1706 params->phy_addr,
1707 MDIO_REG_BANK_COMBO_IEEE0,
1708 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1709 &ld_pause);
1710 CL45_RD_OVER_CL22(bp, params->port,
1711 params->phy_addr,
1712 MDIO_REG_BANK_COMBO_IEEE0,
1713 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1714 &lp_pause);
1715 pause_result = (ld_pause &
1641 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 1716 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1642 pause_result |= (lp_pause & 1717 pause_result |= (lp_pause &
1643 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 1718 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1644 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); 1719 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
1720 pause_result);
1721 }
1645 bnx2x_pause_resolve(vars, pause_result); 1722 bnx2x_pause_resolve(vars, pause_result);
1646 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 1723 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1647 (bnx2x_ext_phy_resolve_fc(params, vars))) { 1724 (bnx2x_ext_phy_resolve_fc(params, vars))) {
@@ -1853,6 +1930,8 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1853 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1930 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1854 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1855 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1856 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) { 1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1857 vars->autoneg = AUTO_NEG_ENABLED; 1936 vars->autoneg = AUTO_NEG_ENABLED;
1858 1937
@@ -1987,8 +2066,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
1987 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 2066 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
1988 mode); 2067 mode);
1989 2068
1990 bnx2x_set_led(bp, params->port, LED_MODE_OPER, 2069 bnx2x_set_led(params, LED_MODE_OPER, line_speed);
1991 line_speed, params->hw_led_mode, params->chip_id);
1992 return 0; 2070 return 0;
1993} 2071}
1994 2072
@@ -2122,6 +2200,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
2122 MDIO_PMA_REG_CTRL, 2200 MDIO_PMA_REG_CTRL,
2123 1<<15); 2201 1<<15);
2124 break; 2202 break;
2203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2204 break;
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2126 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n"); 2206 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2127 break; 2207 break;
@@ -2512,16 +2592,11 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2512 /* Need to wait 100ms after reset */ 2592 /* Need to wait 100ms after reset */
2513 msleep(100); 2593 msleep(100);
2514 2594
2515 /* Set serial boot control for external load */
2516 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2517 MDIO_PMA_DEVAD,
2518 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2519
2520 /* Micro controller re-boot */ 2595 /* Micro controller re-boot */
2521 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2596 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2522 MDIO_PMA_DEVAD, 2597 MDIO_PMA_DEVAD,
2523 MDIO_PMA_REG_GEN_CTRL, 2598 MDIO_PMA_REG_GEN_CTRL,
2524 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 2599 0x018B);
2525 2600
2526 /* Set soft reset */ 2601 /* Set soft reset */
2527 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2602 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
@@ -2529,14 +2604,10 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2529 MDIO_PMA_REG_GEN_CTRL, 2604 MDIO_PMA_REG_GEN_CTRL,
2530 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 2605 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2531 2606
2532 /* Set PLL register value to be same like in P13 ver */
2533 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2607 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2534 MDIO_PMA_DEVAD, 2608 MDIO_PMA_DEVAD,
2535 MDIO_PMA_REG_PLL_CTRL, 2609 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2536 0x73A0);
2537 2610
2538 /* Clear soft reset.
2539 Will automatically reset micro-controller re-boot */
2540 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2611 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2541 MDIO_PMA_DEVAD, 2612 MDIO_PMA_DEVAD,
2542 MDIO_PMA_REG_GEN_CTRL, 2613 MDIO_PMA_REG_GEN_CTRL,
@@ -3462,8 +3533,8 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3462 MDIO_PMA_REG_8481_LINK_SIGNAL, 3533 MDIO_PMA_REG_8481_LINK_SIGNAL,
3463 &val1); 3534 &val1);
3464 /* Set bit 2 to 0, and bits [1:0] to 10 */ 3535 /* Set bit 2 to 0, and bits [1:0] to 10 */
3465 val1 &= ~((1<<0) | (1<<2)); /* Clear bits 0,2*/ 3536 val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
3466 val1 |= (1<<1); /* Set bit 1 */ 3537 val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
3467 3538
3468 bnx2x_cl45_write(bp, params->port, 3539 bnx2x_cl45_write(bp, params->port,
3469 ext_phy_type, 3540 ext_phy_type,
@@ -3497,36 +3568,19 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3497 MDIO_PMA_REG_8481_LED2_MASK, 3568 MDIO_PMA_REG_8481_LED2_MASK,
3498 0); 3569 0);
3499 3570
3500 /* LED3 (10G/1G/100/10G Activity) */ 3571 /* Unmask LED3 for 10G link */
3501 bnx2x_cl45_read(bp, params->port,
3502 ext_phy_type,
3503 ext_phy_addr,
3504 MDIO_PMA_DEVAD,
3505 MDIO_PMA_REG_8481_LINK_SIGNAL,
3506 &val1);
3507 /* Enable blink based on source 4(Activity) */
3508 val1 &= ~((1<<7) | (1<<8)); /* Clear bits 7,8 */
3509 val1 |= (1<<6); /* Set only bit 6 */
3510 bnx2x_cl45_write(bp, params->port, 3572 bnx2x_cl45_write(bp, params->port,
3511 ext_phy_type, 3573 ext_phy_type,
3512 ext_phy_addr, 3574 ext_phy_addr,
3513 MDIO_PMA_DEVAD, 3575 MDIO_PMA_DEVAD,
3514 MDIO_PMA_REG_8481_LINK_SIGNAL,
3515 val1);
3516
3517 bnx2x_cl45_read(bp, params->port,
3518 ext_phy_type,
3519 ext_phy_addr,
3520 MDIO_PMA_DEVAD,
3521 MDIO_PMA_REG_8481_LED3_MASK, 3576 MDIO_PMA_REG_8481_LED3_MASK,
3522 &val1); 3577 0x6);
3523 val1 |= (1<<4); /* Unmask LED3 for 10G link */
3524 bnx2x_cl45_write(bp, params->port, 3578 bnx2x_cl45_write(bp, params->port,
3525 ext_phy_type, 3579 ext_phy_type,
3526 ext_phy_addr, 3580 ext_phy_addr,
3527 MDIO_PMA_DEVAD, 3581 MDIO_PMA_DEVAD,
3528 MDIO_PMA_REG_8481_LED3_MASK, 3582 MDIO_PMA_REG_8481_LED3_BLINK,
3529 val1); 3583 0);
3530} 3584}
3531 3585
3532 3586
@@ -3544,7 +3598,10 @@ static void bnx2x_init_internal_phy(struct link_params *params,
3544 bnx2x_set_preemphasis(params); 3598 bnx2x_set_preemphasis(params);
3545 3599
3546 /* forced speed requested? */ 3600 /* forced speed requested? */
3547 if (vars->line_speed != SPEED_AUTO_NEG) { 3601 if (vars->line_speed != SPEED_AUTO_NEG ||
3602 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
3603 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3604 params->loopback_mode == LOOPBACK_EXT)) {
3548 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 3605 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3549 3606
3550 /* disable autoneg */ 3607 /* disable autoneg */
@@ -3693,19 +3750,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3693 } 3750 }
3694 } 3751 }
3695 /* Force speed */ 3752 /* Force speed */
3696 /* First enable LASI */
3697 bnx2x_cl45_write(bp, params->port,
3698 ext_phy_type,
3699 ext_phy_addr,
3700 MDIO_PMA_DEVAD,
3701 MDIO_PMA_REG_RX_ALARM_CTRL,
3702 0x0400);
3703 bnx2x_cl45_write(bp, params->port,
3704 ext_phy_type,
3705 ext_phy_addr,
3706 MDIO_PMA_DEVAD,
3707 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3708
3709 if (params->req_line_speed == SPEED_10000) { 3753 if (params->req_line_speed == SPEED_10000) {
3710 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n"); 3754 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3711 3755
@@ -3715,6 +3759,9 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3715 MDIO_PMA_DEVAD, 3759 MDIO_PMA_DEVAD,
3716 MDIO_PMA_REG_DIGITAL_CTRL, 3760 MDIO_PMA_REG_DIGITAL_CTRL,
3717 0x400); 3761 0x400);
3762 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3763 ext_phy_addr, MDIO_PMA_DEVAD,
3764 MDIO_PMA_REG_LASI_CTRL, 1);
3718 } else { 3765 } else {
3719 /* Force 1Gbps using autoneg with 1G 3766 /* Force 1Gbps using autoneg with 1G
3720 advertisment */ 3767 advertisment */
@@ -3756,6 +3803,17 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3756 MDIO_AN_DEVAD, 3803 MDIO_AN_DEVAD,
3757 MDIO_AN_REG_CTRL, 3804 MDIO_AN_REG_CTRL,
3758 0x1200); 3805 0x1200);
3806 bnx2x_cl45_write(bp, params->port,
3807 ext_phy_type,
3808 ext_phy_addr,
3809 MDIO_PMA_DEVAD,
3810 MDIO_PMA_REG_RX_ALARM_CTRL,
3811 0x0400);
3812 bnx2x_cl45_write(bp, params->port,
3813 ext_phy_type,
3814 ext_phy_addr,
3815 MDIO_PMA_DEVAD,
3816 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3759 3817
3760 } 3818 }
3761 bnx2x_save_bcm_spirom_ver(bp, params->port, 3819 bnx2x_save_bcm_spirom_ver(bp, params->port,
@@ -4291,6 +4349,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4291 break; 4349 break;
4292 } 4350 }
4293 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 4351 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
4352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
4294 /* This phy uses the NIG latch mechanism since link 4353 /* This phy uses the NIG latch mechanism since link
4295 indication arrives through its LED4 and not via 4354 indication arrives through its LED4 and not via
4296 its LASI signal, so we get steady signal 4355 its LASI signal, so we get steady signal
@@ -4298,6 +4357,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4298 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 4357 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
4299 1 << NIG_LATCH_BC_ENABLE_MI_INT); 4358 1 << NIG_LATCH_BC_ENABLE_MI_INT);
4300 4359
4360 bnx2x_cl45_write(bp, params->port,
4361 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
4362 ext_phy_addr,
4363 MDIO_PMA_DEVAD,
4364 MDIO_PMA_REG_CTRL, 0x0000);
4365
4301 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr); 4366 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
4302 if (params->req_line_speed == SPEED_AUTO_NEG) { 4367 if (params->req_line_speed == SPEED_AUTO_NEG) {
4303 4368
@@ -4394,17 +4459,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4394 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 4459 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
4395 DP(NETIF_MSG_LINK, "Advertising 10G\n"); 4460 DP(NETIF_MSG_LINK, "Advertising 10G\n");
4396 /* Restart autoneg for 10G*/ 4461 /* Restart autoneg for 10G*/
4397 bnx2x_cl45_read(bp, params->port, 4462
4398 ext_phy_type,
4399 ext_phy_addr,
4400 MDIO_AN_DEVAD,
4401 MDIO_AN_REG_CTRL, &val);
4402 val |= 0x200;
4403 bnx2x_cl45_write(bp, params->port, 4463 bnx2x_cl45_write(bp, params->port,
4404 ext_phy_type, 4464 ext_phy_type,
4405 ext_phy_addr, 4465 ext_phy_addr,
4406 MDIO_AN_DEVAD, 4466 MDIO_AN_DEVAD,
4407 MDIO_AN_REG_CTRL, val); 4467 MDIO_AN_REG_CTRL, 0x3200);
4408 } 4468 }
4409 } else { 4469 } else {
4410 /* Force speed */ 4470 /* Force speed */
@@ -5148,6 +5208,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
5148 } 5208 }
5149 break; 5209 break;
5150 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 5210 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5211 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5151 /* Check 10G-BaseT link status */ 5212 /* Check 10G-BaseT link status */
5152 /* Check PMD signal ok */ 5213 /* Check PMD signal ok */
5153 bnx2x_cl45_read(bp, params->port, ext_phy_type, 5214 bnx2x_cl45_read(bp, params->port, ext_phy_type,
@@ -5363,8 +5424,10 @@ static void bnx2x_link_int_ack(struct link_params *params,
5363 (NIG_STATUS_XGXS0_LINK10G | 5424 (NIG_STATUS_XGXS0_LINK10G |
5364 NIG_STATUS_XGXS0_LINK_STATUS | 5425 NIG_STATUS_XGXS0_LINK_STATUS |
5365 NIG_STATUS_SERDES0_LINK_STATUS)); 5426 NIG_STATUS_SERDES0_LINK_STATUS));
5366 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) 5427 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5367 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) { 5428 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
5429 (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5430 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
5368 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int); 5431 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
5369 } 5432 }
5370 if (vars->phy_link_up) { 5433 if (vars->phy_link_up) {
@@ -5477,6 +5540,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
5477 status = bnx2x_format_ver(spirom_ver, version, len); 5540 status = bnx2x_format_ver(spirom_ver, version, len);
5478 break; 5541 break;
5479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 5542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5543 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5480 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 | 5544 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
5481 (spirom_ver & 0x7F); 5545 (spirom_ver & 0x7F);
5482 status = bnx2x_format_ver(spirom_ver, version, len); 5546 status = bnx2x_format_ver(spirom_ver, version, len);
@@ -5728,13 +5792,15 @@ u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
5728} 5792}
5729 5793
5730 5794
5731u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 5795u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
5732 u16 hw_led_mode, u32 chip_id)
5733{ 5796{
5797 u8 port = params->port;
5798 u16 hw_led_mode = params->hw_led_mode;
5734 u8 rc = 0; 5799 u8 rc = 0;
5735 u32 tmp; 5800 u32 tmp;
5736 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 5801 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5737 5802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5803 struct bnx2x *bp = params->bp;
5738 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); 5804 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5739 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", 5805 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5740 speed, hw_led_mode); 5806 speed, hw_led_mode);
@@ -5749,7 +5815,14 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
5749 break; 5815 break;
5750 5816
5751 case LED_MODE_OPER: 5817 case LED_MODE_OPER:
5752 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); 5818 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5819 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5820 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5821 } else {
5822 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5823 hw_led_mode);
5824 }
5825
5753 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + 5826 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
5754 port*4, 0); 5827 port*4, 0);
5755 /* Set blinking rate to ~15.9Hz */ 5828 /* Set blinking rate to ~15.9Hz */
@@ -5761,7 +5834,7 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
5761 EMAC_WR(bp, EMAC_REG_EMAC_LED, 5834 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5762 (tmp & (~EMAC_LED_OVERRIDE))); 5835 (tmp & (~EMAC_LED_OVERRIDE)));
5763 5836
5764 if (!CHIP_IS_E1H(bp) && 5837 if (CHIP_IS_E1(bp) &&
5765 ((speed == SPEED_2500) || 5838 ((speed == SPEED_2500) ||
5766 (speed == SPEED_1000) || 5839 (speed == SPEED_1000) ||
5767 (speed == SPEED_100) || 5840 (speed == SPEED_100) ||
@@ -5864,6 +5937,7 @@ static u8 bnx2x_link_initialize(struct link_params *params,
5864 5937
5865 if (non_ext_phy || 5938 if (non_ext_phy ||
5866 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 5939 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5940 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
5867 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) || 5941 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
5868 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 5942 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
5869 if (params->req_line_speed == SPEED_AUTO_NEG) 5943 if (params->req_line_speed == SPEED_AUTO_NEG)
@@ -6030,10 +6104,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6030 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 6104 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6031 params->port*4, 0); 6105 params->port*4, 0);
6032 6106
6033 bnx2x_set_led(bp, params->port, LED_MODE_OPER, 6107 bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
6034 vars->line_speed, params->hw_led_mode,
6035 params->chip_id);
6036
6037 } else 6108 } else
6038 /* No loopback */ 6109 /* No loopback */
6039 { 6110 {
@@ -6091,15 +6162,13 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6091{ 6162{
6092 struct bnx2x *bp = params->bp; 6163 struct bnx2x *bp = params->bp;
6093 u32 ext_phy_config = params->ext_phy_config; 6164 u32 ext_phy_config = params->ext_phy_config;
6094 u16 hw_led_mode = params->hw_led_mode;
6095 u32 chip_id = params->chip_id;
6096 u8 port = params->port; 6165 u8 port = params->port;
6097 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 6166 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6098 u32 val = REG_RD(bp, params->shmem_base + 6167 u32 val = REG_RD(bp, params->shmem_base +
6099 offsetof(struct shmem_region, dev_info. 6168 offsetof(struct shmem_region, dev_info.
6100 port_feature_config[params->port]. 6169 port_feature_config[params->port].
6101 config)); 6170 config));
6102 6171 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6103 /* disable attentions */ 6172 /* disable attentions */
6104 vars->link_status = 0; 6173 vars->link_status = 0;
6105 bnx2x_update_mng(params, vars->link_status); 6174 bnx2x_update_mng(params, vars->link_status);
@@ -6127,7 +6196,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6127 * Hold it as vars low 6196 * Hold it as vars low
6128 */ 6197 */
6129 /* clear link led */ 6198 /* clear link led */
6130 bnx2x_set_led(bp, port, LED_MODE_OFF, 0, hw_led_mode, chip_id); 6199 bnx2x_set_led(params, LED_MODE_OFF, 0);
6131 if (reset_ext_phy) { 6200 if (reset_ext_phy) {
6132 switch (ext_phy_type) { 6201 switch (ext_phy_type) {
6133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 6202 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
@@ -6163,6 +6232,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6163 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr); 6232 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
6164 break; 6233 break;
6165 } 6234 }
6235 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6236 {
6237 u8 ext_phy_addr =
6238 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6239 bnx2x_cl45_write(bp, port,
6240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6241 ext_phy_addr,
6242 MDIO_AN_DEVAD,
6243 MDIO_AN_REG_CTRL, 0x0000);
6244 bnx2x_cl45_write(bp, port,
6245 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6246 ext_phy_addr,
6247 MDIO_PMA_DEVAD,
6248 MDIO_PMA_REG_CTRL, 1);
6249 break;
6250 }
6166 default: 6251 default:
6167 /* HW reset */ 6252 /* HW reset */
6168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6253 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
@@ -6198,9 +6283,7 @@ static u8 bnx2x_update_link_down(struct link_params *params,
6198 u8 port = params->port; 6283 u8 port = params->port;
6199 6284
6200 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); 6285 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6201 bnx2x_set_led(bp, port, LED_MODE_OFF, 6286 bnx2x_set_led(params, LED_MODE_OFF, 0);
6202 0, params->hw_led_mode,
6203 params->chip_id);
6204 6287
6205 /* indicate no mac active */ 6288 /* indicate no mac active */
6206 vars->mac_type = MAC_TYPE_NONE; 6289 vars->mac_type = MAC_TYPE_NONE;
@@ -6237,15 +6320,13 @@ static u8 bnx2x_update_link_up(struct link_params *params,
6237 vars->link_status |= LINK_STATUS_LINK_UP; 6320 vars->link_status |= LINK_STATUS_LINK_UP;
6238 if (link_10g) { 6321 if (link_10g) {
6239 bnx2x_bmac_enable(params, vars, 0); 6322 bnx2x_bmac_enable(params, vars, 0);
6240 bnx2x_set_led(bp, port, LED_MODE_OPER, 6323 bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
6241 SPEED_10000, params->hw_led_mode,
6242 params->chip_id);
6243
6244 } else { 6324 } else {
6245 bnx2x_emac_enable(params, vars, 0);
6246 rc = bnx2x_emac_program(params, vars->line_speed, 6325 rc = bnx2x_emac_program(params, vars->line_speed,
6247 vars->duplex); 6326 vars->duplex);
6248 6327
6328 bnx2x_emac_enable(params, vars, 0);
6329
6249 /* AN complete? */ 6330 /* AN complete? */
6250 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 6331 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
6251 if (!(vars->phy_flags & 6332 if (!(vars->phy_flags &
@@ -6343,6 +6424,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6343 6424
6344 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && 6425 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
6345 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) && 6426 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
6427 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
6346 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) && 6428 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
6347 (ext_phy_link_up && !vars->phy_link_up)) 6429 (ext_phy_link_up && !vars->phy_link_up))
6348 bnx2x_init_internal_phy(params, vars, 0); 6430 bnx2x_init_internal_phy(params, vars, 0);
@@ -6578,6 +6660,13 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6578 return 0; 6660 return 0;
6579} 6661}
6580 6662
6663
6664static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6665{
6666 /* HW reset */
6667 bnx2x_ext_phy_hw_reset(bp, 1);
6668 return 0;
6669}
6581u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base) 6670u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6582{ 6671{
6583 u8 rc = 0; 6672 u8 rc = 0;
@@ -6607,7 +6696,9 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6607 /* GPIO1 affects both ports, so there's need to pull 6696 /* GPIO1 affects both ports, so there's need to pull
6608 it for single port alone */ 6697 it for single port alone */
6609 rc = bnx2x_8726_common_init_phy(bp, shmem_base); 6698 rc = bnx2x_8726_common_init_phy(bp, shmem_base);
6610 6699 break;
6700 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6701 rc = bnx2x_84823_common_init_phy(bp, shmem_base);
6611 break; 6702 break;
6612 default: 6703 default:
6613 DP(NETIF_MSG_LINK, 6704 DP(NETIF_MSG_LINK,
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index f3e252264e1b..40c2981de8ed 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -178,8 +178,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
178 Basically, the CLC takes care of the led for the link, but in case one needs 178 Basically, the CLC takes care of the led for the link, but in case one needs
179 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to 179 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
180 blink the led, and LED_MODE_OFF to set the led off.*/ 180 blink the led, and LED_MODE_OFF to set the led off.*/
181u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 181u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
182 u16 hw_led_mode, u32 chip_id);
183#define LED_MODE_OFF 0 182#define LED_MODE_OFF 0
184#define LED_MODE_OPER 2 183#define LED_MODE_OPER 2
185 184
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 20f0ed956df2..77ba13520d87 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -49,6 +49,7 @@
49#include <linux/prefetch.h> 49#include <linux/prefetch.h>
50#include <linux/zlib.h> 50#include <linux/zlib.h>
51#include <linux/io.h> 51#include <linux/io.h>
52#include <linux/stringify.h>
52 53
53 54
54#include "bnx2x.h" 55#include "bnx2x.h"
@@ -56,15 +57,20 @@
56#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
57#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
58 59
59#define DRV_MODULE_VERSION "1.52.1" 60#define DRV_MODULE_VERSION "1.52.1-5"
60#define DRV_MODULE_RELDATE "2009/08/12" 61#define DRV_MODULE_RELDATE "2009/11/09"
61#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
62 63
63#include <linux/firmware.h> 64#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h" 65#include "bnx2x_fw_file_hdr.h"
65/* FW files */ 66/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-" 67#define FW_FILE_VERSION \
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-" 68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
68 74
69/* Time in jiffies before concluding the transmitter is hung */ 75/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ) 76#define TX_TIMEOUT (5*HZ)
@@ -77,21 +83,18 @@ MODULE_AUTHOR("Eliezer Tamir");
77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 83MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION); 85MODULE_VERSION(DRV_MODULE_VERSION);
86MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
80 88
81static int multi_mode = 1; 89static int multi_mode = 1;
82module_param(multi_mode, int, 0); 90module_param(multi_mode, int, 0);
83MODULE_PARM_DESC(multi_mode, " Multi queue mode " 91MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))"); 92 "(0 Disable; 1 Enable (default))");
85 93
86static int num_rx_queues; 94static int num_queues;
87module_param(num_rx_queues, int, 0); 95module_param(num_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1" 96MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
89 " (default is half number of CPUs)"); 97 " (default is as a number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
95 98
96static int disable_tpa; 99static int disable_tpa;
97module_param(disable_tpa, int, 0); 100module_param(disable_tpa, int, 0);
@@ -550,7 +553,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
550 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 553 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
551 554
552 /* Rx */ 555 /* Rx */
553 for_each_rx_queue(bp, i) { 556 for_each_queue(bp, i) {
554 struct bnx2x_fastpath *fp = &bp->fp[i]; 557 struct bnx2x_fastpath *fp = &bp->fp[i];
555 558
556 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)" 559 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
@@ -567,7 +570,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
567 } 570 }
568 571
569 /* Tx */ 572 /* Tx */
570 for_each_tx_queue(bp, i) { 573 for_each_queue(bp, i) {
571 struct bnx2x_fastpath *fp = &bp->fp[i]; 574 struct bnx2x_fastpath *fp = &bp->fp[i];
572 575
573 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" 576 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
@@ -582,7 +585,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
582 585
583 /* Rings */ 586 /* Rings */
584 /* Rx */ 587 /* Rx */
585 for_each_rx_queue(bp, i) { 588 for_each_queue(bp, i) {
586 struct bnx2x_fastpath *fp = &bp->fp[i]; 589 struct bnx2x_fastpath *fp = &bp->fp[i];
587 590
588 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 591 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -616,7 +619,7 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
616 } 619 }
617 620
618 /* Tx */ 621 /* Tx */
619 for_each_tx_queue(bp, i) { 622 for_each_queue(bp, i) {
620 struct bnx2x_fastpath *fp = &bp->fp[i]; 623 struct bnx2x_fastpath *fp = &bp->fp[i];
621 624
622 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 625 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -742,6 +745,9 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
742 if (msix) { 745 if (msix) {
743 synchronize_irq(bp->msix_table[0].vector); 746 synchronize_irq(bp->msix_table[0].vector);
744 offset = 1; 747 offset = 1;
748#ifdef BCM_CNIC
749 offset++;
750#endif
745 for_each_queue(bp, i) 751 for_each_queue(bp, i)
746 synchronize_irq(bp->msix_table[i + offset].vector); 752 synchronize_irq(bp->msix_table[i + offset].vector);
747 } else 753 } else
@@ -781,21 +787,13 @@ static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
781 barrier(); 787 barrier();
782} 788}
783 789
784static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp) 790static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
785{ 791{
786 struct host_status_block *fpsb = fp->status_blk; 792 struct host_status_block *fpsb = fp->status_blk;
787 u16 rc = 0;
788 793
789 barrier(); /* status block is written to by the chip */ 794 barrier(); /* status block is written to by the chip */
790 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) { 795 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
791 fp->fp_c_idx = fpsb->c_status_block.status_block_index; 796 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
792 rc |= 1;
793 }
794 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
795 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
796 rc |= 2;
797 }
798 return rc;
799} 797}
800 798
801static u16 bnx2x_ack_int(struct bnx2x *bp) 799static u16 bnx2x_ack_int(struct bnx2x *bp)
@@ -835,6 +833,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
835 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; 833 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
836 int nbd; 834 int nbd;
837 835
836 /* prefetch skb end pointer to speedup dev_kfree_skb() */
837 prefetch(&skb->end);
838
838 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n", 839 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
839 idx, tx_buf, skb); 840 idx, tx_buf, skb);
840 841
@@ -879,7 +880,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
879 880
880 /* release skb */ 881 /* release skb */
881 WARN_ON(!skb); 882 WARN_ON(!skb);
882 dev_kfree_skb_any(skb); 883 dev_kfree_skb(skb);
883 tx_buf->first_bd = 0; 884 tx_buf->first_bd = 0;
884 tx_buf->skb = NULL; 885 tx_buf->skb = NULL;
885 886
@@ -909,19 +910,28 @@ static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
909 return (s16)(fp->bp->tx_ring_size) - used; 910 return (s16)(fp->bp->tx_ring_size) - used;
910} 911}
911 912
912static void bnx2x_tx_int(struct bnx2x_fastpath *fp) 913static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
914{
915 u16 hw_cons;
916
917 /* Tell compiler that status block fields can change */
918 barrier();
919 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
920 return hw_cons != fp->tx_pkt_cons;
921}
922
923static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
913{ 924{
914 struct bnx2x *bp = fp->bp; 925 struct bnx2x *bp = fp->bp;
915 struct netdev_queue *txq; 926 struct netdev_queue *txq;
916 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons; 927 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
917 int done = 0;
918 928
919#ifdef BNX2X_STOP_ON_ERROR 929#ifdef BNX2X_STOP_ON_ERROR
920 if (unlikely(bp->panic)) 930 if (unlikely(bp->panic))
921 return; 931 return -1;
922#endif 932#endif
923 933
924 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues); 934 txq = netdev_get_tx_queue(bp->dev, fp->index);
925 hw_cons = le16_to_cpu(*fp->tx_cons_sb); 935 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
926 sw_cons = fp->tx_pkt_cons; 936 sw_cons = fp->tx_pkt_cons;
927 937
@@ -942,7 +952,6 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
942*/ 952*/
943 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons); 953 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
944 sw_cons++; 954 sw_cons++;
945 done++;
946 } 955 }
947 956
948 fp->tx_pkt_cons = sw_cons; 957 fp->tx_pkt_cons = sw_cons;
@@ -964,8 +973,12 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
964 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)) 973 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
965 netif_tx_wake_queue(txq); 974 netif_tx_wake_queue(txq);
966 } 975 }
976 return 0;
967} 977}
968 978
979#ifdef BCM_CNIC
980static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
981#endif
969 982
970static void bnx2x_sp_event(struct bnx2x_fastpath *fp, 983static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
971 union eth_rx_cqe *rr_cqe) 984 union eth_rx_cqe *rr_cqe)
@@ -1022,16 +1035,24 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1022 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; 1035 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1023 break; 1036 break;
1024 1037
1038#ifdef BCM_CNIC
1039 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1040 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1041 bnx2x_cnic_cfc_comp(bp, cid);
1042 break;
1043#endif
1025 1044
1026 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): 1045 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1027 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): 1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1028 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 1047 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1029 bp->set_mac_pending = 0; 1048 bp->set_mac_pending--;
1049 smp_wmb();
1030 break; 1050 break;
1031 1051
1032 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): 1052 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1033 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1034 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 1053 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1054 bp->set_mac_pending--;
1055 smp_wmb();
1035 break; 1056 break;
1036 1057
1037 default: 1058 default:
@@ -1539,6 +1560,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1539 } else { 1560 } else {
1540 rx_buf = &fp->rx_buf_ring[bd_cons]; 1561 rx_buf = &fp->rx_buf_ring[bd_cons];
1541 skb = rx_buf->skb; 1562 skb = rx_buf->skb;
1563 prefetch(skb);
1564 prefetch((u8 *)skb + 256);
1542 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); 1565 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1543 pad = cqe->fast_path_cqe.placement_offset; 1566 pad = cqe->fast_path_cqe.placement_offset;
1544 1567
@@ -1720,27 +1743,13 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1720 if (unlikely(bp->panic)) 1743 if (unlikely(bp->panic))
1721 return IRQ_HANDLED; 1744 return IRQ_HANDLED;
1722#endif 1745#endif
1723 /* Handle Rx or Tx according to MSI-X vector */
1724 if (fp->is_rx_queue) {
1725 prefetch(fp->rx_cons_sb);
1726 prefetch(&fp->status_blk->u_status_block.status_block_index);
1727 1746
1728 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1747 /* Handle Rx and Tx according to MSI-X vector */
1729 1748 prefetch(fp->rx_cons_sb);
1730 } else { 1749 prefetch(fp->tx_cons_sb);
1731 prefetch(fp->tx_cons_sb); 1750 prefetch(&fp->status_blk->u_status_block.status_block_index);
1732 prefetch(&fp->status_blk->c_status_block.status_block_index); 1751 prefetch(&fp->status_blk->c_status_block.status_block_index);
1733 1752 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1734 bnx2x_update_fpsb_idx(fp);
1735 rmb();
1736 bnx2x_tx_int(fp);
1737
1738 /* Re-enable interrupts */
1739 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1740 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1741 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1742 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1743 }
1744 1753
1745 return IRQ_HANDLED; 1754 return IRQ_HANDLED;
1746} 1755}
@@ -1775,35 +1784,32 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1775 1784
1776 mask = 0x2 << fp->sb_id; 1785 mask = 0x2 << fp->sb_id;
1777 if (status & mask) { 1786 if (status & mask) {
1778 /* Handle Rx or Tx according to SB id */ 1787 /* Handle Rx and Tx according to SB id */
1779 if (fp->is_rx_queue) { 1788 prefetch(fp->rx_cons_sb);
1780 prefetch(fp->rx_cons_sb); 1789 prefetch(&fp->status_blk->u_status_block.
1781 prefetch(&fp->status_blk->u_status_block. 1790 status_block_index);
1782 status_block_index); 1791 prefetch(fp->tx_cons_sb);
1783 1792 prefetch(&fp->status_blk->c_status_block.
1784 napi_schedule(&bnx2x_fp(bp, fp->index, napi)); 1793 status_block_index);
1785 1794 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1786 } else {
1787 prefetch(fp->tx_cons_sb);
1788 prefetch(&fp->status_blk->c_status_block.
1789 status_block_index);
1790
1791 bnx2x_update_fpsb_idx(fp);
1792 rmb();
1793 bnx2x_tx_int(fp);
1794
1795 /* Re-enable interrupts */
1796 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1797 le16_to_cpu(fp->fp_u_idx),
1798 IGU_INT_NOP, 1);
1799 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1800 le16_to_cpu(fp->fp_c_idx),
1801 IGU_INT_ENABLE, 1);
1802 }
1803 status &= ~mask; 1795 status &= ~mask;
1804 } 1796 }
1805 } 1797 }
1806 1798
1799#ifdef BCM_CNIC
1800 mask = 0x2 << CNIC_SB_ID(bp);
1801 if (status & (mask | 0x1)) {
1802 struct cnic_ops *c_ops = NULL;
1803
1804 rcu_read_lock();
1805 c_ops = rcu_dereference(bp->cnic_ops);
1806 if (c_ops)
1807 c_ops->cnic_handler(bp->cnic_data, NULL);
1808 rcu_read_unlock();
1809
1810 status &= ~mask;
1811 }
1812#endif
1807 1813
1808 if (unlikely(status & 0x1)) { 1814 if (unlikely(status & 0x1)) {
1809 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 1815 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -2128,18 +2134,30 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2128 2134
2129static void bnx2x_link_report(struct bnx2x *bp) 2135static void bnx2x_link_report(struct bnx2x *bp)
2130{ 2136{
2131 if (bp->state == BNX2X_STATE_DISABLED) { 2137 if (bp->flags & MF_FUNC_DIS) {
2132 netif_carrier_off(bp->dev); 2138 netif_carrier_off(bp->dev);
2133 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); 2139 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2134 return; 2140 return;
2135 } 2141 }
2136 2142
2137 if (bp->link_vars.link_up) { 2143 if (bp->link_vars.link_up) {
2144 u16 line_speed;
2145
2138 if (bp->state == BNX2X_STATE_OPEN) 2146 if (bp->state == BNX2X_STATE_OPEN)
2139 netif_carrier_on(bp->dev); 2147 netif_carrier_on(bp->dev);
2140 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); 2148 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2141 2149
2142 printk("%d Mbps ", bp->link_vars.line_speed); 2150 line_speed = bp->link_vars.line_speed;
2151 if (IS_E1HMF(bp)) {
2152 u16 vn_max_rate;
2153
2154 vn_max_rate =
2155 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2156 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2157 if (vn_max_rate < line_speed)
2158 line_speed = vn_max_rate;
2159 }
2160 printk("%d Mbps ", line_speed);
2143 2161
2144 if (bp->link_vars.duplex == DUPLEX_FULL) 2162 if (bp->link_vars.duplex == DUPLEX_FULL)
2145 printk("full duplex"); 2163 printk("full duplex");
@@ -2304,8 +2322,14 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2304 } 2322 }
2305 2323
2306 /* ... only if all min rates are zeros - disable fairness */ 2324 /* ... only if all min rates are zeros - disable fairness */
2307 if (all_zero) 2325 if (all_zero) {
2308 bp->vn_weight_sum = 0; 2326 bp->cmng.flags.cmng_enables &=
2327 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2328 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2329 " fairness will be disabled\n");
2330 } else
2331 bp->cmng.flags.cmng_enables |=
2332 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2309} 2333}
2310 2334
2311static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) 2335static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
@@ -2324,17 +2348,14 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2324 } else { 2348 } else {
2325 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2349 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2326 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2350 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2327 /* If fairness is enabled (not all min rates are zeroes) and 2351 /* If min rate is zero - set it to 1 */
2328 if current min rate is zero - set it to 1. 2352 if (!vn_min_rate)
2329 This is a requirement of the algorithm. */
2330 if (bp->vn_weight_sum && (vn_min_rate == 0))
2331 vn_min_rate = DEF_MIN_RATE; 2353 vn_min_rate = DEF_MIN_RATE;
2332 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 2354 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2333 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 2355 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2334 } 2356 }
2335
2336 DP(NETIF_MSG_IFUP, 2357 DP(NETIF_MSG_IFUP,
2337 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n", 2358 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2338 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 2359 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2339 2360
2340 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); 2361 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
@@ -2405,8 +2426,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2405 memset(&(pstats->mac_stx[0]), 0, 2426 memset(&(pstats->mac_stx[0]), 0,
2406 sizeof(struct mac_stx)); 2427 sizeof(struct mac_stx));
2407 } 2428 }
2408 if ((bp->state == BNX2X_STATE_OPEN) || 2429 if (bp->state == BNX2X_STATE_OPEN)
2409 (bp->state == BNX2X_STATE_DISABLED))
2410 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2430 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2411 } 2431 }
2412 2432
@@ -2449,9 +2469,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2449 2469
2450static void bnx2x__link_status_update(struct bnx2x *bp) 2470static void bnx2x__link_status_update(struct bnx2x *bp)
2451{ 2471{
2452 int func = BP_FUNC(bp); 2472 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2453
2454 if (bp->state != BNX2X_STATE_OPEN)
2455 return; 2473 return;
2456 2474
2457 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2475 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
@@ -2461,7 +2479,6 @@ static void bnx2x__link_status_update(struct bnx2x *bp)
2461 else 2479 else
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2480 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463 2481
2464 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2465 bnx2x_calc_vn_weight_sum(bp); 2482 bnx2x_calc_vn_weight_sum(bp);
2466 2483
2467 /* indicate link status */ 2484 /* indicate link status */
@@ -2501,6 +2518,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2501 u32 cnt = 1; 2518 u32 cnt = 1;
2502 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2519 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2503 2520
2521 mutex_lock(&bp->fw_mb_mutex);
2504 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 2522 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2505 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 2523 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2506 2524
@@ -2510,8 +2528,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2510 2528
2511 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 2529 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2512 2530
2513 /* Give the FW up to 2 second (200*10ms) */ 2531 /* Give the FW up to 5 second (500*10ms) */
2514 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200)); 2532 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2515 2533
2516 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", 2534 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2517 cnt*delay, rc, seq); 2535 cnt*delay, rc, seq);
@@ -2525,32 +2543,23 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2525 bnx2x_fw_dump(bp); 2543 bnx2x_fw_dump(bp);
2526 rc = 0; 2544 rc = 0;
2527 } 2545 }
2546 mutex_unlock(&bp->fw_mb_mutex);
2528 2547
2529 return rc; 2548 return rc;
2530} 2549}
2531 2550
2532static void bnx2x_set_storm_rx_mode(struct bnx2x *bp); 2551static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2533static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set); 2552static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2534static void bnx2x_set_rx_mode(struct net_device *dev); 2553static void bnx2x_set_rx_mode(struct net_device *dev);
2535 2554
2536static void bnx2x_e1h_disable(struct bnx2x *bp) 2555static void bnx2x_e1h_disable(struct bnx2x *bp)
2537{ 2556{
2538 int port = BP_PORT(bp); 2557 int port = BP_PORT(bp);
2539 int i;
2540
2541 bp->rx_mode = BNX2X_RX_MODE_NONE;
2542 bnx2x_set_storm_rx_mode(bp);
2543 2558
2544 netif_tx_disable(bp->dev); 2559 netif_tx_disable(bp->dev);
2545 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2546 2560
2547 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2548 2562
2549 bnx2x_set_mac_addr_e1h(bp, 0);
2550
2551 for (i = 0; i < MC_HASH_SIZE; i++)
2552 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2553
2554 netif_carrier_off(bp->dev); 2563 netif_carrier_off(bp->dev);
2555} 2564}
2556 2565
@@ -2560,13 +2569,13 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
2560 2569
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 2570 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2562 2571
2563 bnx2x_set_mac_addr_e1h(bp, 1);
2564
2565 /* Tx queue should be only reenabled */ 2572 /* Tx queue should be only reenabled */
2566 netif_tx_wake_all_queues(bp->dev); 2573 netif_tx_wake_all_queues(bp->dev);
2567 2574
2568 /* Initialize the receive filter. */ 2575 /*
2569 bnx2x_set_rx_mode(bp->dev); 2576 * Should not call netif_carrier_on since it will be called if the link
2577 * is up when checking for link state
2578 */
2570} 2579}
2571 2580
2572static void bnx2x_update_min_max(struct bnx2x *bp) 2581static void bnx2x_update_min_max(struct bnx2x *bp)
@@ -2605,21 +2614,23 @@ static void bnx2x_update_min_max(struct bnx2x *bp)
2605 2614
2606static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2615static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2607{ 2616{
2608 int func = BP_FUNC(bp);
2609
2610 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 2617 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2611 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2612 2618
2613 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 2619 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2614 2620
2621 /*
2622 * This is the only place besides the function initialization
2623 * where the bp->flags can change so it is done without any
2624 * locks
2625 */
2615 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 2626 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2616 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); 2627 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2617 bp->state = BNX2X_STATE_DISABLED; 2628 bp->flags |= MF_FUNC_DIS;
2618 2629
2619 bnx2x_e1h_disable(bp); 2630 bnx2x_e1h_disable(bp);
2620 } else { 2631 } else {
2621 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); 2632 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2622 bp->state = BNX2X_STATE_OPEN; 2633 bp->flags &= ~MF_FUNC_DIS;
2623 2634
2624 bnx2x_e1h_enable(bp); 2635 bnx2x_e1h_enable(bp);
2625 } 2636 }
@@ -2638,11 +2649,40 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2638 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); 2649 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2639} 2650}
2640 2651
2652/* must be called under the spq lock */
2653static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2654{
2655 struct eth_spe *next_spe = bp->spq_prod_bd;
2656
2657 if (bp->spq_prod_bd == bp->spq_last_bd) {
2658 bp->spq_prod_bd = bp->spq;
2659 bp->spq_prod_idx = 0;
2660 DP(NETIF_MSG_TIMER, "end of spq\n");
2661 } else {
2662 bp->spq_prod_bd++;
2663 bp->spq_prod_idx++;
2664 }
2665 return next_spe;
2666}
2667
2668/* must be called under the spq lock */
2669static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2670{
2671 int func = BP_FUNC(bp);
2672
2673 /* Make sure that BD data is updated before writing the producer */
2674 wmb();
2675
2676 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2677 bp->spq_prod_idx);
2678 mmiowb();
2679}
2680
2641/* the slow path queue is odd since completions arrive on the fastpath ring */ 2681/* the slow path queue is odd since completions arrive on the fastpath ring */
2642static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 2682static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2643 u32 data_hi, u32 data_lo, int common) 2683 u32 data_hi, u32 data_lo, int common)
2644{ 2684{
2645 int func = BP_FUNC(bp); 2685 struct eth_spe *spe;
2646 2686
2647 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2687 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2648 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", 2688 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
@@ -2664,38 +2704,23 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2664 return -EBUSY; 2704 return -EBUSY;
2665 } 2705 }
2666 2706
2707 spe = bnx2x_sp_get_next(bp);
2708
2667 /* CID needs port number to be encoded int it */ 2709 /* CID needs port number to be encoded int it */
2668 bp->spq_prod_bd->hdr.conn_and_cmd_data = 2710 spe->hdr.conn_and_cmd_data =
2669 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | 2711 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2670 HW_CID(bp, cid))); 2712 HW_CID(bp, cid)));
2671 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); 2713 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2672 if (common) 2714 if (common)
2673 bp->spq_prod_bd->hdr.type |= 2715 spe->hdr.type |=
2674 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); 2716 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2675 2717
2676 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi); 2718 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2677 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo); 2719 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2678 2720
2679 bp->spq_left--; 2721 bp->spq_left--;
2680 2722
2681 if (bp->spq_prod_bd == bp->spq_last_bd) { 2723 bnx2x_sp_prod_update(bp);
2682 bp->spq_prod_bd = bp->spq;
2683 bp->spq_prod_idx = 0;
2684 DP(NETIF_MSG_TIMER, "end of spq\n");
2685
2686 } else {
2687 bp->spq_prod_bd++;
2688 bp->spq_prod_idx++;
2689 }
2690
2691 /* Make sure that BD data is updated before writing the producer */
2692 wmb();
2693
2694 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2695 bp->spq_prod_idx);
2696
2697 mmiowb();
2698
2699 spin_unlock_bh(&bp->spq_lock); 2724 spin_unlock_bh(&bp->spq_lock);
2700 return 0; 2725 return 0;
2701} 2726}
@@ -3024,6 +3049,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3024 int func = BP_FUNC(bp); 3049 int func = BP_FUNC(bp);
3025 3050
3026 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3051 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3052 bp->mf_config = SHMEM_RD(bp,
3053 mf_cfg.func_mf_config[func].config);
3027 val = SHMEM_RD(bp, func_mb[func].drv_status); 3054 val = SHMEM_RD(bp, func_mb[func].drv_status);
3028 if (val & DRV_STATUS_DCC_EVENT_MASK) 3055 if (val & DRV_STATUS_DCC_EVENT_MASK)
3029 bnx2x_dcc_event(bp, 3056 bnx2x_dcc_event(bp,
@@ -3227,6 +3254,17 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3227 return IRQ_HANDLED; 3254 return IRQ_HANDLED;
3228#endif 3255#endif
3229 3256
3257#ifdef BCM_CNIC
3258 {
3259 struct cnic_ops *c_ops;
3260
3261 rcu_read_lock();
3262 c_ops = rcu_dereference(bp->cnic_ops);
3263 if (c_ops)
3264 c_ops->cnic_handler(bp->cnic_data, NULL);
3265 rcu_read_unlock();
3266 }
3267#endif
3230 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 3268 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3231 3269
3232 return IRQ_HANDLED; 3270 return IRQ_HANDLED;
@@ -3958,7 +3996,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
3958 estats->no_buff_discard_hi = 0; 3996 estats->no_buff_discard_hi = 0;
3959 estats->no_buff_discard_lo = 0; 3997 estats->no_buff_discard_lo = 0;
3960 3998
3961 for_each_rx_queue(bp, i) { 3999 for_each_queue(bp, i) {
3962 struct bnx2x_fastpath *fp = &bp->fp[i]; 4000 struct bnx2x_fastpath *fp = &bp->fp[i];
3963 int cl_id = fp->cl_id; 4001 int cl_id = fp->cl_id;
3964 struct tstorm_per_client_stats *tclient = 4002 struct tstorm_per_client_stats *tclient =
@@ -4175,7 +4213,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
4175 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 4213 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4176 4214
4177 nstats->rx_dropped = estats->mac_discard; 4215 nstats->rx_dropped = estats->mac_discard;
4178 for_each_rx_queue(bp, i) 4216 for_each_queue(bp, i)
4179 nstats->rx_dropped += 4217 nstats->rx_dropped +=
4180 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 4218 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4181 4219
@@ -4229,7 +4267,7 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
4229 estats->rx_err_discard_pkt = 0; 4267 estats->rx_err_discard_pkt = 0;
4230 estats->rx_skb_alloc_failed = 0; 4268 estats->rx_skb_alloc_failed = 0;
4231 estats->hw_csum_err = 0; 4269 estats->hw_csum_err = 0;
4232 for_each_rx_queue(bp, i) { 4270 for_each_queue(bp, i) {
4233 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; 4271 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4234 4272
4235 estats->driver_xoff += qstats->driver_xoff; 4273 estats->driver_xoff += qstats->driver_xoff;
@@ -4260,7 +4298,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4260 4298
4261 if (bp->msglevel & NETIF_MSG_TIMER) { 4299 if (bp->msglevel & NETIF_MSG_TIMER) {
4262 struct bnx2x_fastpath *fp0_rx = bp->fp; 4300 struct bnx2x_fastpath *fp0_rx = bp->fp;
4263 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]); 4301 struct bnx2x_fastpath *fp0_tx = bp->fp;
4264 struct tstorm_per_client_stats *old_tclient = 4302 struct tstorm_per_client_stats *old_tclient =
4265 &bp->fp->old_tclient; 4303 &bp->fp->old_tclient;
4266 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats; 4304 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
@@ -4640,8 +4678,7 @@ static void bnx2x_timer(unsigned long data)
4640 } 4678 }
4641 } 4679 }
4642 4680
4643 if ((bp->state == BNX2X_STATE_OPEN) || 4681 if (bp->state == BNX2X_STATE_OPEN)
4644 (bp->state == BNX2X_STATE_DISABLED))
4645 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 4682 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4646 4683
4647timer_restart: 4684timer_restart:
@@ -4860,21 +4897,21 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4860 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4897 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4861 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, 4898 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4862 U_SB_ETH_RX_CQ_INDEX), 4899 U_SB_ETH_RX_CQ_INDEX),
4863 bp->rx_ticks/12); 4900 bp->rx_ticks/(4 * BNX2X_BTR));
4864 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4901 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4865 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, 4902 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4866 U_SB_ETH_RX_CQ_INDEX), 4903 U_SB_ETH_RX_CQ_INDEX),
4867 (bp->rx_ticks/12) ? 0 : 1); 4904 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4868 4905
4869 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4906 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4870 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4907 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4871 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, 4908 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4872 C_SB_ETH_TX_CQ_INDEX), 4909 C_SB_ETH_TX_CQ_INDEX),
4873 bp->tx_ticks/12); 4910 bp->tx_ticks/(4 * BNX2X_BTR));
4874 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4911 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4875 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, 4912 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4876 C_SB_ETH_TX_CQ_INDEX), 4913 C_SB_ETH_TX_CQ_INDEX),
4877 (bp->tx_ticks/12) ? 0 : 1); 4914 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4878 } 4915 }
4879} 4916}
4880 4917
@@ -4916,7 +4953,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4916 4953
4917 if (bp->flags & TPA_ENABLE_FLAG) { 4954 if (bp->flags & TPA_ENABLE_FLAG) {
4918 4955
4919 for_each_rx_queue(bp, j) { 4956 for_each_queue(bp, j) {
4920 struct bnx2x_fastpath *fp = &bp->fp[j]; 4957 struct bnx2x_fastpath *fp = &bp->fp[j];
4921 4958
4922 for (i = 0; i < max_agg_queues; i++) { 4959 for (i = 0; i < max_agg_queues; i++) {
@@ -4939,16 +4976,13 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4939 } 4976 }
4940 } 4977 }
4941 4978
4942 for_each_rx_queue(bp, j) { 4979 for_each_queue(bp, j) {
4943 struct bnx2x_fastpath *fp = &bp->fp[j]; 4980 struct bnx2x_fastpath *fp = &bp->fp[j];
4944 4981
4945 fp->rx_bd_cons = 0; 4982 fp->rx_bd_cons = 0;
4946 fp->rx_cons_sb = BNX2X_RX_SB_INDEX; 4983 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4947 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX; 4984 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4948 4985
4949 /* Mark queue as Rx */
4950 fp->is_rx_queue = 1;
4951
4952 /* "next page" elements initialization */ 4986 /* "next page" elements initialization */
4953 /* SGE ring */ 4987 /* SGE ring */
4954 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) { 4988 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
@@ -5054,7 +5088,7 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
5054{ 5088{
5055 int i, j; 5089 int i, j;
5056 5090
5057 for_each_tx_queue(bp, j) { 5091 for_each_queue(bp, j) {
5058 struct bnx2x_fastpath *fp = &bp->fp[j]; 5092 struct bnx2x_fastpath *fp = &bp->fp[j];
5059 5093
5060 for (i = 1; i <= NUM_TX_RINGS; i++) { 5094 for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -5080,10 +5114,6 @@ static void bnx2x_init_tx_ring(struct bnx2x *bp)
5080 fp->tx_cons_sb = BNX2X_TX_SB_INDEX; 5114 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5081 fp->tx_pkt = 0; 5115 fp->tx_pkt = 0;
5082 } 5116 }
5083
5084 /* clean tx statistics */
5085 for_each_rx_queue(bp, i)
5086 bnx2x_fp(bp, i, tx_pkt) = 0;
5087} 5117}
5088 5118
5089static void bnx2x_init_sp_ring(struct bnx2x *bp) 5119static void bnx2x_init_sp_ring(struct bnx2x *bp)
@@ -5112,7 +5142,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
5112{ 5142{
5113 int i; 5143 int i;
5114 5144
5115 for_each_rx_queue(bp, i) { 5145 /* Rx */
5146 for_each_queue(bp, i) {
5116 struct eth_context *context = bnx2x_sp(bp, context[i].eth); 5147 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5117 struct bnx2x_fastpath *fp = &bp->fp[i]; 5148 struct bnx2x_fastpath *fp = &bp->fp[i];
5118 u8 cl_id = fp->cl_id; 5149 u8 cl_id = fp->cl_id;
@@ -5164,10 +5195,11 @@ static void bnx2x_init_context(struct bnx2x *bp)
5164 ETH_CONNECTION_TYPE); 5195 ETH_CONNECTION_TYPE);
5165 } 5196 }
5166 5197
5167 for_each_tx_queue(bp, i) { 5198 /* Tx */
5199 for_each_queue(bp, i) {
5168 struct bnx2x_fastpath *fp = &bp->fp[i]; 5200 struct bnx2x_fastpath *fp = &bp->fp[i];
5169 struct eth_context *context = 5201 struct eth_context *context =
5170 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth); 5202 bnx2x_sp(bp, context[i].eth);
5171 5203
5172 context->cstorm_st_context.sb_index_number = 5204 context->cstorm_st_context.sb_index_number =
5173 C_SB_ETH_TX_CQ_INDEX; 5205 C_SB_ETH_TX_CQ_INDEX;
@@ -5195,7 +5227,7 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
5195 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 5227 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5196 REG_WR8(bp, BAR_TSTRORM_INTMEM + 5228 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5197 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 5229 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5198 bp->fp->cl_id + (i % bp->num_rx_queues)); 5230 bp->fp->cl_id + (i % bp->num_queues));
5199} 5231}
5200 5232
5201static void bnx2x_set_client_config(struct bnx2x *bp) 5233static void bnx2x_set_client_config(struct bnx2x *bp)
@@ -5235,7 +5267,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5235{ 5267{
5236 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; 5268 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5237 int mode = bp->rx_mode; 5269 int mode = bp->rx_mode;
5238 int mask = (1 << BP_L_ID(bp)); 5270 int mask = bp->rx_mode_cl_mask;
5239 int func = BP_FUNC(bp); 5271 int func = BP_FUNC(bp);
5240 int port = BP_PORT(bp); 5272 int port = BP_PORT(bp);
5241 int i; 5273 int i;
@@ -5348,6 +5380,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5348 (*(u32 *)&tstorm_config)); 5380 (*(u32 *)&tstorm_config));
5349 5381
5350 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 5382 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5383 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5351 bnx2x_set_storm_rx_mode(bp); 5384 bnx2x_set_storm_rx_mode(bp);
5352 5385
5353 for_each_queue(bp, i) { 5386 for_each_queue(bp, i) {
@@ -5438,7 +5471,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5438 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * 5471 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5439 SGE_PAGE_SIZE * PAGES_PER_SGE), 5472 SGE_PAGE_SIZE * PAGES_PER_SGE),
5440 (u32)0xffff); 5473 (u32)0xffff);
5441 for_each_rx_queue(bp, i) { 5474 for_each_queue(bp, i) {
5442 struct bnx2x_fastpath *fp = &bp->fp[i]; 5475 struct bnx2x_fastpath *fp = &bp->fp[i];
5443 5476
5444 REG_WR(bp, BAR_USTRORM_INTMEM + 5477 REG_WR(bp, BAR_USTRORM_INTMEM +
@@ -5473,7 +5506,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5473 rx_pause.cqe_thr_high = 350; 5506 rx_pause.cqe_thr_high = 350;
5474 rx_pause.sge_thr_high = 0; 5507 rx_pause.sge_thr_high = 0;
5475 5508
5476 for_each_rx_queue(bp, i) { 5509 for_each_queue(bp, i) {
5477 struct bnx2x_fastpath *fp = &bp->fp[i]; 5510 struct bnx2x_fastpath *fp = &bp->fp[i];
5478 5511
5479 if (!fp->disable_tpa) { 5512 if (!fp->disable_tpa) {
@@ -5504,20 +5537,18 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5504 bp->link_vars.line_speed = SPEED_10000; 5537 bp->link_vars.line_speed = SPEED_10000;
5505 bnx2x_init_port_minmax(bp); 5538 bnx2x_init_port_minmax(bp);
5506 5539
5540 if (!BP_NOMCP(bp))
5541 bp->mf_config =
5542 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5507 bnx2x_calc_vn_weight_sum(bp); 5543 bnx2x_calc_vn_weight_sum(bp);
5508 5544
5509 for (vn = VN_0; vn < E1HVN_MAX; vn++) 5545 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5510 bnx2x_init_vn_minmax(bp, 2*vn + port); 5546 bnx2x_init_vn_minmax(bp, 2*vn + port);
5511 5547
5512 /* Enable rate shaping and fairness */ 5548 /* Enable rate shaping and fairness */
5513 bp->cmng.flags.cmng_enables = 5549 bp->cmng.flags.cmng_enables |=
5514 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 5550 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5515 if (bp->vn_weight_sum) 5551
5516 bp->cmng.flags.cmng_enables |=
5517 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5518 else
5519 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5520 " fairness will be disabled\n");
5521 } else { 5552 } else {
5522 /* rate shaping and fairness are disabled */ 5553 /* rate shaping and fairness are disabled */
5523 DP(NETIF_MSG_IFUP, 5554 DP(NETIF_MSG_IFUP,
@@ -5565,10 +5596,11 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5565 fp->state = BNX2X_FP_STATE_CLOSED; 5596 fp->state = BNX2X_FP_STATE_CLOSED;
5566 fp->index = i; 5597 fp->index = i;
5567 fp->cl_id = BP_L_ID(bp) + i; 5598 fp->cl_id = BP_L_ID(bp) + i;
5599#ifdef BCM_CNIC
5600 fp->sb_id = fp->cl_id + 1;
5601#else
5568 fp->sb_id = fp->cl_id; 5602 fp->sb_id = fp->cl_id;
5569 /* Suitable Rx and Tx SBs are served by the same client */ 5603#endif
5570 if (i >= bp->num_rx_queues)
5571 fp->cl_id -= bp->num_rx_queues;
5572 DP(NETIF_MSG_IFUP, 5604 DP(NETIF_MSG_IFUP,
5573 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n", 5605 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5574 i, bp, fp->status_blk, fp->cl_id, fp->sb_id); 5606 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
@@ -5867,7 +5899,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
5867 msleep(50); 5899 msleep(50);
5868 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 5900 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5869 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 5901 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5870#ifndef BCM_ISCSI 5902#ifndef BCM_CNIC
5871 /* set NIC mode */ 5903 /* set NIC mode */
5872 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5904 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5873#endif 5905#endif
@@ -6006,6 +6038,9 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6006static int bnx2x_init_common(struct bnx2x *bp) 6038static int bnx2x_init_common(struct bnx2x *bp)
6007{ 6039{
6008 u32 val, i; 6040 u32 val, i;
6041#ifdef BCM_CNIC
6042 u32 wb_write[2];
6043#endif
6009 6044
6010 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 6045 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6011 6046
@@ -6048,7 +6083,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
6048#endif 6083#endif
6049 6084
6050 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 6085 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6051#ifdef BCM_ISCSI 6086#ifdef BCM_CNIC
6052 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 6087 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6053 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); 6088 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6054 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); 6089 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
@@ -6091,11 +6126,26 @@ static int bnx2x_init_common(struct bnx2x *bp)
6091 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 6126 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6092 6127
6093 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); 6128 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6129
6130#ifdef BCM_CNIC
6131 wb_write[0] = 0;
6132 wb_write[1] = 0;
6133 for (i = 0; i < 64; i++) {
6134 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6135 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6136
6137 if (CHIP_IS_E1H(bp)) {
6138 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6139 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6140 wb_write, 2);
6141 }
6142 }
6143#endif
6094 /* soft reset pulse */ 6144 /* soft reset pulse */
6095 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6145 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6096 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6146 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6097 6147
6098#ifdef BCM_ISCSI 6148#ifdef BCM_CNIC
6099 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); 6149 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6100#endif 6150#endif
6101 6151
@@ -6109,8 +6159,10 @@ static int bnx2x_init_common(struct bnx2x *bp)
6109 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 6159 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6110 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 6160 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6111 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 6161 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6162#ifndef BCM_CNIC
6112 /* set NIC mode */ 6163 /* set NIC mode */
6113 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6164 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6165#endif
6114 if (CHIP_IS_E1H(bp)) 6166 if (CHIP_IS_E1H(bp))
6115 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 6167 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6116 6168
@@ -6145,6 +6197,18 @@ static int bnx2x_init_common(struct bnx2x *bp)
6145 /* TODO: replace with something meaningful */ 6197 /* TODO: replace with something meaningful */
6146 } 6198 }
6147 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 6199 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6200#ifdef BCM_CNIC
6201 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6202 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6203 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6204 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6205 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6206 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6207 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6208 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6209 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6210 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6211#endif
6148 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6212 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6149 6213
6150 if (sizeof(union cdu_context) != 1024) 6214 if (sizeof(union cdu_context) != 1024)
@@ -6261,38 +6325,14 @@ static int bnx2x_init_port(struct bnx2x *bp)
6261 bnx2x_init_block(bp, TCM_BLOCK, init_stage); 6325 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6262 bnx2x_init_block(bp, UCM_BLOCK, init_stage); 6326 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6263 bnx2x_init_block(bp, CCM_BLOCK, init_stage); 6327 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6264#ifdef BCM_ISCSI
6265 /* Port0 1
6266 * Port1 385 */
6267 i++;
6268 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6269 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6270 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6271 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6272
6273 /* Port0 2
6274 * Port1 386 */
6275 i++;
6276 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6277 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6278 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6279 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6280
6281 /* Port0 3
6282 * Port1 387 */
6283 i++;
6284 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6285 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6286 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6287 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6288#endif
6289 bnx2x_init_block(bp, XCM_BLOCK, init_stage); 6328 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6290 6329
6291#ifdef BCM_ISCSI 6330#ifdef BCM_CNIC
6292 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); 6331 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6293 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6294 6332
6295 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); 6333 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6334 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6335 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6296#endif 6336#endif
6297 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 6337 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6298 6338
@@ -6350,18 +6390,8 @@ static int bnx2x_init_port(struct bnx2x *bp)
6350 msleep(5); 6390 msleep(5);
6351 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 6391 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6352 6392
6353#ifdef BCM_ISCSI 6393#ifdef BCM_CNIC
6354 /* tell the searcher where the T2 table is */ 6394 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6355 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6356
6357 wb_write[0] = U64_LO(bp->t2_mapping);
6358 wb_write[1] = U64_HI(bp->t2_mapping);
6359 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6360 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6361 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6362 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6363
6364 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6365#endif 6395#endif
6366 bnx2x_init_block(bp, CDU_BLOCK, init_stage); 6396 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6367 bnx2x_init_block(bp, CFC_BLOCK, init_stage); 6397 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
@@ -6470,7 +6500,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
6470#define PXP_ONE_ILT(x) (((x) << 10) | x) 6500#define PXP_ONE_ILT(x) (((x) << 10) | x)
6471#define PXP_ILT_RANGE(f, l) (((l) << 10) | f) 6501#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6472 6502
6503#ifdef BCM_CNIC
6504#define CNIC_ILT_LINES 127
6505#define CNIC_CTX_PER_ILT 16
6506#else
6473#define CNIC_ILT_LINES 0 6507#define CNIC_ILT_LINES 0
6508#endif
6474 6509
6475static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6510static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6476{ 6511{
@@ -6509,6 +6544,46 @@ static int bnx2x_init_func(struct bnx2x *bp)
6509 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, 6544 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6510 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); 6545 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6511 6546
6547#ifdef BCM_CNIC
6548 i += 1 + CNIC_ILT_LINES;
6549 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6550 if (CHIP_IS_E1(bp))
6551 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6552 else {
6553 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6554 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6555 }
6556
6557 i++;
6558 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6559 if (CHIP_IS_E1(bp))
6560 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6561 else {
6562 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6563 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6564 }
6565
6566 i++;
6567 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6568 if (CHIP_IS_E1(bp))
6569 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6570 else {
6571 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6572 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6573 }
6574
6575 /* tell the searcher where the T2 table is */
6576 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6577
6578 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6579 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6580
6581 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6582 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6583 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6584
6585 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6586#endif
6512 6587
6513 if (CHIP_IS_E1H(bp)) { 6588 if (CHIP_IS_E1H(bp)) {
6514 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); 6589 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
@@ -6593,6 +6668,9 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6593 bnx2x_zero_def_sb(bp); 6668 bnx2x_zero_def_sb(bp);
6594 for_each_queue(bp, i) 6669 for_each_queue(bp, i)
6595 bnx2x_zero_sb(bp, BP_L_ID(bp) + i); 6670 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6671#ifdef BCM_CNIC
6672 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6673#endif
6596 6674
6597init_hw_err: 6675init_hw_err:
6598 bnx2x_gunzip_end(bp); 6676 bnx2x_gunzip_end(bp);
@@ -6632,7 +6710,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6632 sizeof(struct host_status_block)); 6710 sizeof(struct host_status_block));
6633 } 6711 }
6634 /* Rx */ 6712 /* Rx */
6635 for_each_rx_queue(bp, i) { 6713 for_each_queue(bp, i) {
6636 6714
6637 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6715 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6638 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); 6716 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -6652,7 +6730,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6652 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6730 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6653 } 6731 }
6654 /* Tx */ 6732 /* Tx */
6655 for_each_tx_queue(bp, i) { 6733 for_each_queue(bp, i) {
6656 6734
6657 /* fastpath tx rings: tx_buf tx_desc */ 6735 /* fastpath tx rings: tx_buf tx_desc */
6658 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); 6736 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -6668,11 +6746,13 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6668 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 6746 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6669 sizeof(struct bnx2x_slowpath)); 6747 sizeof(struct bnx2x_slowpath));
6670 6748
6671#ifdef BCM_ISCSI 6749#ifdef BCM_CNIC
6672 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); 6750 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6673 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); 6751 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6674 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); 6752 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6675 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); 6753 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6754 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6755 sizeof(struct host_status_block));
6676#endif 6756#endif
6677 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 6757 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6678 6758
@@ -6712,7 +6792,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6712 sizeof(struct host_status_block)); 6792 sizeof(struct host_status_block));
6713 } 6793 }
6714 /* Rx */ 6794 /* Rx */
6715 for_each_rx_queue(bp, i) { 6795 for_each_queue(bp, i) {
6716 6796
6717 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 6797 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6718 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring), 6798 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
@@ -6734,7 +6814,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6734 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 6814 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6735 } 6815 }
6736 /* Tx */ 6816 /* Tx */
6737 for_each_tx_queue(bp, i) { 6817 for_each_queue(bp, i) {
6738 6818
6739 /* fastpath tx rings: tx_buf tx_desc */ 6819 /* fastpath tx rings: tx_buf tx_desc */
6740 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring), 6820 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
@@ -6751,32 +6831,26 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6751 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 6831 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6752 sizeof(struct bnx2x_slowpath)); 6832 sizeof(struct bnx2x_slowpath));
6753 6833
6754#ifdef BCM_ISCSI 6834#ifdef BCM_CNIC
6755 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); 6835 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6756 6836
6757 /* Initialize T1 */
6758 for (i = 0; i < 64*1024; i += 64) {
6759 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6760 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6761 }
6762
6763 /* allocate searcher T2 table 6837 /* allocate searcher T2 table
6764 we allocate 1/4 of alloc num for T2 6838 we allocate 1/4 of alloc num for T2
6765 (which is not entered into the ILT) */ 6839 (which is not entered into the ILT) */
6766 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); 6840 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6767 6841
6768 /* Initialize T2 */ 6842 /* Initialize T2 (for 1024 connections) */
6769 for (i = 0; i < 16*1024; i += 64) 6843 for (i = 0; i < 16*1024; i += 64)
6770 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; 6844 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6771
6772 /* now fixup the last line in the block to point to the next block */
6773 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6774 6845
6775 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */ 6846 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6776 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); 6847 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6777 6848
6778 /* QM queues (128*MAX_CONN) */ 6849 /* QM queues (128*MAX_CONN) */
6779 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); 6850 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6851
6852 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6853 sizeof(struct host_status_block));
6780#endif 6854#endif
6781 6855
6782 /* Slow path ring */ 6856 /* Slow path ring */
@@ -6796,7 +6870,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6796{ 6870{
6797 int i; 6871 int i;
6798 6872
6799 for_each_tx_queue(bp, i) { 6873 for_each_queue(bp, i) {
6800 struct bnx2x_fastpath *fp = &bp->fp[i]; 6874 struct bnx2x_fastpath *fp = &bp->fp[i];
6801 6875
6802 u16 bd_cons = fp->tx_bd_cons; 6876 u16 bd_cons = fp->tx_bd_cons;
@@ -6814,7 +6888,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6814{ 6888{
6815 int i, j; 6889 int i, j;
6816 6890
6817 for_each_rx_queue(bp, j) { 6891 for_each_queue(bp, j) {
6818 struct bnx2x_fastpath *fp = &bp->fp[j]; 6892 struct bnx2x_fastpath *fp = &bp->fp[j];
6819 6893
6820 for (i = 0; i < NUM_RX_BD; i++) { 6894 for (i = 0; i < NUM_RX_BD; i++) {
@@ -6852,6 +6926,9 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6852 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 6926 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6853 bp->msix_table[0].vector); 6927 bp->msix_table[0].vector);
6854 6928
6929#ifdef BCM_CNIC
6930 offset++;
6931#endif
6855 for_each_queue(bp, i) { 6932 for_each_queue(bp, i) {
6856 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " 6933 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6857 "state %x\n", i, bp->msix_table[i + offset].vector, 6934 "state %x\n", i, bp->msix_table[i + offset].vector,
@@ -6885,6 +6962,12 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6885 bp->msix_table[0].entry = igu_vec; 6962 bp->msix_table[0].entry = igu_vec;
6886 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); 6963 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6887 6964
6965#ifdef BCM_CNIC
6966 igu_vec = BP_L_ID(bp) + offset;
6967 bp->msix_table[1].entry = igu_vec;
6968 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
6969 offset++;
6970#endif
6888 for_each_queue(bp, i) { 6971 for_each_queue(bp, i) {
6889 igu_vec = BP_L_ID(bp) + offset + i; 6972 igu_vec = BP_L_ID(bp) + offset + i;
6890 bp->msix_table[i + offset].entry = igu_vec; 6973 bp->msix_table[i + offset].entry = igu_vec;
@@ -6915,14 +6998,13 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6915 return -EBUSY; 6998 return -EBUSY;
6916 } 6999 }
6917 7000
7001#ifdef BCM_CNIC
7002 offset++;
7003#endif
6918 for_each_queue(bp, i) { 7004 for_each_queue(bp, i) {
6919 struct bnx2x_fastpath *fp = &bp->fp[i]; 7005 struct bnx2x_fastpath *fp = &bp->fp[i];
6920 7006 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
6921 if (i < bp->num_rx_queues) 7007 bp->dev->name, i);
6922 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6923 else
6924 sprintf(fp->name, "%s-tx-%d",
6925 bp->dev->name, i - bp->num_rx_queues);
6926 7008
6927 rc = request_irq(bp->msix_table[i + offset].vector, 7009 rc = request_irq(bp->msix_table[i + offset].vector,
6928 bnx2x_msix_fp_int, 0, fp->name, fp); 7010 bnx2x_msix_fp_int, 0, fp->name, fp);
@@ -6981,7 +7063,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
6981{ 7063{
6982 int i; 7064 int i;
6983 7065
6984 for_each_rx_queue(bp, i) 7066 for_each_queue(bp, i)
6985 napi_enable(&bnx2x_fp(bp, i, napi)); 7067 napi_enable(&bnx2x_fp(bp, i, napi));
6986} 7068}
6987 7069
@@ -6989,7 +7071,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
6989{ 7071{
6990 int i; 7072 int i;
6991 7073
6992 for_each_rx_queue(bp, i) 7074 for_each_queue(bp, i)
6993 napi_disable(&bnx2x_fp(bp, i, napi)); 7075 napi_disable(&bnx2x_fp(bp, i, napi));
6994} 7076}
6995 7077
@@ -7015,14 +7097,25 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7015 bnx2x_int_disable_sync(bp, disable_hw); 7097 bnx2x_int_disable_sync(bp, disable_hw);
7016 bnx2x_napi_disable(bp); 7098 bnx2x_napi_disable(bp);
7017 netif_tx_disable(bp->dev); 7099 netif_tx_disable(bp->dev);
7018 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7019} 7100}
7020 7101
7021/* 7102/*
7022 * Init service functions 7103 * Init service functions
7023 */ 7104 */
7024 7105
7025static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) 7106/**
7107 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7108 *
7109 * @param bp driver descriptor
7110 * @param set set or clear an entry (1 or 0)
7111 * @param mac pointer to a buffer containing a MAC
7112 * @param cl_bit_vec bit vector of clients to register a MAC for
7113 * @param cam_offset offset in a CAM to use
7114 * @param with_bcast set broadcast MAC as well
7115 */
7116static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7117 u32 cl_bit_vec, u8 cam_offset,
7118 u8 with_bcast)
7026{ 7119{
7027 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 7120 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7028 int port = BP_PORT(bp); 7121 int port = BP_PORT(bp);
@@ -7031,25 +7124,25 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7031 * unicasts 0-31:port0 32-63:port1 7124 * unicasts 0-31:port0 32-63:port1
7032 * multicast 64-127:port0 128-191:port1 7125 * multicast 64-127:port0 128-191:port1
7033 */ 7126 */
7034 config->hdr.length = 2; 7127 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7035 config->hdr.offset = port ? 32 : 0; 7128 config->hdr.offset = cam_offset;
7036 config->hdr.client_id = bp->fp->cl_id; 7129 config->hdr.client_id = 0xff;
7037 config->hdr.reserved1 = 0; 7130 config->hdr.reserved1 = 0;
7038 7131
7039 /* primary MAC */ 7132 /* primary MAC */
7040 config->config_table[0].cam_entry.msb_mac_addr = 7133 config->config_table[0].cam_entry.msb_mac_addr =
7041 swab16(*(u16 *)&bp->dev->dev_addr[0]); 7134 swab16(*(u16 *)&mac[0]);
7042 config->config_table[0].cam_entry.middle_mac_addr = 7135 config->config_table[0].cam_entry.middle_mac_addr =
7043 swab16(*(u16 *)&bp->dev->dev_addr[2]); 7136 swab16(*(u16 *)&mac[2]);
7044 config->config_table[0].cam_entry.lsb_mac_addr = 7137 config->config_table[0].cam_entry.lsb_mac_addr =
7045 swab16(*(u16 *)&bp->dev->dev_addr[4]); 7138 swab16(*(u16 *)&mac[4]);
7046 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 7139 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7047 if (set) 7140 if (set)
7048 config->config_table[0].target_table_entry.flags = 0; 7141 config->config_table[0].target_table_entry.flags = 0;
7049 else 7142 else
7050 CAM_INVALIDATE(config->config_table[0]); 7143 CAM_INVALIDATE(config->config_table[0]);
7051 config->config_table[0].target_table_entry.clients_bit_vector = 7144 config->config_table[0].target_table_entry.clients_bit_vector =
7052 cpu_to_le32(1 << BP_L_ID(bp)); 7145 cpu_to_le32(cl_bit_vec);
7053 config->config_table[0].target_table_entry.vlan_id = 0; 7146 config->config_table[0].target_table_entry.vlan_id = 0;
7054 7147
7055 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", 7148 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
@@ -7059,47 +7152,58 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7059 config->config_table[0].cam_entry.lsb_mac_addr); 7152 config->config_table[0].cam_entry.lsb_mac_addr);
7060 7153
7061 /* broadcast */ 7154 /* broadcast */
7062 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff); 7155 if (with_bcast) {
7063 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff); 7156 config->config_table[1].cam_entry.msb_mac_addr =
7064 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff); 7157 cpu_to_le16(0xffff);
7065 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 7158 config->config_table[1].cam_entry.middle_mac_addr =
7066 if (set) 7159 cpu_to_le16(0xffff);
7067 config->config_table[1].target_table_entry.flags = 7160 config->config_table[1].cam_entry.lsb_mac_addr =
7068 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 7161 cpu_to_le16(0xffff);
7069 else 7162 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7070 CAM_INVALIDATE(config->config_table[1]); 7163 if (set)
7071 config->config_table[1].target_table_entry.clients_bit_vector = 7164 config->config_table[1].target_table_entry.flags =
7072 cpu_to_le32(1 << BP_L_ID(bp)); 7165 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7073 config->config_table[1].target_table_entry.vlan_id = 0; 7166 else
7167 CAM_INVALIDATE(config->config_table[1]);
7168 config->config_table[1].target_table_entry.clients_bit_vector =
7169 cpu_to_le32(cl_bit_vec);
7170 config->config_table[1].target_table_entry.vlan_id = 0;
7171 }
7074 7172
7075 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7173 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7076 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 7174 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7077 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 7175 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7078} 7176}
7079 7177
7080static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) 7178/**
7179 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7180 *
7181 * @param bp driver descriptor
7182 * @param set set or clear an entry (1 or 0)
7183 * @param mac pointer to a buffer containing a MAC
7184 * @param cl_bit_vec bit vector of clients to register a MAC for
7185 * @param cam_offset offset in a CAM to use
7186 */
7187static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7188 u32 cl_bit_vec, u8 cam_offset)
7081{ 7189{
7082 struct mac_configuration_cmd_e1h *config = 7190 struct mac_configuration_cmd_e1h *config =
7083 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 7191 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7084 7192
7085 /* CAM allocation for E1H
7086 * unicasts: by func number
7087 * multicast: 20+FUNC*20, 20 each
7088 */
7089 config->hdr.length = 1; 7193 config->hdr.length = 1;
7090 config->hdr.offset = BP_FUNC(bp); 7194 config->hdr.offset = cam_offset;
7091 config->hdr.client_id = bp->fp->cl_id; 7195 config->hdr.client_id = 0xff;
7092 config->hdr.reserved1 = 0; 7196 config->hdr.reserved1 = 0;
7093 7197
7094 /* primary MAC */ 7198 /* primary MAC */
7095 config->config_table[0].msb_mac_addr = 7199 config->config_table[0].msb_mac_addr =
7096 swab16(*(u16 *)&bp->dev->dev_addr[0]); 7200 swab16(*(u16 *)&mac[0]);
7097 config->config_table[0].middle_mac_addr = 7201 config->config_table[0].middle_mac_addr =
7098 swab16(*(u16 *)&bp->dev->dev_addr[2]); 7202 swab16(*(u16 *)&mac[2]);
7099 config->config_table[0].lsb_mac_addr = 7203 config->config_table[0].lsb_mac_addr =
7100 swab16(*(u16 *)&bp->dev->dev_addr[4]); 7204 swab16(*(u16 *)&mac[4]);
7101 config->config_table[0].clients_bit_vector = 7205 config->config_table[0].clients_bit_vector =
7102 cpu_to_le32(1 << BP_L_ID(bp)); 7206 cpu_to_le32(cl_bit_vec);
7103 config->config_table[0].vlan_id = 0; 7207 config->config_table[0].vlan_id = 0;
7104 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 7208 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7105 if (set) 7209 if (set)
@@ -7108,11 +7212,11 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7108 config->config_table[0].flags = 7212 config->config_table[0].flags =
7109 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; 7213 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7110 7214
7111 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 7215 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7112 (set ? "setting" : "clearing"), 7216 (set ? "setting" : "clearing"),
7113 config->config_table[0].msb_mac_addr, 7217 config->config_table[0].msb_mac_addr,
7114 config->config_table[0].middle_mac_addr, 7218 config->config_table[0].middle_mac_addr,
7115 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 7219 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7116 7220
7117 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7221 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7118 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 7222 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@@ -7164,6 +7268,69 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7164 return -EBUSY; 7268 return -EBUSY;
7165} 7269}
7166 7270
7271static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7272{
7273 bp->set_mac_pending++;
7274 smp_wmb();
7275
7276 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7277 (1 << bp->fp->cl_id), BP_FUNC(bp));
7278
7279 /* Wait for a completion */
7280 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7281}
7282
7283static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7284{
7285 bp->set_mac_pending++;
7286 smp_wmb();
7287
7288 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7289 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7290 1);
7291
7292 /* Wait for a completion */
7293 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7294}
7295
7296#ifdef BCM_CNIC
7297/**
7298 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7299 * MAC(s). This function will wait until the ramdord completion
7300 * returns.
7301 *
7302 * @param bp driver handle
7303 * @param set set or clear the CAM entry
7304 *
7305 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7306 */
7307static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7308{
7309 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7310
7311 bp->set_mac_pending++;
7312 smp_wmb();
7313
7314 /* Send a SET_MAC ramrod */
7315 if (CHIP_IS_E1(bp))
7316 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7317 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7318 1);
7319 else
7320 /* CAM allocation for E1H
7321 * unicasts: by func number
7322 * multicast: 20+FUNC*20, 20 each
7323 */
7324 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7325 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7326
7327 /* Wait for a completion when setting */
7328 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7329
7330 return 0;
7331}
7332#endif
7333
7167static int bnx2x_setup_leading(struct bnx2x *bp) 7334static int bnx2x_setup_leading(struct bnx2x *bp)
7168{ 7335{
7169 int rc; 7336 int rc;
@@ -7199,96 +7366,67 @@ static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7199 7366
7200static int bnx2x_poll(struct napi_struct *napi, int budget); 7367static int bnx2x_poll(struct napi_struct *napi, int budget);
7201 7368
7202static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out, 7369static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7203 int *num_tx_queues_out)
7204{ 7370{
7205 int _num_rx_queues = 0, _num_tx_queues = 0;
7206 7371
7207 switch (bp->multi_mode) { 7372 switch (bp->multi_mode) {
7208 case ETH_RSS_MODE_DISABLED: 7373 case ETH_RSS_MODE_DISABLED:
7209 _num_rx_queues = 1; 7374 bp->num_queues = 1;
7210 _num_tx_queues = 1;
7211 break; 7375 break;
7212 7376
7213 case ETH_RSS_MODE_REGULAR: 7377 case ETH_RSS_MODE_REGULAR:
7214 if (num_rx_queues) 7378 if (num_queues)
7215 _num_rx_queues = min_t(u32, num_rx_queues, 7379 bp->num_queues = min_t(u32, num_queues,
7216 BNX2X_MAX_QUEUES(bp)); 7380 BNX2X_MAX_QUEUES(bp));
7217 else 7381 else
7218 _num_rx_queues = min_t(u32, num_online_cpus(), 7382 bp->num_queues = min_t(u32, num_online_cpus(),
7219 BNX2X_MAX_QUEUES(bp)); 7383 BNX2X_MAX_QUEUES(bp));
7220
7221 if (num_tx_queues)
7222 _num_tx_queues = min_t(u32, num_tx_queues,
7223 BNX2X_MAX_QUEUES(bp));
7224 else
7225 _num_tx_queues = min_t(u32, num_online_cpus(),
7226 BNX2X_MAX_QUEUES(bp));
7227
7228 /* There must be not more Tx queues than Rx queues */
7229 if (_num_tx_queues > _num_rx_queues) {
7230 BNX2X_ERR("number of tx queues (%d) > "
7231 "number of rx queues (%d)"
7232 " defaulting to %d\n",
7233 _num_tx_queues, _num_rx_queues,
7234 _num_rx_queues);
7235 _num_tx_queues = _num_rx_queues;
7236 }
7237 break; 7384 break;
7238 7385
7239 7386
7240 default: 7387 default:
7241 _num_rx_queues = 1; 7388 bp->num_queues = 1;
7242 _num_tx_queues = 1;
7243 break; 7389 break;
7244 } 7390 }
7245
7246 *num_rx_queues_out = _num_rx_queues;
7247 *num_tx_queues_out = _num_tx_queues;
7248} 7391}
7249 7392
7250static int bnx2x_set_int_mode(struct bnx2x *bp) 7393static int bnx2x_set_num_queues(struct bnx2x *bp)
7251{ 7394{
7252 int rc = 0; 7395 int rc = 0;
7253 7396
7254 switch (int_mode) { 7397 switch (int_mode) {
7255 case INT_MODE_INTx: 7398 case INT_MODE_INTx:
7256 case INT_MODE_MSI: 7399 case INT_MODE_MSI:
7257 bp->num_rx_queues = 1; 7400 bp->num_queues = 1;
7258 bp->num_tx_queues = 1;
7259 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7401 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7260 break; 7402 break;
7261 7403
7262 case INT_MODE_MSIX: 7404 case INT_MODE_MSIX:
7263 default: 7405 default:
7264 /* Set interrupt mode according to bp->multi_mode value */ 7406 /* Set number of queues according to bp->multi_mode value */
7265 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues, 7407 bnx2x_set_num_queues_msix(bp);
7266 &bp->num_tx_queues);
7267 7408
7268 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n", 7409 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7269 bp->num_rx_queues, bp->num_tx_queues); 7410 bp->num_queues);
7270 7411
7271 /* if we can't use MSI-X we only need one fp, 7412 /* if we can't use MSI-X we only need one fp,
7272 * so try to enable MSI-X with the requested number of fp's 7413 * so try to enable MSI-X with the requested number of fp's
7273 * and fallback to MSI or legacy INTx with one fp 7414 * and fallback to MSI or legacy INTx with one fp
7274 */ 7415 */
7275 rc = bnx2x_enable_msix(bp); 7416 rc = bnx2x_enable_msix(bp);
7276 if (rc) { 7417 if (rc)
7277 /* failed to enable MSI-X */ 7418 /* failed to enable MSI-X */
7278 if (bp->multi_mode) 7419 bp->num_queues = 1;
7279 BNX2X_ERR("Multi requested but failed to "
7280 "enable MSI-X (rx %d tx %d), "
7281 "set number of queues to 1\n",
7282 bp->num_rx_queues, bp->num_tx_queues);
7283 bp->num_rx_queues = 1;
7284 bp->num_tx_queues = 1;
7285 }
7286 break; 7420 break;
7287 } 7421 }
7288 bp->dev->real_num_tx_queues = bp->num_tx_queues; 7422 bp->dev->real_num_tx_queues = bp->num_queues;
7289 return rc; 7423 return rc;
7290} 7424}
7291 7425
7426#ifdef BCM_CNIC
7427static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7428static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7429#endif
7292 7430
7293/* must be called with rtnl_lock */ 7431/* must be called with rtnl_lock */
7294static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 7432static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
@@ -7303,16 +7441,16 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7303 7441
7304 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD; 7442 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7305 7443
7306 rc = bnx2x_set_int_mode(bp); 7444 rc = bnx2x_set_num_queues(bp);
7307 7445
7308 if (bnx2x_alloc_mem(bp)) 7446 if (bnx2x_alloc_mem(bp))
7309 return -ENOMEM; 7447 return -ENOMEM;
7310 7448
7311 for_each_rx_queue(bp, i) 7449 for_each_queue(bp, i)
7312 bnx2x_fp(bp, i, disable_tpa) = 7450 bnx2x_fp(bp, i, disable_tpa) =
7313 ((bp->flags & TPA_ENABLE_FLAG) == 0); 7451 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7314 7452
7315 for_each_rx_queue(bp, i) 7453 for_each_queue(bp, i)
7316 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 7454 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7317 bnx2x_poll, 128); 7455 bnx2x_poll, 128);
7318 7456
@@ -7326,7 +7464,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7326 } 7464 }
7327 } else { 7465 } else {
7328 /* Fall to INTx if failed to enable MSI-X due to lack of 7466 /* Fall to INTx if failed to enable MSI-X due to lack of
7329 memory (in bnx2x_set_int_mode()) */ 7467 memory (in bnx2x_set_num_queues()) */
7330 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx)) 7468 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7331 bnx2x_enable_msi(bp); 7469 bnx2x_enable_msi(bp);
7332 bnx2x_ack_int(bp); 7470 bnx2x_ack_int(bp);
@@ -7427,20 +7565,37 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7427 if (CHIP_IS_E1H(bp)) 7565 if (CHIP_IS_E1H(bp))
7428 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 7566 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7429 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 7567 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7430 bp->state = BNX2X_STATE_DISABLED; 7568 bp->flags |= MF_FUNC_DIS;
7431 } 7569 }
7432 7570
7433 if (bp->state == BNX2X_STATE_OPEN) { 7571 if (bp->state == BNX2X_STATE_OPEN) {
7572#ifdef BCM_CNIC
7573 /* Enable Timer scan */
7574 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7575#endif
7434 for_each_nondefault_queue(bp, i) { 7576 for_each_nondefault_queue(bp, i) {
7435 rc = bnx2x_setup_multi(bp, i); 7577 rc = bnx2x_setup_multi(bp, i);
7436 if (rc) 7578 if (rc)
7579#ifdef BCM_CNIC
7580 goto load_error4;
7581#else
7437 goto load_error3; 7582 goto load_error3;
7583#endif
7438 } 7584 }
7439 7585
7440 if (CHIP_IS_E1(bp)) 7586 if (CHIP_IS_E1(bp))
7441 bnx2x_set_mac_addr_e1(bp, 1); 7587 bnx2x_set_eth_mac_addr_e1(bp, 1);
7442 else 7588 else
7443 bnx2x_set_mac_addr_e1h(bp, 1); 7589 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7590#ifdef BCM_CNIC
7591 /* Set iSCSI L2 MAC */
7592 mutex_lock(&bp->cnic_mutex);
7593 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7594 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7595 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7596 }
7597 mutex_unlock(&bp->cnic_mutex);
7598#endif
7444 } 7599 }
7445 7600
7446 if (bp->port.pmf) 7601 if (bp->port.pmf)
@@ -7481,9 +7636,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7481 /* start the timer */ 7636 /* start the timer */
7482 mod_timer(&bp->timer, jiffies + bp->current_interval); 7637 mod_timer(&bp->timer, jiffies + bp->current_interval);
7483 7638
7639#ifdef BCM_CNIC
7640 bnx2x_setup_cnic_irq_info(bp);
7641 if (bp->state == BNX2X_STATE_OPEN)
7642 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7643#endif
7484 7644
7485 return 0; 7645 return 0;
7486 7646
7647#ifdef BCM_CNIC
7648load_error4:
7649 /* Disable Timer scan */
7650 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7651#endif
7487load_error3: 7652load_error3:
7488 bnx2x_int_disable_sync(bp, 1); 7653 bnx2x_int_disable_sync(bp, 1);
7489 if (!BP_NOMCP(bp)) { 7654 if (!BP_NOMCP(bp)) {
@@ -7493,14 +7658,14 @@ load_error3:
7493 bp->port.pmf = 0; 7658 bp->port.pmf = 0;
7494 /* Free SKBs, SGEs, TPA pool and driver internals */ 7659 /* Free SKBs, SGEs, TPA pool and driver internals */
7495 bnx2x_free_skbs(bp); 7660 bnx2x_free_skbs(bp);
7496 for_each_rx_queue(bp, i) 7661 for_each_queue(bp, i)
7497 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 7662 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7498load_error2: 7663load_error2:
7499 /* Release IRQs */ 7664 /* Release IRQs */
7500 bnx2x_free_irq(bp); 7665 bnx2x_free_irq(bp);
7501load_error1: 7666load_error1:
7502 bnx2x_napi_disable(bp); 7667 bnx2x_napi_disable(bp);
7503 for_each_rx_queue(bp, i) 7668 for_each_queue(bp, i)
7504 netif_napi_del(&bnx2x_fp(bp, i, napi)); 7669 netif_napi_del(&bnx2x_fp(bp, i, napi));
7505 bnx2x_free_mem(bp); 7670 bnx2x_free_mem(bp);
7506 7671
@@ -7591,6 +7756,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
7591 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7756 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7592 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7757 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7593 7758
7759#ifdef BCM_CNIC
7760 /* Disable Timer scan */
7761 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7762 /*
7763 * Wait for at least 10ms and up to 2 second for the timers scan to
7764 * complete
7765 */
7766 for (i = 0; i < 200; i++) {
7767 msleep(10);
7768 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7769 break;
7770 }
7771#endif
7594 /* Clear ILT */ 7772 /* Clear ILT */
7595 base = FUNC_ILT_BASE(func); 7773 base = FUNC_ILT_BASE(func);
7596 for (i = base; i < base + ILT_PER_FUNC; i++) 7774 for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7657,6 +7835,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7657 u32 reset_code = 0; 7835 u32 reset_code = 0;
7658 int i, cnt, rc; 7836 int i, cnt, rc;
7659 7837
7838#ifdef BCM_CNIC
7839 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7840#endif
7660 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 7841 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7661 7842
7662 /* Set "drop all" */ 7843 /* Set "drop all" */
@@ -7675,7 +7856,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7675 bnx2x_free_irq(bp); 7856 bnx2x_free_irq(bp);
7676 7857
7677 /* Wait until tx fastpath tasks complete */ 7858 /* Wait until tx fastpath tasks complete */
7678 for_each_tx_queue(bp, i) { 7859 for_each_queue(bp, i) {
7679 struct bnx2x_fastpath *fp = &bp->fp[i]; 7860 struct bnx2x_fastpath *fp = &bp->fp[i];
7680 7861
7681 cnt = 1000; 7862 cnt = 1000;
@@ -7703,7 +7884,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7703 struct mac_configuration_cmd *config = 7884 struct mac_configuration_cmd *config =
7704 bnx2x_sp(bp, mcast_config); 7885 bnx2x_sp(bp, mcast_config);
7705 7886
7706 bnx2x_set_mac_addr_e1(bp, 0); 7887 bnx2x_set_eth_mac_addr_e1(bp, 0);
7707 7888
7708 for (i = 0; i < config->hdr.length; i++) 7889 for (i = 0; i < config->hdr.length; i++)
7709 CAM_INVALIDATE(config->config_table[i]); 7890 CAM_INVALIDATE(config->config_table[i]);
@@ -7716,6 +7897,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7716 config->hdr.client_id = bp->fp->cl_id; 7897 config->hdr.client_id = bp->fp->cl_id;
7717 config->hdr.reserved1 = 0; 7898 config->hdr.reserved1 = 0;
7718 7899
7900 bp->set_mac_pending++;
7901 smp_wmb();
7902
7719 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7903 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7720 U64_HI(bnx2x_sp_mapping(bp, mcast_config)), 7904 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7721 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); 7905 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
@@ -7723,13 +7907,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7723 } else { /* E1H */ 7907 } else { /* E1H */
7724 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7908 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7725 7909
7726 bnx2x_set_mac_addr_e1h(bp, 0); 7910 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7727 7911
7728 for (i = 0; i < MC_HASH_SIZE; i++) 7912 for (i = 0; i < MC_HASH_SIZE; i++)
7729 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 7913 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7730 7914
7731 REG_WR(bp, MISC_REG_E1HMF_MODE, 0); 7915 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7732 } 7916 }
7917#ifdef BCM_CNIC
7918 /* Clear iSCSI L2 MAC */
7919 mutex_lock(&bp->cnic_mutex);
7920 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7921 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7922 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7923 }
7924 mutex_unlock(&bp->cnic_mutex);
7925#endif
7733 7926
7734 if (unload_mode == UNLOAD_NORMAL) 7927 if (unload_mode == UNLOAD_NORMAL)
7735 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 7928 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -7806,9 +7999,9 @@ unload_error:
7806 7999
7807 /* Free SKBs, SGEs, TPA pool and driver internals */ 8000 /* Free SKBs, SGEs, TPA pool and driver internals */
7808 bnx2x_free_skbs(bp); 8001 bnx2x_free_skbs(bp);
7809 for_each_rx_queue(bp, i) 8002 for_each_queue(bp, i)
7810 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 8003 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7811 for_each_rx_queue(bp, i) 8004 for_each_queue(bp, i)
7812 netif_napi_del(&bnx2x_fp(bp, i, napi)); 8005 netif_napi_del(&bnx2x_fp(bp, i, napi));
7813 bnx2x_free_mem(bp); 8006 bnx2x_free_mem(bp);
7814 8007
@@ -8506,6 +8699,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8506 bp->link_params.req_flow_ctrl, bp->port.advertising); 8699 bp->link_params.req_flow_ctrl, bp->port.advertising);
8507} 8700}
8508 8701
8702static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8703{
8704 mac_hi = cpu_to_be16(mac_hi);
8705 mac_lo = cpu_to_be32(mac_lo);
8706 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8707 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8708}
8709
8509static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 8710static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8510{ 8711{
8511 int port = BP_PORT(bp); 8712 int port = BP_PORT(bp);
@@ -8587,14 +8788,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8587 8788
8588 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 8789 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8589 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 8790 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8590 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); 8791 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8591 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8592 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8593 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8594 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8595 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8596 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 8792 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8597 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8793 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8794
8795#ifdef BCM_CNIC
8796 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8797 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8798 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8799#endif
8598} 8800}
8599 8801
8600static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8802static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -8690,6 +8892,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8690 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ 8892 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8691 8893
8692 mutex_init(&bp->port.phy_mutex); 8894 mutex_init(&bp->port.phy_mutex);
8895 mutex_init(&bp->fw_mb_mutex);
8896#ifdef BCM_CNIC
8897 mutex_init(&bp->cnic_mutex);
8898#endif
8693 8899
8694 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 8900 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8695 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 8901 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
@@ -8738,8 +8944,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8738 8944
8739 bp->rx_csum = 1; 8945 bp->rx_csum = 1;
8740 8946
8741 bp->tx_ticks = 50; 8947 /* make sure that the numbers are in the right granularity */
8742 bp->rx_ticks = 25; 8948 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8949 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8743 8950
8744 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 8951 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8745 bp->current_interval = (poll ? poll : timer_interval); 8952 bp->current_interval = (poll ? poll : timer_interval);
@@ -8765,20 +8972,23 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8765 cmd->supported = bp->port.supported; 8972 cmd->supported = bp->port.supported;
8766 cmd->advertising = bp->port.advertising; 8973 cmd->advertising = bp->port.advertising;
8767 8974
8768 if (netif_carrier_ok(dev)) { 8975 if ((bp->state == BNX2X_STATE_OPEN) &&
8976 !(bp->flags & MF_FUNC_DIS) &&
8977 (bp->link_vars.link_up)) {
8769 cmd->speed = bp->link_vars.line_speed; 8978 cmd->speed = bp->link_vars.line_speed;
8770 cmd->duplex = bp->link_vars.duplex; 8979 cmd->duplex = bp->link_vars.duplex;
8771 } else { 8980 if (IS_E1HMF(bp)) {
8772 cmd->speed = bp->link_params.req_line_speed; 8981 u16 vn_max_rate;
8773 cmd->duplex = bp->link_params.req_duplex;
8774 }
8775 if (IS_E1HMF(bp)) {
8776 u16 vn_max_rate;
8777 8982
8778 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> 8983 vn_max_rate =
8984 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8779 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 8985 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8780 if (vn_max_rate < cmd->speed) 8986 if (vn_max_rate < cmd->speed)
8781 cmd->speed = vn_max_rate; 8987 cmd->speed = vn_max_rate;
8988 }
8989 } else {
8990 cmd->speed = -1;
8991 cmd->duplex = -1;
8782 } 8992 }
8783 8993
8784 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { 8994 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
@@ -9163,6 +9373,9 @@ static u32 bnx2x_get_link(struct net_device *dev)
9163{ 9373{
9164 struct bnx2x *bp = netdev_priv(dev); 9374 struct bnx2x *bp = netdev_priv(dev);
9165 9375
9376 if (bp->flags & MF_FUNC_DIS)
9377 return 0;
9378
9166 return bp->link_vars.link_up; 9379 return bp->link_vars.link_up;
9167} 9380}
9168 9381
@@ -9567,8 +9780,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
9567 9780
9568 } else if (eeprom->magic == 0x50485952) { 9781 } else if (eeprom->magic == 0x50485952) {
9569 /* 'PHYR' (0x50485952): re-init link after FW upgrade */ 9782 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9570 if ((bp->state == BNX2X_STATE_OPEN) || 9783 if (bp->state == BNX2X_STATE_OPEN) {
9571 (bp->state == BNX2X_STATE_DISABLED)) {
9572 bnx2x_acquire_phy_lock(bp); 9784 bnx2x_acquire_phy_lock(bp);
9573 rc |= bnx2x_link_reset(&bp->link_params, 9785 rc |= bnx2x_link_reset(&bp->link_params,
9574 &bp->link_vars, 1); 9786 &bp->link_vars, 1);
@@ -9818,11 +10030,6 @@ static const struct {
9818 { "idle check (online)" } 10030 { "idle check (online)" }
9819}; 10031};
9820 10032
9821static int bnx2x_self_test_count(struct net_device *dev)
9822{
9823 return BNX2X_NUM_TESTS;
9824}
9825
9826static int bnx2x_test_registers(struct bnx2x *bp) 10033static int bnx2x_test_registers(struct bnx2x *bp)
9827{ 10034{
9828 int idx, i, rc = -ENODEV; 10035 int idx, i, rc = -ENODEV;
@@ -9990,7 +10197,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9990 struct sk_buff *skb; 10197 struct sk_buff *skb;
9991 unsigned char *packet; 10198 unsigned char *packet;
9992 struct bnx2x_fastpath *fp_rx = &bp->fp[0]; 10199 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9993 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues]; 10200 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
9994 u16 tx_start_idx, tx_idx; 10201 u16 tx_start_idx, tx_idx;
9995 u16 rx_start_idx, rx_idx; 10202 u16 rx_start_idx, rx_idx;
9996 u16 pkt_prod, bd_prod; 10203 u16 pkt_prod, bd_prod;
@@ -10067,13 +10274,12 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10067 10274
10068 fp_tx->tx_db.data.prod += 2; 10275 fp_tx->tx_db.data.prod += 2;
10069 barrier(); 10276 barrier();
10070 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw); 10277 DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
10071 10278
10072 mmiowb(); 10279 mmiowb();
10073 10280
10074 num_pkts++; 10281 num_pkts++;
10075 fp_tx->tx_bd_prod += 2; /* start + pbd */ 10282 fp_tx->tx_bd_prod += 2; /* start + pbd */
10076 bp->dev->trans_start = jiffies;
10077 10283
10078 udelay(100); 10284 udelay(100);
10079 10285
@@ -10223,14 +10429,16 @@ static int bnx2x_test_intr(struct bnx2x *bp)
10223 config->hdr.client_id = bp->fp->cl_id; 10429 config->hdr.client_id = bp->fp->cl_id;
10224 config->hdr.reserved1 = 0; 10430 config->hdr.reserved1 = 0;
10225 10431
10432 bp->set_mac_pending++;
10433 smp_wmb();
10226 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 10434 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10227 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 10435 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10228 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 10436 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10229 if (rc == 0) { 10437 if (rc == 0) {
10230 bp->set_mac_pending++;
10231 for (i = 0; i < 10; i++) { 10438 for (i = 0; i < 10; i++) {
10232 if (!bp->set_mac_pending) 10439 if (!bp->set_mac_pending)
10233 break; 10440 break;
10441 smp_rmb();
10234 msleep_interruptible(10); 10442 msleep_interruptible(10);
10235 } 10443 }
10236 if (i == 10) 10444 if (i == 10)
@@ -10264,7 +10472,7 @@ static void bnx2x_self_test(struct net_device *dev,
10264 /* disable input for TX port IF */ 10472 /* disable input for TX port IF */
10265 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); 10473 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10266 10474
10267 link_up = bp->link_vars.link_up; 10475 link_up = (bnx2x_link_test(bp) == 0);
10268 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 10476 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10269 bnx2x_nic_load(bp, LOAD_DIAG); 10477 bnx2x_nic_load(bp, LOAD_DIAG);
10270 /* wait until link state is restored */ 10478 /* wait until link state is restored */
@@ -10436,6 +10644,36 @@ static const struct {
10436#define IS_E1HMF_MODE_STAT(bp) \ 10644#define IS_E1HMF_MODE_STAT(bp) \
10437 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS)) 10645 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10438 10646
10647static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10648{
10649 struct bnx2x *bp = netdev_priv(dev);
10650 int i, num_stats;
10651
10652 switch(stringset) {
10653 case ETH_SS_STATS:
10654 if (is_multi(bp)) {
10655 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
10656 if (!IS_E1HMF_MODE_STAT(bp))
10657 num_stats += BNX2X_NUM_STATS;
10658 } else {
10659 if (IS_E1HMF_MODE_STAT(bp)) {
10660 num_stats = 0;
10661 for (i = 0; i < BNX2X_NUM_STATS; i++)
10662 if (IS_FUNC_STAT(i))
10663 num_stats++;
10664 } else
10665 num_stats = BNX2X_NUM_STATS;
10666 }
10667 return num_stats;
10668
10669 case ETH_SS_TEST:
10670 return BNX2X_NUM_TESTS;
10671
10672 default:
10673 return -EINVAL;
10674 }
10675}
10676
10439static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 10677static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10440{ 10678{
10441 struct bnx2x *bp = netdev_priv(dev); 10679 struct bnx2x *bp = netdev_priv(dev);
@@ -10445,7 +10683,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10445 case ETH_SS_STATS: 10683 case ETH_SS_STATS:
10446 if (is_multi(bp)) { 10684 if (is_multi(bp)) {
10447 k = 0; 10685 k = 0;
10448 for_each_rx_queue(bp, i) { 10686 for_each_queue(bp, i) {
10449 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 10687 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10450 sprintf(buf + (k + j)*ETH_GSTRING_LEN, 10688 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10451 bnx2x_q_stats_arr[j].string, i); 10689 bnx2x_q_stats_arr[j].string, i);
@@ -10473,28 +10711,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10473 } 10711 }
10474} 10712}
10475 10713
10476static int bnx2x_get_stats_count(struct net_device *dev)
10477{
10478 struct bnx2x *bp = netdev_priv(dev);
10479 int i, num_stats;
10480
10481 if (is_multi(bp)) {
10482 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10483 if (!IS_E1HMF_MODE_STAT(bp))
10484 num_stats += BNX2X_NUM_STATS;
10485 } else {
10486 if (IS_E1HMF_MODE_STAT(bp)) {
10487 num_stats = 0;
10488 for (i = 0; i < BNX2X_NUM_STATS; i++)
10489 if (IS_FUNC_STAT(i))
10490 num_stats++;
10491 } else
10492 num_stats = BNX2X_NUM_STATS;
10493 }
10494
10495 return num_stats;
10496}
10497
10498static void bnx2x_get_ethtool_stats(struct net_device *dev, 10714static void bnx2x_get_ethtool_stats(struct net_device *dev,
10499 struct ethtool_stats *stats, u64 *buf) 10715 struct ethtool_stats *stats, u64 *buf)
10500{ 10716{
@@ -10504,7 +10720,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10504 10720
10505 if (is_multi(bp)) { 10721 if (is_multi(bp)) {
10506 k = 0; 10722 k = 0;
10507 for_each_rx_queue(bp, i) { 10723 for_each_queue(bp, i) {
10508 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 10724 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10509 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 10725 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10510 if (bnx2x_q_stats_arr[j].size == 0) { 10726 if (bnx2x_q_stats_arr[j].size == 0) {
@@ -10570,7 +10786,6 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10570static int bnx2x_phys_id(struct net_device *dev, u32 data) 10786static int bnx2x_phys_id(struct net_device *dev, u32 data)
10571{ 10787{
10572 struct bnx2x *bp = netdev_priv(dev); 10788 struct bnx2x *bp = netdev_priv(dev);
10573 int port = BP_PORT(bp);
10574 int i; 10789 int i;
10575 10790
10576 if (!netif_running(dev)) 10791 if (!netif_running(dev))
@@ -10584,13 +10799,10 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10584 10799
10585 for (i = 0; i < (data * 2); i++) { 10800 for (i = 0; i < (data * 2); i++) {
10586 if ((i % 2) == 0) 10801 if ((i % 2) == 0)
10587 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000, 10802 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10588 bp->link_params.hw_led_mode, 10803 SPEED_1000);
10589 bp->link_params.chip_id);
10590 else 10804 else
10591 bnx2x_set_led(bp, port, LED_MODE_OFF, 0, 10805 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10592 bp->link_params.hw_led_mode,
10593 bp->link_params.chip_id);
10594 10806
10595 msleep_interruptible(500); 10807 msleep_interruptible(500);
10596 if (signal_pending(current)) 10808 if (signal_pending(current))
@@ -10598,10 +10810,8 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10598 } 10810 }
10599 10811
10600 if (bp->link_vars.link_up) 10812 if (bp->link_vars.link_up)
10601 bnx2x_set_led(bp, port, LED_MODE_OPER, 10813 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10602 bp->link_vars.line_speed, 10814 bp->link_vars.line_speed);
10603 bp->link_params.hw_led_mode,
10604 bp->link_params.chip_id);
10605 10815
10606 return 0; 10816 return 0;
10607} 10817}
@@ -10637,11 +10847,10 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
10637 .set_sg = ethtool_op_set_sg, 10847 .set_sg = ethtool_op_set_sg,
10638 .get_tso = ethtool_op_get_tso, 10848 .get_tso = ethtool_op_get_tso,
10639 .set_tso = bnx2x_set_tso, 10849 .set_tso = bnx2x_set_tso,
10640 .self_test_count = bnx2x_self_test_count,
10641 .self_test = bnx2x_self_test, 10850 .self_test = bnx2x_self_test,
10851 .get_sset_count = bnx2x_get_sset_count,
10642 .get_strings = bnx2x_get_strings, 10852 .get_strings = bnx2x_get_strings,
10643 .phys_id = bnx2x_phys_id, 10853 .phys_id = bnx2x_phys_id,
10644 .get_stats_count = bnx2x_get_stats_count,
10645 .get_ethtool_stats = bnx2x_get_ethtool_stats, 10854 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10646}; 10855};
10647 10856
@@ -10707,54 +10916,60 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10707 10916
10708static int bnx2x_poll(struct napi_struct *napi, int budget) 10917static int bnx2x_poll(struct napi_struct *napi, int budget)
10709{ 10918{
10919 int work_done = 0;
10710 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath, 10920 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10711 napi); 10921 napi);
10712 struct bnx2x *bp = fp->bp; 10922 struct bnx2x *bp = fp->bp;
10713 int work_done = 0;
10714 10923
10924 while (1) {
10715#ifdef BNX2X_STOP_ON_ERROR 10925#ifdef BNX2X_STOP_ON_ERROR
10716 if (unlikely(bp->panic)) 10926 if (unlikely(bp->panic)) {
10717 goto poll_panic; 10927 napi_complete(napi);
10928 return 0;
10929 }
10718#endif 10930#endif
10719 10931
10720 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb); 10932 if (bnx2x_has_tx_work(fp))
10721 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256); 10933 bnx2x_tx_int(fp);
10722
10723 bnx2x_update_fpsb_idx(fp);
10724
10725 if (bnx2x_has_rx_work(fp)) {
10726 work_done = bnx2x_rx_int(fp, budget);
10727 10934
10728 /* must not complete if we consumed full budget */ 10935 if (bnx2x_has_rx_work(fp)) {
10729 if (work_done >= budget) 10936 work_done += bnx2x_rx_int(fp, budget - work_done);
10730 goto poll_again;
10731 }
10732 10937
10733 /* bnx2x_has_rx_work() reads the status block, thus we need to 10938 /* must not complete if we consumed full budget */
10734 * ensure that status block indices have been actually read 10939 if (work_done >= budget)
10735 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work) 10940 break;
10736 * so that we won't write the "newer" value of the status block to IGU 10941 }
10737 * (if there was a DMA right after bnx2x_has_rx_work and
10738 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10739 * may be postponed to right before bnx2x_ack_sb). In this case
10740 * there will never be another interrupt until there is another update
10741 * of the status block, while there is still unhandled work.
10742 */
10743 rmb();
10744 10942
10745 if (!bnx2x_has_rx_work(fp)) { 10943 /* Fall out from the NAPI loop if needed */
10746#ifdef BNX2X_STOP_ON_ERROR 10944 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10747poll_panic: 10945 bnx2x_update_fpsb_idx(fp);
10748#endif 10946 /* bnx2x_has_rx_work() reads the status block, thus we need
10749 napi_complete(napi); 10947 * to ensure that status block indices have been actually read
10948 * (bnx2x_update_fpsb_idx) prior to this check
10949 * (bnx2x_has_rx_work) so that we won't write the "newer"
10950 * value of the status block to IGU (if there was a DMA right
10951 * after bnx2x_has_rx_work and if there is no rmb, the memory
10952 * reading (bnx2x_update_fpsb_idx) may be postponed to right
10953 * before bnx2x_ack_sb). In this case there will never be
10954 * another interrupt until there is another update of the
10955 * status block, while there is still unhandled work.
10956 */
10957 rmb();
10750 10958
10751 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 10959 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
10752 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1); 10960 napi_complete(napi);
10753 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 10961 /* Re-enable interrupts */
10754 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1); 10962 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10963 le16_to_cpu(fp->fp_c_idx),
10964 IGU_INT_NOP, 1);
10965 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10966 le16_to_cpu(fp->fp_u_idx),
10967 IGU_INT_ENABLE, 1);
10968 break;
10969 }
10970 }
10755 } 10971 }
10756 10972
10757poll_again:
10758 return work_done; 10973 return work_done;
10759} 10974}
10760 10975
@@ -10843,10 +11058,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10843 } 11058 }
10844 11059
10845 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 11060 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10846 rc |= XMIT_GSO_V4; 11061 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
10847 11062
10848 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 11063 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10849 rc |= XMIT_GSO_V6; 11064 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
10850 11065
10851 return rc; 11066 return rc;
10852} 11067}
@@ -10939,7 +11154,7 @@ exit_lbl:
10939static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) 11154static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10940{ 11155{
10941 struct bnx2x *bp = netdev_priv(dev); 11156 struct bnx2x *bp = netdev_priv(dev);
10942 struct bnx2x_fastpath *fp, *fp_stat; 11157 struct bnx2x_fastpath *fp;
10943 struct netdev_queue *txq; 11158 struct netdev_queue *txq;
10944 struct sw_tx_bd *tx_buf; 11159 struct sw_tx_bd *tx_buf;
10945 struct eth_tx_start_bd *tx_start_bd; 11160 struct eth_tx_start_bd *tx_start_bd;
@@ -10961,11 +11176,10 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10961 fp_index = skb_get_queue_mapping(skb); 11176 fp_index = skb_get_queue_mapping(skb);
10962 txq = netdev_get_tx_queue(dev, fp_index); 11177 txq = netdev_get_tx_queue(dev, fp_index);
10963 11178
10964 fp = &bp->fp[fp_index + bp->num_rx_queues]; 11179 fp = &bp->fp[fp_index];
10965 fp_stat = &bp->fp[fp_index];
10966 11180
10967 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) { 11181 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10968 fp_stat->eth_q_stats.driver_xoff++; 11182 fp->eth_q_stats.driver_xoff++;
10969 netif_tx_stop_queue(txq); 11183 netif_tx_stop_queue(txq);
10970 BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); 11184 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10971 return NETDEV_TX_BUSY; 11185 return NETDEV_TX_BUSY;
@@ -11191,7 +11405,7 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11191 11405
11192 fp->tx_db.data.prod += nbd; 11406 fp->tx_db.data.prod += nbd;
11193 barrier(); 11407 barrier();
11194 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw); 11408 DOORBELL(bp, fp->index, fp->tx_db.raw);
11195 11409
11196 mmiowb(); 11410 mmiowb();
11197 11411
@@ -11202,11 +11416,11 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11202 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod 11416 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11203 if we put Tx into XOFF state. */ 11417 if we put Tx into XOFF state. */
11204 smp_mb(); 11418 smp_mb();
11205 fp_stat->eth_q_stats.driver_xoff++; 11419 fp->eth_q_stats.driver_xoff++;
11206 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3) 11420 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11207 netif_tx_wake_queue(txq); 11421 netif_tx_wake_queue(txq);
11208 } 11422 }
11209 fp_stat->tx_pkt++; 11423 fp->tx_pkt++;
11210 11424
11211 return NETDEV_TX_OK; 11425 return NETDEV_TX_OK;
11212} 11426}
@@ -11321,6 +11535,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11321 config->hdr.client_id = bp->fp->cl_id; 11535 config->hdr.client_id = bp->fp->cl_id;
11322 config->hdr.reserved1 = 0; 11536 config->hdr.reserved1 = 0;
11323 11537
11538 bp->set_mac_pending++;
11539 smp_wmb();
11540
11324 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 11541 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11325 U64_HI(bnx2x_sp_mapping(bp, mcast_config)), 11542 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11326 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 11543 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
@@ -11370,9 +11587,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11370 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 11587 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11371 if (netif_running(dev)) { 11588 if (netif_running(dev)) {
11372 if (CHIP_IS_E1(bp)) 11589 if (CHIP_IS_E1(bp))
11373 bnx2x_set_mac_addr_e1(bp, 1); 11590 bnx2x_set_eth_mac_addr_e1(bp, 1);
11374 else 11591 else
11375 bnx2x_set_mac_addr_e1h(bp, 1); 11592 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11376 } 11593 }
11377 11594
11378 return 0; 11595 return 0;
@@ -11830,21 +12047,14 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11830 12047
11831static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) 12048static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11832{ 12049{
11833 char fw_file_name[40] = {0}; 12050 const char *fw_file_name;
11834 struct bnx2x_fw_file_hdr *fw_hdr; 12051 struct bnx2x_fw_file_hdr *fw_hdr;
11835 int rc, offset; 12052 int rc;
11836 12053
11837 /* Create a FW file name */
11838 if (CHIP_IS_E1(bp)) 12054 if (CHIP_IS_E1(bp))
11839 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1); 12055 fw_file_name = FW_FILE_NAME_E1;
11840 else 12056 else
11841 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H); 12057 fw_file_name = FW_FILE_NAME_E1H;
11842
11843 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11844 BCM_5710_FW_MAJOR_VERSION,
11845 BCM_5710_FW_MINOR_VERSION,
11846 BCM_5710_FW_REVISION_VERSION,
11847 BCM_5710_FW_ENGINEERING_VERSION);
11848 12058
11849 printk(KERN_INFO PFX "Loading %s\n", fw_file_name); 12059 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11850 12060
@@ -12098,9 +12308,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12098 12308
12099 /* Free SKBs, SGEs, TPA pool and driver internals */ 12309 /* Free SKBs, SGEs, TPA pool and driver internals */
12100 bnx2x_free_skbs(bp); 12310 bnx2x_free_skbs(bp);
12101 for_each_rx_queue(bp, i) 12311 for_each_queue(bp, i)
12102 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 12312 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12103 for_each_rx_queue(bp, i) 12313 for_each_queue(bp, i)
12104 netif_napi_del(&bnx2x_fp(bp, i, napi)); 12314 netif_napi_del(&bnx2x_fp(bp, i, napi));
12105 bnx2x_free_mem(bp); 12315 bnx2x_free_mem(bp);
12106 12316
@@ -12276,4 +12486,287 @@ static void __exit bnx2x_cleanup(void)
12276module_init(bnx2x_init); 12486module_init(bnx2x_init);
12277module_exit(bnx2x_cleanup); 12487module_exit(bnx2x_cleanup);
12278 12488
12489#ifdef BCM_CNIC
12490
12491/* count denotes the number of new completions we have seen */
12492static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12493{
12494 struct eth_spe *spe;
12495
12496#ifdef BNX2X_STOP_ON_ERROR
12497 if (unlikely(bp->panic))
12498 return;
12499#endif
12500
12501 spin_lock_bh(&bp->spq_lock);
12502 bp->cnic_spq_pending -= count;
12503
12504 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12505 bp->cnic_spq_pending++) {
12506
12507 if (!bp->cnic_kwq_pending)
12508 break;
12509
12510 spe = bnx2x_sp_get_next(bp);
12511 *spe = *bp->cnic_kwq_cons;
12512
12513 bp->cnic_kwq_pending--;
12514
12515 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12516 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12517
12518 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12519 bp->cnic_kwq_cons = bp->cnic_kwq;
12520 else
12521 bp->cnic_kwq_cons++;
12522 }
12523 bnx2x_sp_prod_update(bp);
12524 spin_unlock_bh(&bp->spq_lock);
12525}
12526
12527static int bnx2x_cnic_sp_queue(struct net_device *dev,
12528 struct kwqe_16 *kwqes[], u32 count)
12529{
12530 struct bnx2x *bp = netdev_priv(dev);
12531 int i;
12532
12533#ifdef BNX2X_STOP_ON_ERROR
12534 if (unlikely(bp->panic))
12535 return -EIO;
12536#endif
12537
12538 spin_lock_bh(&bp->spq_lock);
12539
12540 for (i = 0; i < count; i++) {
12541 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12542
12543 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12544 break;
12545
12546 *bp->cnic_kwq_prod = *spe;
12547
12548 bp->cnic_kwq_pending++;
12549
12550 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12551 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12552 spe->data.mac_config_addr.hi,
12553 spe->data.mac_config_addr.lo,
12554 bp->cnic_kwq_pending);
12555
12556 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12557 bp->cnic_kwq_prod = bp->cnic_kwq;
12558 else
12559 bp->cnic_kwq_prod++;
12560 }
12561
12562 spin_unlock_bh(&bp->spq_lock);
12563
12564 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12565 bnx2x_cnic_sp_post(bp, 0);
12566
12567 return i;
12568}
12569
12570static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12571{
12572 struct cnic_ops *c_ops;
12573 int rc = 0;
12574
12575 mutex_lock(&bp->cnic_mutex);
12576 c_ops = bp->cnic_ops;
12577 if (c_ops)
12578 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12579 mutex_unlock(&bp->cnic_mutex);
12580
12581 return rc;
12582}
12583
12584static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12585{
12586 struct cnic_ops *c_ops;
12587 int rc = 0;
12588
12589 rcu_read_lock();
12590 c_ops = rcu_dereference(bp->cnic_ops);
12591 if (c_ops)
12592 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12593 rcu_read_unlock();
12594
12595 return rc;
12596}
12597
12598/*
12599 * for commands that have no data
12600 */
12601static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12602{
12603 struct cnic_ctl_info ctl = {0};
12604
12605 ctl.cmd = cmd;
12606
12607 return bnx2x_cnic_ctl_send(bp, &ctl);
12608}
12609
12610static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12611{
12612 struct cnic_ctl_info ctl;
12613
12614 /* first we tell CNIC and only then we count this as a completion */
12615 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12616 ctl.data.comp.cid = cid;
12617
12618 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12619 bnx2x_cnic_sp_post(bp, 1);
12620}
12621
12622static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12623{
12624 struct bnx2x *bp = netdev_priv(dev);
12625 int rc = 0;
12626
12627 switch (ctl->cmd) {
12628 case DRV_CTL_CTXTBL_WR_CMD: {
12629 u32 index = ctl->data.io.offset;
12630 dma_addr_t addr = ctl->data.io.dma_addr;
12631
12632 bnx2x_ilt_wr(bp, index, addr);
12633 break;
12634 }
12635
12636 case DRV_CTL_COMPLETION_CMD: {
12637 int count = ctl->data.comp.comp_count;
12638
12639 bnx2x_cnic_sp_post(bp, count);
12640 break;
12641 }
12642
12643 /* rtnl_lock is held. */
12644 case DRV_CTL_START_L2_CMD: {
12645 u32 cli = ctl->data.ring.client_id;
12646
12647 bp->rx_mode_cl_mask |= (1 << cli);
12648 bnx2x_set_storm_rx_mode(bp);
12649 break;
12650 }
12651
12652 /* rtnl_lock is held. */
12653 case DRV_CTL_STOP_L2_CMD: {
12654 u32 cli = ctl->data.ring.client_id;
12655
12656 bp->rx_mode_cl_mask &= ~(1 << cli);
12657 bnx2x_set_storm_rx_mode(bp);
12658 break;
12659 }
12660
12661 default:
12662 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12663 rc = -EINVAL;
12664 }
12665
12666 return rc;
12667}
12668
12669static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12670{
12671 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12672
12673 if (bp->flags & USING_MSIX_FLAG) {
12674 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12675 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12676 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12677 } else {
12678 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12679 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12680 }
12681 cp->irq_arr[0].status_blk = bp->cnic_sb;
12682 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12683 cp->irq_arr[1].status_blk = bp->def_status_blk;
12684 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12685
12686 cp->num_irq = 2;
12687}
12688
12689static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12690 void *data)
12691{
12692 struct bnx2x *bp = netdev_priv(dev);
12693 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12694
12695 if (ops == NULL)
12696 return -EINVAL;
12697
12698 if (atomic_read(&bp->intr_sem) != 0)
12699 return -EBUSY;
12700
12701 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12702 if (!bp->cnic_kwq)
12703 return -ENOMEM;
12704
12705 bp->cnic_kwq_cons = bp->cnic_kwq;
12706 bp->cnic_kwq_prod = bp->cnic_kwq;
12707 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12708
12709 bp->cnic_spq_pending = 0;
12710 bp->cnic_kwq_pending = 0;
12711
12712 bp->cnic_data = data;
12713
12714 cp->num_irq = 0;
12715 cp->drv_state = CNIC_DRV_STATE_REGD;
12716
12717 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12718
12719 bnx2x_setup_cnic_irq_info(bp);
12720 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12721 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12722 rcu_assign_pointer(bp->cnic_ops, ops);
12723
12724 return 0;
12725}
12726
12727static int bnx2x_unregister_cnic(struct net_device *dev)
12728{
12729 struct bnx2x *bp = netdev_priv(dev);
12730 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12731
12732 mutex_lock(&bp->cnic_mutex);
12733 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12734 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12735 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12736 }
12737 cp->drv_state = 0;
12738 rcu_assign_pointer(bp->cnic_ops, NULL);
12739 mutex_unlock(&bp->cnic_mutex);
12740 synchronize_rcu();
12741 kfree(bp->cnic_kwq);
12742 bp->cnic_kwq = NULL;
12743
12744 return 0;
12745}
12746
12747struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12748{
12749 struct bnx2x *bp = netdev_priv(dev);
12750 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12751
12752 cp->drv_owner = THIS_MODULE;
12753 cp->chip_id = CHIP_ID(bp);
12754 cp->pdev = bp->pdev;
12755 cp->io_base = bp->regview;
12756 cp->io_base2 = bp->doorbells;
12757 cp->max_kwqe_pending = 8;
12758 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12759 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12760 cp->ctx_tbl_len = CNIC_ILT_LINES;
12761 cp->starting_cid = BCM_CNIC_CID_START;
12762 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12763 cp->drv_ctl = bnx2x_drv_ctl;
12764 cp->drv_register_cnic = bnx2x_register_cnic;
12765 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12766
12767 return cp;
12768}
12769EXPORT_SYMBOL(bnx2x_cnic_probe);
12770
12771#endif /* BCM_CNIC */
12279 12772
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index aa76cbada5e2..b668173ffcb4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -4772,18 +4772,28 @@
4772#define PCI_ID_VAL2 0x438 4772#define PCI_ID_VAL2 0x438
4773 4773
4774 4774
4775#define MDIO_REG_BANK_CL73_IEEEB0 0x0 4775#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4776#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 4776#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
4777#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 4777#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
4778#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000 4778#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
4779#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000 4779#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
4780 4780
4781#define MDIO_REG_BANK_CL73_IEEEB1 0x10 4781#define MDIO_REG_BANK_CL73_IEEEB1 0x10
4782#define MDIO_CL73_IEEEB1_AN_ADV2 0x01 4782#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
4783#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
4784#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
4785#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
4786#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
4787#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
4783#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000 4788#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
4784#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020 4789#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
4785#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040 4790#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
4786#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080 4791#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
4792#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
4793#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
4794#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
4795#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
4796#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
4787 4797
4788#define MDIO_REG_BANK_RX0 0x80b0 4798#define MDIO_REG_BANK_RX0 0x80b0
4789#define MDIO_RX0_RX_STATUS 0x10 4799#define MDIO_RX0_RX_STATUS 0x10
@@ -4910,6 +4920,8 @@
4910 4920
4911 4921
4912#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 4922#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
4923#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
4924#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
4913#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11 4925#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
4914#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1 4926#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
4915#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13 4927#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
@@ -4934,6 +4946,8 @@
4934#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010 4946#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
4935#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008 4947#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
4936#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000 4948#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
4949#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
4950#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
4937#define MDIO_SERDES_DIGITAL_MISC1 0x18 4951#define MDIO_SERDES_DIGITAL_MISC1 0x18
4938#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000 4952#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
4939#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000 4953#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
@@ -5115,6 +5129,7 @@ Theotherbitsarereservedandshouldbezero*/
5115#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c 5129#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5116#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f 5130#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5117#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 5131#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5132#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5118#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 5133#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
5119#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b 5134#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
5120 5135
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c3fa31c9f2a7..88c3fe80b355 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -446,6 +446,48 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
446///////////////////////////////////////////////////////////////////////////////// 446/////////////////////////////////////////////////////////////////////////////////
447 447
448/** 448/**
449 * __choose_matched - update a port's matched variable from a received lacpdu
450 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at
452 *
453 * Update the value of the matched variable, using parameter values from a
454 * newly received lacpdu. Parameter values for the partner carried in the
455 * received PDU are compared with the corresponding operational parameter
456 * values for the actor. Matched is set to TRUE if all of these parameters
457 * match and the PDU parameter partner_state.aggregation has the same value as
458 * actor_oper_port_state.aggregation and lacp will actively maintain the link
459 * in the aggregation. Matched is also set to TRUE if the value of
460 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
461 * an individual link and lacp will actively maintain the link. Otherwise,
462 * matched is set to FALSE. LACP is considered to be actively maintaining the
463 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
464 * the actor's actor_oper_port_state.lacp_activity and the PDU's
465 * partner_state.lacp_activity variables are TRUE.
466 *
467 * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is
468 * used here to implement the language from 802.3ad 43.4.9 that requires
469 * recordPDU to "match" the LACPDU parameters to the stored values.
470 */
471static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
472{
473 // check if all parameters are alike
474 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
475 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
476 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
477 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
478 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
479 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
480 // or this is individual link(aggregation == FALSE)
481 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
482 ) {
483 // update the state machine Matched variable
484 port->sm_vars |= AD_PORT_MATCHED;
485 } else {
486 port->sm_vars &= ~AD_PORT_MATCHED;
487 }
488}
489
490/**
449 * __record_pdu - record parameters from a received lacpdu 491 * __record_pdu - record parameters from a received lacpdu
450 * @lacpdu: the lacpdu we've received 492 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at 493 * @port: the port we're looking at
@@ -459,6 +501,7 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
459 if (lacpdu && port) { 501 if (lacpdu && port) {
460 struct port_params *partner = &port->partner_oper; 502 struct port_params *partner = &port->partner_oper;
461 503
504 __choose_matched(lacpdu, port);
462 // record the new parameter values for the partner operational 505 // record the new parameter values for the partner operational
463 partner->port_number = ntohs(lacpdu->actor_port); 506 partner->port_number = ntohs(lacpdu->actor_port);
464 partner->port_priority = ntohs(lacpdu->actor_port_priority); 507 partner->port_priority = ntohs(lacpdu->actor_port_priority);
@@ -563,47 +606,6 @@ static void __update_default_selected(struct port *port)
563} 606}
564 607
565/** 608/**
566 * __choose_matched - update a port's matched variable from a received lacpdu
567 * @lacpdu: the lacpdu we've received
568 * @port: the port we're looking at
569 *
570 * Update the value of the matched variable, using parameter values from a
571 * newly received lacpdu. Parameter values for the partner carried in the
572 * received PDU are compared with the corresponding operational parameter
573 * values for the actor. Matched is set to TRUE if all of these parameters
574 * match and the PDU parameter partner_state.aggregation has the same value as
575 * actor_oper_port_state.aggregation and lacp will actively maintain the link
576 * in the aggregation. Matched is also set to TRUE if the value of
577 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
578 * an individual link and lacp will actively maintain the link. Otherwise,
579 * matched is set to FALSE. LACP is considered to be actively maintaining the
580 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
581 * the actor's actor_oper_port_state.lacp_activity and the PDU's
582 * partner_state.lacp_activity variables are TRUE.
583 */
584static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
585{
586 // validate lacpdu and port
587 if (lacpdu && port) {
588 // check if all parameters are alike
589 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
590 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
591 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
592 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
593 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
594 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
595 // or this is individual link(aggregation == FALSE)
596 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
597 ) {
598 // update the state machine Matched variable
599 port->sm_vars |= AD_PORT_MATCHED;
600 } else {
601 port->sm_vars &= ~AD_PORT_MATCHED;
602 }
603 }
604}
605
606/**
607 * __update_ntt - update a port's ntt variable from a received lacpdu 609 * __update_ntt - update a port's ntt variable from a received lacpdu
608 * @lacpdu: the lacpdu we've received 610 * @lacpdu: the lacpdu we've received
609 * @port: the port we're looking at 611 * @port: the port we're looking at
@@ -1134,7 +1136,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1134 __update_selected(lacpdu, port); 1136 __update_selected(lacpdu, port);
1135 __update_ntt(lacpdu, port); 1137 __update_ntt(lacpdu, port);
1136 __record_pdu(lacpdu, port); 1138 __record_pdu(lacpdu, port);
1137 __choose_matched(lacpdu, port);
1138 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)); 1139 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
1139 port->actor_oper_port_state &= ~AD_STATE_EXPIRED; 1140 port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
1140 // verify that if the aggregator is enabled, the port is enabled too. 1141 // verify that if the aggregator is enabled, the port is enabled too.
@@ -1956,7 +1957,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
1956 struct port *port, *prev_port, *temp_port; 1957 struct port *port, *prev_port, *temp_port;
1957 struct aggregator *aggregator, *new_aggregator, *temp_aggregator; 1958 struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
1958 int select_new_active_agg = 0; 1959 int select_new_active_agg = 0;
1959 1960
1960 // find the aggregator related to this slave 1961 // find the aggregator related to this slave
1961 aggregator = &(SLAVE_AD_INFO(slave).aggregator); 1962 aggregator = &(SLAVE_AD_INFO(slave).aggregator);
1962 1963
@@ -2024,7 +2025,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2024 2025
2025 // clear the aggregator 2026 // clear the aggregator
2026 ad_clear_agg(aggregator); 2027 ad_clear_agg(aggregator);
2027 2028
2028 if (select_new_active_agg) { 2029 if (select_new_active_agg) {
2029 ad_agg_selection_logic(__get_first_agg(port)); 2030 ad_agg_selection_logic(__get_first_agg(port));
2030 } 2031 }
@@ -2075,7 +2076,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2075 } 2076 }
2076 } 2077 }
2077 } 2078 }
2078 port->slave=NULL; 2079 port->slave=NULL;
2079} 2080}
2080 2081
2081/** 2082/**
@@ -2301,7 +2302,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2301} 2302}
2302 2303
2303/* 2304/*
2304 * set link state for bonding master: if we have an active 2305 * set link state for bonding master: if we have an active
2305 * aggregator, we're up, if not, we're down. Presumes that we cannot 2306 * aggregator, we're up, if not, we're down. Presumes that we cannot
2306 * have an active aggregator if there are no slaves with link up. 2307 * have an active aggregator if there are no slaves with link up.
2307 * 2308 *
@@ -2395,7 +2396,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2395 goto out; 2396 goto out;
2396 } 2397 }
2397 2398
2398 slave_agg_no = bond->xmit_hash_policy(skb, dev, slaves_in_agg); 2399 slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
2399 2400
2400 bond_for_each_slave(bond, slave, i) { 2401 bond_for_each_slave(bond, slave, i) {
2401 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; 2402 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
@@ -2445,9 +2446,6 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2445 struct slave *slave = NULL; 2446 struct slave *slave = NULL;
2446 int ret = NET_RX_DROP; 2447 int ret = NET_RX_DROP;
2447 2448
2448 if (dev_net(dev) != &init_net)
2449 goto out;
2450
2451 if (!(dev->flags & IFF_MASTER)) 2449 if (!(dev->flags & IFF_MASTER))
2452 goto out; 2450 goto out;
2453 2451
@@ -2468,4 +2466,3 @@ out:
2468 2466
2469 return ret; 2467 return ret;
2470} 2468}
2471
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9b5936f072dc..0d30d1e5e53f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -355,9 +355,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
355 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 355 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
356 int res = NET_RX_DROP; 356 int res = NET_RX_DROP;
357 357
358 if (dev_net(bond_dev) != &init_net)
359 goto out;
360
361 while (bond_dev->priv_flags & IFF_802_1Q_VLAN) 358 while (bond_dev->priv_flags & IFF_802_1Q_VLAN)
362 bond_dev = vlan_dev_real_dev(bond_dev); 359 bond_dev = vlan_dev_real_dev(bond_dev);
363 360
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 83921abae12d..b72e1dc8cf8f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -25,6 +25,7 @@
25#include <net/ipv6.h> 25#include <net/ipv6.h>
26#include <net/ndisc.h> 26#include <net/ndisc.h>
27#include <net/addrconf.h> 27#include <net/addrconf.h>
28#include <net/netns/generic.h>
28#include "bonding.h" 29#include "bonding.h"
29 30
30/* 31/*
@@ -152,11 +153,9 @@ static int bond_inet6addr_event(struct notifier_block *this,
152 struct net_device *vlan_dev, *event_dev = ifa->idev->dev; 153 struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
153 struct bonding *bond; 154 struct bonding *bond;
154 struct vlan_entry *vlan; 155 struct vlan_entry *vlan;
156 struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
155 157
156 if (dev_net(event_dev) != &init_net) 158 list_for_each_entry(bond, &bn->dev_list, bond_list) {
157 return NOTIFY_DONE;
158
159 list_for_each_entry(bond, &bond_dev_list, bond_list) {
160 if (bond->dev == event_dev) { 159 if (bond->dev == event_dev) {
161 switch (event) { 160 switch (event) {
162 case NETDEV_UP: 161 case NETDEV_UP:
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 40fb5eefc72e..726bd755338f 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -75,6 +75,7 @@
75#include <linux/jiffies.h> 75#include <linux/jiffies.h>
76#include <net/route.h> 76#include <net/route.h>
77#include <net/net_namespace.h> 77#include <net/net_namespace.h>
78#include <net/netns/generic.h>
78#include "bonding.h" 79#include "bonding.h"
79#include "bond_3ad.h" 80#include "bond_3ad.h"
80#include "bond_alb.h" 81#include "bond_alb.h"
@@ -94,6 +95,7 @@ static int downdelay;
94static int use_carrier = 1; 95static int use_carrier = 1;
95static char *mode; 96static char *mode;
96static char *primary; 97static char *primary;
98static char *primary_reselect;
97static char *lacp_rate; 99static char *lacp_rate;
98static char *ad_select; 100static char *ad_select;
99static char *xmit_hash_policy; 101static char *xmit_hash_policy;
@@ -126,6 +128,14 @@ MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
126 "6 for balance-alb"); 128 "6 for balance-alb");
127module_param(primary, charp, 0); 129module_param(primary, charp, 0);
128MODULE_PARM_DESC(primary, "Primary network device to use"); 130MODULE_PARM_DESC(primary, "Primary network device to use");
131module_param(primary_reselect, charp, 0);
132MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
133 "once it comes up; "
134 "0 for always (default), "
135 "1 for only if speed of primary is "
136 "better, "
137 "2 for only on active slave "
138 "failure");
129module_param(lacp_rate, charp, 0); 139module_param(lacp_rate, charp, 0);
130MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner " 140MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
131 "(slow/fast)"); 141 "(slow/fast)");
@@ -148,11 +158,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the
148static const char * const version = 158static const char * const version =
149 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 159 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
150 160
151LIST_HEAD(bond_dev_list); 161int bond_net_id __read_mostly;
152
153#ifdef CONFIG_PROC_FS
154static struct proc_dir_entry *bond_proc_dir;
155#endif
156 162
157static __be32 arp_target[BOND_MAX_ARP_TARGETS]; 163static __be32 arp_target[BOND_MAX_ARP_TARGETS];
158static int arp_ip_count; 164static int arp_ip_count;
@@ -200,6 +206,13 @@ const struct bond_parm_tbl fail_over_mac_tbl[] = {
200{ NULL, -1}, 206{ NULL, -1},
201}; 207};
202 208
209const struct bond_parm_tbl pri_reselect_tbl[] = {
210{ "always", BOND_PRI_RESELECT_ALWAYS},
211{ "better", BOND_PRI_RESELECT_BETTER},
212{ "failure", BOND_PRI_RESELECT_FAILURE},
213{ NULL, -1},
214};
215
203struct bond_parm_tbl ad_select_tbl[] = { 216struct bond_parm_tbl ad_select_tbl[] = {
204{ "stable", BOND_AD_STABLE}, 217{ "stable", BOND_AD_STABLE},
205{ "bandwidth", BOND_AD_BANDWIDTH}, 218{ "bandwidth", BOND_AD_BANDWIDTH},
@@ -211,7 +224,7 @@ struct bond_parm_tbl ad_select_tbl[] = {
211 224
212static void bond_send_gratuitous_arp(struct bonding *bond); 225static void bond_send_gratuitous_arp(struct bonding *bond);
213static int bond_init(struct net_device *bond_dev); 226static int bond_init(struct net_device *bond_dev);
214static void bond_deinit(struct net_device *bond_dev); 227static void bond_uninit(struct net_device *bond_dev);
215 228
216/*---------------------------- General routines -----------------------------*/ 229/*---------------------------- General routines -----------------------------*/
217 230
@@ -1070,6 +1083,25 @@ out:
1070 1083
1071} 1084}
1072 1085
1086static bool bond_should_change_active(struct bonding *bond)
1087{
1088 struct slave *prim = bond->primary_slave;
1089 struct slave *curr = bond->curr_active_slave;
1090
1091 if (!prim || !curr || curr->link != BOND_LINK_UP)
1092 return true;
1093 if (bond->force_primary) {
1094 bond->force_primary = false;
1095 return true;
1096 }
1097 if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
1098 (prim->speed < curr->speed ||
1099 (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
1100 return false;
1101 if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
1102 return false;
1103 return true;
1104}
1073 1105
1074/** 1106/**
1075 * find_best_interface - select the best available slave to be the active one 1107 * find_best_interface - select the best available slave to be the active one
@@ -1084,7 +1116,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1084 int mintime = bond->params.updelay; 1116 int mintime = bond->params.updelay;
1085 int i; 1117 int i;
1086 1118
1087 new_active = old_active = bond->curr_active_slave; 1119 new_active = bond->curr_active_slave;
1088 1120
1089 if (!new_active) { /* there were no active slaves left */ 1121 if (!new_active) { /* there were no active slaves left */
1090 if (bond->slave_cnt > 0) /* found one slave */ 1122 if (bond->slave_cnt > 0) /* found one slave */
@@ -1094,7 +1126,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1094 } 1126 }
1095 1127
1096 if ((bond->primary_slave) && 1128 if ((bond->primary_slave) &&
1097 bond->primary_slave->link == BOND_LINK_UP) { 1129 bond->primary_slave->link == BOND_LINK_UP &&
1130 bond_should_change_active(bond)) {
1098 new_active = bond->primary_slave; 1131 new_active = bond->primary_slave;
1099 } 1132 }
1100 1133
@@ -1678,8 +1711,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1678 1711
1679 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1712 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1680 /* if there is a primary slave, remember it */ 1713 /* if there is a primary slave, remember it */
1681 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) 1714 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1682 bond->primary_slave = new_slave; 1715 bond->primary_slave = new_slave;
1716 bond->force_primary = true;
1717 }
1683 } 1718 }
1684 1719
1685 write_lock_bh(&bond->curr_slave_lock); 1720 write_lock_bh(&bond->curr_slave_lock);
@@ -1965,25 +2000,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1965} 2000}
1966 2001
1967/* 2002/*
1968* Destroy a bonding device.
1969* Must be under rtnl_lock when this function is called.
1970*/
1971static void bond_uninit(struct net_device *bond_dev)
1972{
1973 struct bonding *bond = netdev_priv(bond_dev);
1974
1975 bond_deinit(bond_dev);
1976 bond_destroy_sysfs_entry(bond);
1977
1978 if (bond->wq)
1979 destroy_workqueue(bond->wq);
1980
1981 netif_addr_lock_bh(bond_dev);
1982 bond_mc_list_destroy(bond);
1983 netif_addr_unlock_bh(bond_dev);
1984}
1985
1986/*
1987* First release a slave and than destroy the bond if no more slaves are left. 2003* First release a slave and than destroy the bond if no more slaves are left.
1988* Must be under rtnl_lock when this function is called. 2004* Must be under rtnl_lock when this function is called.
1989*/ 2005*/
@@ -2567,7 +2583,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2567 fl.fl4_dst = targets[i]; 2583 fl.fl4_dst = targets[i];
2568 fl.fl4_tos = RTO_ONLINK; 2584 fl.fl4_tos = RTO_ONLINK;
2569 2585
2570 rv = ip_route_output_key(&init_net, &rt, &fl); 2586 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
2571 if (rv) { 2587 if (rv) {
2572 if (net_ratelimit()) { 2588 if (net_ratelimit()) {
2573 pr_warning(DRV_NAME 2589 pr_warning(DRV_NAME
@@ -2675,9 +2691,6 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2675 unsigned char *arp_ptr; 2691 unsigned char *arp_ptr;
2676 __be32 sip, tip; 2692 __be32 sip, tip;
2677 2693
2678 if (dev_net(dev) != &init_net)
2679 goto out;
2680
2681 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2694 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2682 goto out; 2695 goto out;
2683 2696
@@ -3201,11 +3214,14 @@ static void bond_info_show_master(struct seq_file *seq)
3201 } 3214 }
3202 3215
3203 if (USES_PRIMARY(bond->params.mode)) { 3216 if (USES_PRIMARY(bond->params.mode)) {
3204 seq_printf(seq, "Primary Slave: %s\n", 3217 seq_printf(seq, "Primary Slave: %s",
3205 (bond->primary_slave) ? 3218 (bond->primary_slave) ?
3206 bond->primary_slave->dev->name : "None"); 3219 bond->primary_slave->dev->name : "None");
3220 if (bond->primary_slave)
3221 seq_printf(seq, " (primary_reselect %s)",
3222 pri_reselect_tbl[bond->params.primary_reselect].modename);
3207 3223
3208 seq_printf(seq, "Currently Active Slave: %s\n", 3224 seq_printf(seq, "\nCurrently Active Slave: %s\n",
3209 (curr) ? curr->dev->name : "None"); 3225 (curr) ? curr->dev->name : "None");
3210 } 3226 }
3211 3227
@@ -3334,13 +3350,14 @@ static const struct file_operations bond_info_fops = {
3334 .release = seq_release, 3350 .release = seq_release,
3335}; 3351};
3336 3352
3337static int bond_create_proc_entry(struct bonding *bond) 3353static void bond_create_proc_entry(struct bonding *bond)
3338{ 3354{
3339 struct net_device *bond_dev = bond->dev; 3355 struct net_device *bond_dev = bond->dev;
3356 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3340 3357
3341 if (bond_proc_dir) { 3358 if (bn->proc_dir) {
3342 bond->proc_entry = proc_create_data(bond_dev->name, 3359 bond->proc_entry = proc_create_data(bond_dev->name,
3343 S_IRUGO, bond_proc_dir, 3360 S_IRUGO, bn->proc_dir,
3344 &bond_info_fops, bond); 3361 &bond_info_fops, bond);
3345 if (bond->proc_entry == NULL) 3362 if (bond->proc_entry == NULL)
3346 pr_warning(DRV_NAME 3363 pr_warning(DRV_NAME
@@ -3349,14 +3366,15 @@ static int bond_create_proc_entry(struct bonding *bond)
3349 else 3366 else
3350 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); 3367 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
3351 } 3368 }
3352
3353 return 0;
3354} 3369}
3355 3370
3356static void bond_remove_proc_entry(struct bonding *bond) 3371static void bond_remove_proc_entry(struct bonding *bond)
3357{ 3372{
3358 if (bond_proc_dir && bond->proc_entry) { 3373 struct net_device *bond_dev = bond->dev;
3359 remove_proc_entry(bond->proc_file_name, bond_proc_dir); 3374 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3375
3376 if (bn->proc_dir && bond->proc_entry) {
3377 remove_proc_entry(bond->proc_file_name, bn->proc_dir);
3360 memset(bond->proc_file_name, 0, IFNAMSIZ); 3378 memset(bond->proc_file_name, 0, IFNAMSIZ);
3361 bond->proc_entry = NULL; 3379 bond->proc_entry = NULL;
3362 } 3380 }
@@ -3365,11 +3383,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3365/* Create the bonding directory under /proc/net, if doesn't exist yet. 3383/* Create the bonding directory under /proc/net, if doesn't exist yet.
3366 * Caller must hold rtnl_lock. 3384 * Caller must hold rtnl_lock.
3367 */ 3385 */
3368static void bond_create_proc_dir(void) 3386static void bond_create_proc_dir(struct bond_net *bn)
3369{ 3387{
3370 if (!bond_proc_dir) { 3388 if (!bn->proc_dir) {
3371 bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net); 3389 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
3372 if (!bond_proc_dir) 3390 if (!bn->proc_dir)
3373 pr_warning(DRV_NAME 3391 pr_warning(DRV_NAME
3374 ": Warning: cannot create /proc/net/%s\n", 3392 ": Warning: cannot create /proc/net/%s\n",
3375 DRV_NAME); 3393 DRV_NAME);
@@ -3379,17 +3397,17 @@ static void bond_create_proc_dir(void)
3379/* Destroy the bonding directory under /proc/net, if empty. 3397/* Destroy the bonding directory under /proc/net, if empty.
3380 * Caller must hold rtnl_lock. 3398 * Caller must hold rtnl_lock.
3381 */ 3399 */
3382static void bond_destroy_proc_dir(void) 3400static void bond_destroy_proc_dir(struct bond_net *bn)
3383{ 3401{
3384 if (bond_proc_dir) { 3402 if (bn->proc_dir) {
3385 remove_proc_entry(DRV_NAME, init_net.proc_net); 3403 remove_proc_entry(DRV_NAME, bn->net->proc_net);
3386 bond_proc_dir = NULL; 3404 bn->proc_dir = NULL;
3387 } 3405 }
3388} 3406}
3389 3407
3390#else /* !CONFIG_PROC_FS */ 3408#else /* !CONFIG_PROC_FS */
3391 3409
3392static int bond_create_proc_entry(struct bonding *bond) 3410static void bond_create_proc_entry(struct bonding *bond)
3393{ 3411{
3394} 3412}
3395 3413
@@ -3397,11 +3415,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3397{ 3415{
3398} 3416}
3399 3417
3400static void bond_create_proc_dir(void) 3418static void bond_create_proc_dir(struct bond_net *bn)
3401{ 3419{
3402} 3420}
3403 3421
3404static void bond_destroy_proc_dir(void) 3422static void bond_destroy_proc_dir(struct bond_net *bn)
3405{ 3423{
3406} 3424}
3407 3425
@@ -3418,9 +3436,6 @@ static int bond_event_changename(struct bonding *bond)
3418 bond_remove_proc_entry(bond); 3436 bond_remove_proc_entry(bond);
3419 bond_create_proc_entry(bond); 3437 bond_create_proc_entry(bond);
3420 3438
3421 bond_destroy_sysfs_entry(bond);
3422 bond_create_sysfs_entry(bond);
3423
3424 return NOTIFY_DONE; 3439 return NOTIFY_DONE;
3425} 3440}
3426 3441
@@ -3432,9 +3447,6 @@ static int bond_master_netdev_event(unsigned long event,
3432 switch (event) { 3447 switch (event) {
3433 case NETDEV_CHANGENAME: 3448 case NETDEV_CHANGENAME:
3434 return bond_event_changename(event_bond); 3449 return bond_event_changename(event_bond);
3435 case NETDEV_UNREGISTER:
3436 bond_release_all(event_bond->dev);
3437 break;
3438 default: 3450 default:
3439 break; 3451 break;
3440 } 3452 }
@@ -3526,9 +3538,6 @@ static int bond_netdev_event(struct notifier_block *this,
3526{ 3538{
3527 struct net_device *event_dev = (struct net_device *)ptr; 3539 struct net_device *event_dev = (struct net_device *)ptr;
3528 3540
3529 if (dev_net(event_dev) != &init_net)
3530 return NOTIFY_DONE;
3531
3532 pr_debug("event_dev: %s, event: %lx\n", 3541 pr_debug("event_dev: %s, event: %lx\n",
3533 (event_dev ? event_dev->name : "None"), 3542 (event_dev ? event_dev->name : "None"),
3534 event); 3543 event);
@@ -3561,13 +3570,11 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3561{ 3570{
3562 struct in_ifaddr *ifa = ptr; 3571 struct in_ifaddr *ifa = ptr;
3563 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev; 3572 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
3573 struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
3564 struct bonding *bond; 3574 struct bonding *bond;
3565 struct vlan_entry *vlan; 3575 struct vlan_entry *vlan;
3566 3576
3567 if (dev_net(ifa->ifa_dev->dev) != &init_net) 3577 list_for_each_entry(bond, &bn->dev_list, bond_list) {
3568 return NOTIFY_DONE;
3569
3570 list_for_each_entry(bond, &bond_dev_list, bond_list) {
3571 if (bond->dev == event_dev) { 3578 if (bond->dev == event_dev) {
3572 switch (event) { 3579 switch (event) {
3573 case NETDEV_UP: 3580 case NETDEV_UP:
@@ -3657,8 +3664,7 @@ void bond_unregister_arp(struct bonding *bond)
3657 * Hash for the output device based upon layer 2 and layer 3 data. If 3664 * Hash for the output device based upon layer 2 and layer 3 data. If
3658 * the packet is not IP mimic bond_xmit_hash_policy_l2() 3665 * the packet is not IP mimic bond_xmit_hash_policy_l2()
3659 */ 3666 */
3660static int bond_xmit_hash_policy_l23(struct sk_buff *skb, 3667static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3661 struct net_device *bond_dev, int count)
3662{ 3668{
3663 struct ethhdr *data = (struct ethhdr *)skb->data; 3669 struct ethhdr *data = (struct ethhdr *)skb->data;
3664 struct iphdr *iph = ip_hdr(skb); 3670 struct iphdr *iph = ip_hdr(skb);
@@ -3676,8 +3682,7 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
3676 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3682 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3677 * altogether not IP, mimic bond_xmit_hash_policy_l2() 3683 * altogether not IP, mimic bond_xmit_hash_policy_l2()
3678 */ 3684 */
3679static int bond_xmit_hash_policy_l34(struct sk_buff *skb, 3685static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
3680 struct net_device *bond_dev, int count)
3681{ 3686{
3682 struct ethhdr *data = (struct ethhdr *)skb->data; 3687 struct ethhdr *data = (struct ethhdr *)skb->data;
3683 struct iphdr *iph = ip_hdr(skb); 3688 struct iphdr *iph = ip_hdr(skb);
@@ -3701,8 +3706,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
3701/* 3706/*
3702 * Hash for the output device based upon layer 2 data 3707 * Hash for the output device based upon layer 2 data
3703 */ 3708 */
3704static int bond_xmit_hash_policy_l2(struct sk_buff *skb, 3709static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3705 struct net_device *bond_dev, int count)
3706{ 3710{
3707 struct ethhdr *data = (struct ethhdr *)skb->data; 3711 struct ethhdr *data = (struct ethhdr *)skb->data;
3708 3712
@@ -3939,7 +3943,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3939 if (!capable(CAP_NET_ADMIN)) 3943 if (!capable(CAP_NET_ADMIN))
3940 return -EPERM; 3944 return -EPERM;
3941 3945
3942 slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave); 3946 slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
3943 3947
3944 pr_debug("slave_dev=%p: \n", slave_dev); 3948 pr_debug("slave_dev=%p: \n", slave_dev);
3945 3949
@@ -4295,7 +4299,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4295 if (!BOND_IS_OK(bond)) 4299 if (!BOND_IS_OK(bond))
4296 goto out; 4300 goto out;
4297 4301
4298 slave_no = bond->xmit_hash_policy(skb, bond_dev, bond->slave_cnt); 4302 slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
4299 4303
4300 bond_for_each_slave(bond, slave, i) { 4304 bond_for_each_slave(bond, slave, i) {
4301 slave_no--; 4305 slave_no--;
@@ -4576,37 +4580,29 @@ static void bond_work_cancel_all(struct bonding *bond)
4576 cancel_delayed_work(&bond->ad_work); 4580 cancel_delayed_work(&bond->ad_work);
4577} 4581}
4578 4582
4579/* De-initialize device specific data. 4583/*
4580 * Caller must hold rtnl_lock. 4584* Destroy a bonding device.
4581 */ 4585* Must be under rtnl_lock when this function is called.
4582static void bond_deinit(struct net_device *bond_dev) 4586*/
4587static void bond_uninit(struct net_device *bond_dev)
4583{ 4588{
4584 struct bonding *bond = netdev_priv(bond_dev); 4589 struct bonding *bond = netdev_priv(bond_dev);
4585 4590
4591 /* Release the bonded slaves */
4592 bond_release_all(bond_dev);
4593
4586 list_del(&bond->bond_list); 4594 list_del(&bond->bond_list);
4587 4595
4588 bond_work_cancel_all(bond); 4596 bond_work_cancel_all(bond);
4589 4597
4590 bond_remove_proc_entry(bond); 4598 bond_remove_proc_entry(bond);
4591}
4592
4593/* Unregister and free all bond devices.
4594 * Caller must hold rtnl_lock.
4595 */
4596static void bond_free_all(void)
4597{
4598 struct bonding *bond, *nxt;
4599
4600 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
4601 struct net_device *bond_dev = bond->dev;
4602 4599
4603 bond_work_cancel_all(bond); 4600 if (bond->wq)
4604 /* Release the bonded slaves */ 4601 destroy_workqueue(bond->wq);
4605 bond_release_all(bond_dev);
4606 unregister_netdevice(bond_dev);
4607 }
4608 4602
4609 bond_destroy_proc_dir(); 4603 netif_addr_lock_bh(bond_dev);
4604 bond_mc_list_destroy(bond);
4605 netif_addr_unlock_bh(bond_dev);
4610} 4606}
4611 4607
4612/*------------------------- Module initialization ---------------------------*/ 4608/*------------------------- Module initialization ---------------------------*/
@@ -4646,7 +4642,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
4646 4642
4647static int bond_check_params(struct bond_params *params) 4643static int bond_check_params(struct bond_params *params)
4648{ 4644{
4649 int arp_validate_value, fail_over_mac_value; 4645 int arp_validate_value, fail_over_mac_value, primary_reselect_value;
4650 4646
4651 /* 4647 /*
4652 * Convert string parameters. 4648 * Convert string parameters.
@@ -4665,7 +4661,8 @@ static int bond_check_params(struct bond_params *params)
4665 if ((bond_mode != BOND_MODE_XOR) && 4661 if ((bond_mode != BOND_MODE_XOR) &&
4666 (bond_mode != BOND_MODE_8023AD)) { 4662 (bond_mode != BOND_MODE_8023AD)) {
4667 pr_info(DRV_NAME 4663 pr_info(DRV_NAME
4668 ": xor_mode param is irrelevant in mode %s\n", 4664 ": xmit_hash_policy param is irrelevant in"
4665 " mode %s\n",
4669 bond_mode_name(bond_mode)); 4666 bond_mode_name(bond_mode));
4670 } else { 4667 } else {
4671 xmit_hashtype = bond_parse_parm(xmit_hash_policy, 4668 xmit_hashtype = bond_parse_parm(xmit_hash_policy,
@@ -4945,6 +4942,20 @@ static int bond_check_params(struct bond_params *params)
4945 primary = NULL; 4942 primary = NULL;
4946 } 4943 }
4947 4944
4945 if (primary && primary_reselect) {
4946 primary_reselect_value = bond_parse_parm(primary_reselect,
4947 pri_reselect_tbl);
4948 if (primary_reselect_value == -1) {
4949 pr_err(DRV_NAME
4950 ": Error: Invalid primary_reselect \"%s\"\n",
4951 primary_reselect ==
4952 NULL ? "NULL" : primary_reselect);
4953 return -EINVAL;
4954 }
4955 } else {
4956 primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
4957 }
4958
4948 if (fail_over_mac) { 4959 if (fail_over_mac) {
4949 fail_over_mac_value = bond_parse_parm(fail_over_mac, 4960 fail_over_mac_value = bond_parse_parm(fail_over_mac,
4950 fail_over_mac_tbl); 4961 fail_over_mac_tbl);
@@ -4976,6 +4987,7 @@ static int bond_check_params(struct bond_params *params)
4976 params->use_carrier = use_carrier; 4987 params->use_carrier = use_carrier;
4977 params->lacp_fast = lacp_fast; 4988 params->lacp_fast = lacp_fast;
4978 params->primary[0] = 0; 4989 params->primary[0] = 0;
4990 params->primary_reselect = primary_reselect_value;
4979 params->fail_over_mac = fail_over_mac_value; 4991 params->fail_over_mac = fail_over_mac_value;
4980 4992
4981 if (primary) { 4993 if (primary) {
@@ -5012,6 +5024,7 @@ static void bond_set_lockdep_class(struct net_device *dev)
5012static int bond_init(struct net_device *bond_dev) 5024static int bond_init(struct net_device *bond_dev)
5013{ 5025{
5014 struct bonding *bond = netdev_priv(bond_dev); 5026 struct bonding *bond = netdev_priv(bond_dev);
5027 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
5015 5028
5016 pr_debug("Begin bond_init for %s\n", bond_dev->name); 5029 pr_debug("Begin bond_init for %s\n", bond_dev->name);
5017 5030
@@ -5024,30 +5037,41 @@ static int bond_init(struct net_device *bond_dev)
5024 netif_carrier_off(bond_dev); 5037 netif_carrier_off(bond_dev);
5025 5038
5026 bond_create_proc_entry(bond); 5039 bond_create_proc_entry(bond);
5027 list_add_tail(&bond->bond_list, &bond_dev_list); 5040 list_add_tail(&bond->bond_list, &bn->dev_list);
5028 5041
5042 bond_prepare_sysfs_group(bond);
5029 return 0; 5043 return 0;
5030} 5044}
5031 5045
5046static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
5047{
5048 if (tb[IFLA_ADDRESS]) {
5049 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
5050 return -EINVAL;
5051 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
5052 return -EADDRNOTAVAIL;
5053 }
5054 return 0;
5055}
5056
5057static struct rtnl_link_ops bond_link_ops __read_mostly = {
5058 .kind = "bond",
5059 .priv_size = sizeof(struct bonding),
5060 .setup = bond_setup,
5061 .validate = bond_validate,
5062};
5063
5032/* Create a new bond based on the specified name and bonding parameters. 5064/* Create a new bond based on the specified name and bonding parameters.
5033 * If name is NULL, obtain a suitable "bond%d" name for us. 5065 * If name is NULL, obtain a suitable "bond%d" name for us.
5034 * Caller must NOT hold rtnl_lock; we need to release it here before we 5066 * Caller must NOT hold rtnl_lock; we need to release it here before we
5035 * set up our sysfs entries. 5067 * set up our sysfs entries.
5036 */ 5068 */
5037int bond_create(const char *name) 5069int bond_create(struct net *net, const char *name)
5038{ 5070{
5039 struct net_device *bond_dev; 5071 struct net_device *bond_dev;
5040 int res; 5072 int res;
5041 5073
5042 rtnl_lock(); 5074 rtnl_lock();
5043 /* Check to see if the bond already exists. */
5044 /* FIXME: pass netns from caller */
5045 if (name && __dev_get_by_name(&init_net, name)) {
5046 pr_err(DRV_NAME ": cannot add bond %s; already exists\n",
5047 name);
5048 res = -EEXIST;
5049 goto out_rtnl;
5050 }
5051 5075
5052 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", 5076 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
5053 bond_setup); 5077 bond_setup);
@@ -5055,9 +5079,12 @@ int bond_create(const char *name)
5055 pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n", 5079 pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
5056 name); 5080 name);
5057 res = -ENOMEM; 5081 res = -ENOMEM;
5058 goto out_rtnl; 5082 goto out;
5059 } 5083 }
5060 5084
5085 dev_net_set(bond_dev, net);
5086 bond_dev->rtnl_link_ops = &bond_link_ops;
5087
5061 if (!name) { 5088 if (!name) {
5062 res = dev_alloc_name(bond_dev, "bond%d"); 5089 res = dev_alloc_name(bond_dev, "bond%d");
5063 if (res < 0) 5090 if (res < 0)
@@ -5065,27 +5092,55 @@ int bond_create(const char *name)
5065 } 5092 }
5066 5093
5067 res = register_netdevice(bond_dev); 5094 res = register_netdevice(bond_dev);
5068 if (res < 0)
5069 goto out_bond;
5070
5071 res = bond_create_sysfs_entry(netdev_priv(bond_dev));
5072 if (res < 0)
5073 goto out_unreg;
5074 5095
5096out:
5075 rtnl_unlock(); 5097 rtnl_unlock();
5076 return 0; 5098 return res;
5077
5078out_unreg:
5079 unregister_netdevice(bond_dev);
5080out_bond:
5081 bond_deinit(bond_dev);
5082out_netdev: 5099out_netdev:
5083 free_netdev(bond_dev); 5100 free_netdev(bond_dev);
5084out_rtnl: 5101 goto out;
5085 rtnl_unlock();
5086 return res;
5087} 5102}
5088 5103
5104static int bond_net_init(struct net *net)
5105{
5106 struct bond_net *bn;
5107 int err;
5108
5109 err = -ENOMEM;
5110 bn = kzalloc(sizeof(struct bond_net), GFP_KERNEL);
5111 if (bn == NULL)
5112 goto out;
5113
5114 bn->net = net;
5115 INIT_LIST_HEAD(&bn->dev_list);
5116
5117 err = net_assign_generic(net, bond_net_id, bn);
5118 if (err)
5119 goto out_free;
5120
5121 bond_create_proc_dir(bn);
5122out:
5123 return err;
5124out_free:
5125 kfree(bn);
5126 goto out;
5127}
5128
5129static void bond_net_exit(struct net *net)
5130{
5131 struct bond_net *bn;
5132
5133 bn = net_generic(net, bond_net_id);
5134
5135 bond_destroy_proc_dir(bn);
5136 kfree(bn);
5137}
5138
5139static struct pernet_operations bond_net_ops = {
5140 .init = bond_net_init,
5141 .exit = bond_net_exit,
5142};
5143
5089static int __init bonding_init(void) 5144static int __init bonding_init(void)
5090{ 5145{
5091 int i; 5146 int i;
@@ -5097,10 +5152,16 @@ static int __init bonding_init(void)
5097 if (res) 5152 if (res)
5098 goto out; 5153 goto out;
5099 5154
5100 bond_create_proc_dir(); 5155 res = register_pernet_gen_subsys(&bond_net_id, &bond_net_ops);
5156 if (res)
5157 goto out;
5158
5159 res = rtnl_link_register(&bond_link_ops);
5160 if (res)
5161 goto err_link;
5101 5162
5102 for (i = 0; i < max_bonds; i++) { 5163 for (i = 0; i < max_bonds; i++) {
5103 res = bond_create(NULL); 5164 res = bond_create(&init_net, NULL);
5104 if (res) 5165 if (res)
5105 goto err; 5166 goto err;
5106 } 5167 }
@@ -5112,14 +5173,13 @@ static int __init bonding_init(void)
5112 register_netdevice_notifier(&bond_netdev_notifier); 5173 register_netdevice_notifier(&bond_netdev_notifier);
5113 register_inetaddr_notifier(&bond_inetaddr_notifier); 5174 register_inetaddr_notifier(&bond_inetaddr_notifier);
5114 bond_register_ipv6_notifier(); 5175 bond_register_ipv6_notifier();
5115
5116 goto out;
5117err:
5118 rtnl_lock();
5119 bond_free_all();
5120 rtnl_unlock();
5121out: 5176out:
5122 return res; 5177 return res;
5178err:
5179 rtnl_link_unregister(&bond_link_ops);
5180err_link:
5181 unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
5182 goto out;
5123 5183
5124} 5184}
5125 5185
@@ -5131,9 +5191,8 @@ static void __exit bonding_exit(void)
5131 5191
5132 bond_destroy_sysfs(); 5192 bond_destroy_sysfs();
5133 5193
5134 rtnl_lock(); 5194 rtnl_link_unregister(&bond_link_ops);
5135 bond_free_all(); 5195 unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
5136 rtnl_unlock();
5137} 5196}
5138 5197
5139module_init(bonding_init); 5198module_init(bonding_init);
@@ -5142,3 +5201,4 @@ MODULE_LICENSE("GPL");
5142MODULE_VERSION(DRV_VERSION); 5201MODULE_VERSION(DRV_VERSION);
5143MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION); 5202MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
5144MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); 5203MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
5204MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 8762a27a2a18..4e00b4f83641 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -36,6 +36,8 @@
36#include <linux/rtnetlink.h> 36#include <linux/rtnetlink.h>
37#include <linux/etherdevice.h> 37#include <linux/etherdevice.h>
38#include <net/net_namespace.h> 38#include <net/net_namespace.h>
39#include <net/netns/generic.h>
40#include <linux/nsproxy.h>
39 41
40#include "bonding.h" 42#include "bonding.h"
41 43
@@ -48,12 +50,14 @@
48 */ 50 */
49static ssize_t bonding_show_bonds(struct class *cls, char *buf) 51static ssize_t bonding_show_bonds(struct class *cls, char *buf)
50{ 52{
53 struct net *net = current->nsproxy->net_ns;
54 struct bond_net *bn = net_generic(net, bond_net_id);
51 int res = 0; 55 int res = 0;
52 struct bonding *bond; 56 struct bonding *bond;
53 57
54 rtnl_lock(); 58 rtnl_lock();
55 59
56 list_for_each_entry(bond, &bond_dev_list, bond_list) { 60 list_for_each_entry(bond, &bn->dev_list, bond_list) {
57 if (res > (PAGE_SIZE - IFNAMSIZ)) { 61 if (res > (PAGE_SIZE - IFNAMSIZ)) {
58 /* not enough space for another interface name */ 62 /* not enough space for another interface name */
59 if ((PAGE_SIZE - res) > 10) 63 if ((PAGE_SIZE - res) > 10)
@@ -70,11 +74,12 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buf)
70 return res; 74 return res;
71} 75}
72 76
73static struct net_device *bond_get_by_name(const char *ifname) 77static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
74{ 78{
79 struct bond_net *bn = net_generic(net, bond_net_id);
75 struct bonding *bond; 80 struct bonding *bond;
76 81
77 list_for_each_entry(bond, &bond_dev_list, bond_list) { 82 list_for_each_entry(bond, &bn->dev_list, bond_list) {
78 if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0) 83 if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0)
79 return bond->dev; 84 return bond->dev;
80 } 85 }
@@ -92,6 +97,7 @@ static struct net_device *bond_get_by_name(const char *ifname)
92static ssize_t bonding_store_bonds(struct class *cls, 97static ssize_t bonding_store_bonds(struct class *cls,
93 const char *buffer, size_t count) 98 const char *buffer, size_t count)
94{ 99{
100 struct net *net = current->nsproxy->net_ns;
95 char command[IFNAMSIZ + 1] = {0, }; 101 char command[IFNAMSIZ + 1] = {0, };
96 char *ifname; 102 char *ifname;
97 int rv, res = count; 103 int rv, res = count;
@@ -105,7 +111,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
105 if (command[0] == '+') { 111 if (command[0] == '+') {
106 pr_info(DRV_NAME 112 pr_info(DRV_NAME
107 ": %s is being created...\n", ifname); 113 ": %s is being created...\n", ifname);
108 rv = bond_create(ifname); 114 rv = bond_create(net, ifname);
109 if (rv) { 115 if (rv) {
110 pr_info(DRV_NAME ": Bond creation failed.\n"); 116 pr_info(DRV_NAME ": Bond creation failed.\n");
111 res = rv; 117 res = rv;
@@ -114,7 +120,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
114 struct net_device *bond_dev; 120 struct net_device *bond_dev;
115 121
116 rtnl_lock(); 122 rtnl_lock();
117 bond_dev = bond_get_by_name(ifname); 123 bond_dev = bond_get_by_name(net, ifname);
118 if (bond_dev) { 124 if (bond_dev) {
119 pr_info(DRV_NAME ": %s is being deleted...\n", 125 pr_info(DRV_NAME ": %s is being deleted...\n",
120 ifname); 126 ifname);
@@ -239,8 +245,7 @@ static ssize_t bonding_store_slaves(struct device *d,
239 /* Got a slave name in ifname. Is it already in the list? */ 245 /* Got a slave name in ifname. Is it already in the list? */
240 found = 0; 246 found = 0;
241 247
242 /* FIXME: get netns from sysfs object */ 248 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
243 dev = __dev_get_by_name(&init_net, ifname);
244 if (!dev) { 249 if (!dev) {
245 pr_info(DRV_NAME 250 pr_info(DRV_NAME
246 ": %s: Interface %s does not exist!\n", 251 ": %s: Interface %s does not exist!\n",
@@ -1214,6 +1219,58 @@ static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
1214 bonding_show_primary, bonding_store_primary); 1219 bonding_show_primary, bonding_store_primary);
1215 1220
1216/* 1221/*
1222 * Show and set the primary_reselect flag.
1223 */
1224static ssize_t bonding_show_primary_reselect(struct device *d,
1225 struct device_attribute *attr,
1226 char *buf)
1227{
1228 struct bonding *bond = to_bond(d);
1229
1230 return sprintf(buf, "%s %d\n",
1231 pri_reselect_tbl[bond->params.primary_reselect].modename,
1232 bond->params.primary_reselect);
1233}
1234
1235static ssize_t bonding_store_primary_reselect(struct device *d,
1236 struct device_attribute *attr,
1237 const char *buf, size_t count)
1238{
1239 int new_value, ret = count;
1240 struct bonding *bond = to_bond(d);
1241
1242 if (!rtnl_trylock())
1243 return restart_syscall();
1244
1245 new_value = bond_parse_parm(buf, pri_reselect_tbl);
1246 if (new_value < 0) {
1247 pr_err(DRV_NAME
1248 ": %s: Ignoring invalid primary_reselect value %.*s.\n",
1249 bond->dev->name,
1250 (int) strlen(buf) - 1, buf);
1251 ret = -EINVAL;
1252 goto out;
1253 }
1254
1255 bond->params.primary_reselect = new_value;
1256 pr_info(DRV_NAME ": %s: setting primary_reselect to %s (%d).\n",
1257 bond->dev->name, pri_reselect_tbl[new_value].modename,
1258 new_value);
1259
1260 read_lock(&bond->lock);
1261 write_lock_bh(&bond->curr_slave_lock);
1262 bond_select_active_slave(bond);
1263 write_unlock_bh(&bond->curr_slave_lock);
1264 read_unlock(&bond->lock);
1265out:
1266 rtnl_unlock();
1267 return ret;
1268}
1269static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
1270 bonding_show_primary_reselect,
1271 bonding_store_primary_reselect);
1272
1273/*
1217 * Show and set the use_carrier flag. 1274 * Show and set the use_carrier flag.
1218 */ 1275 */
1219static ssize_t bonding_show_carrier(struct device *d, 1276static ssize_t bonding_show_carrier(struct device *d,
@@ -1502,6 +1559,7 @@ static struct attribute *per_bond_attrs[] = {
1502 &dev_attr_num_unsol_na.attr, 1559 &dev_attr_num_unsol_na.attr,
1503 &dev_attr_miimon.attr, 1560 &dev_attr_miimon.attr,
1504 &dev_attr_primary.attr, 1561 &dev_attr_primary.attr,
1562 &dev_attr_primary_reselect.attr,
1505 &dev_attr_use_carrier.attr, 1563 &dev_attr_use_carrier.attr,
1506 &dev_attr_active_slave.attr, 1564 &dev_attr_active_slave.attr,
1507 &dev_attr_mii_status.attr, 1565 &dev_attr_mii_status.attr,
@@ -1564,24 +1622,8 @@ void bond_destroy_sysfs(void)
1564 * Initialize sysfs for each bond. This sets up and registers 1622 * Initialize sysfs for each bond. This sets up and registers
1565 * the 'bondctl' directory for each individual bond under /sys/class/net. 1623 * the 'bondctl' directory for each individual bond under /sys/class/net.
1566 */ 1624 */
1567int bond_create_sysfs_entry(struct bonding *bond) 1625void bond_prepare_sysfs_group(struct bonding *bond)
1568{ 1626{
1569 struct net_device *dev = bond->dev; 1627 bond->dev->sysfs_groups[0] = &bonding_group;
1570 int err;
1571
1572 err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
1573 if (err)
1574 pr_emerg("eek! didn't create group!\n");
1575
1576 return err;
1577}
1578/*
1579 * Remove sysfs entries for each bond.
1580 */
1581void bond_destroy_sysfs_entry(struct bonding *bond)
1582{
1583 struct net_device *dev = bond->dev;
1584
1585 sysfs_remove_group(&(dev->dev.kobj), &bonding_group);
1586} 1628}
1587 1629
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 68247714466f..a51ae7dc8d51 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,15 +23,13 @@
23#include "bond_3ad.h" 23#include "bond_3ad.h"
24#include "bond_alb.h" 24#include "bond_alb.h"
25 25
26#define DRV_VERSION "3.5.0" 26#define DRV_VERSION "3.6.0"
27#define DRV_RELDATE "November 4, 2008" 27#define DRV_RELDATE "September 26, 2009"
28#define DRV_NAME "bonding" 28#define DRV_NAME "bonding"
29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
30 30
31#define BOND_MAX_ARP_TARGETS 16 31#define BOND_MAX_ARP_TARGETS 16
32 32
33extern struct list_head bond_dev_list;
34
35#define IS_UP(dev) \ 33#define IS_UP(dev) \
36 ((((dev)->flags & IFF_UP) == IFF_UP) && \ 34 ((((dev)->flags & IFF_UP) == IFF_UP) && \
37 netif_running(dev) && \ 35 netif_running(dev) && \
@@ -131,6 +129,7 @@ struct bond_params {
131 int lacp_fast; 129 int lacp_fast;
132 int ad_select; 130 int ad_select;
133 char primary[IFNAMSIZ]; 131 char primary[IFNAMSIZ];
132 int primary_reselect;
134 __be32 arp_targets[BOND_MAX_ARP_TARGETS]; 133 __be32 arp_targets[BOND_MAX_ARP_TARGETS];
135}; 134};
136 135
@@ -190,6 +189,7 @@ struct bonding {
190 struct slave *curr_active_slave; 189 struct slave *curr_active_slave;
191 struct slave *current_arp_slave; 190 struct slave *current_arp_slave;
192 struct slave *primary_slave; 191 struct slave *primary_slave;
192 bool force_primary;
193 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 193 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
194 rwlock_t lock; 194 rwlock_t lock;
195 rwlock_t curr_slave_lock; 195 rwlock_t curr_slave_lock;
@@ -204,7 +204,7 @@ struct bonding {
204#endif /* CONFIG_PROC_FS */ 204#endif /* CONFIG_PROC_FS */
205 struct list_head bond_list; 205 struct list_head bond_list;
206 struct dev_mc_list *mc_list; 206 struct dev_mc_list *mc_list;
207 int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); 207 int (*xmit_hash_policy)(struct sk_buff *, int);
208 __be32 master_ip; 208 __be32 master_ip;
209 u16 flags; 209 u16 flags;
210 u16 rr_tx_counter; 210 u16 rr_tx_counter;
@@ -258,6 +258,10 @@ static inline bool bond_is_lb(const struct bonding *bond)
258 || bond->params.mode == BOND_MODE_ALB; 258 || bond->params.mode == BOND_MODE_ALB;
259} 259}
260 260
261#define BOND_PRI_RESELECT_ALWAYS 0
262#define BOND_PRI_RESELECT_BETTER 1
263#define BOND_PRI_RESELECT_FAILURE 2
264
261#define BOND_FOM_NONE 0 265#define BOND_FOM_NONE 0
262#define BOND_FOM_ACTIVE 1 266#define BOND_FOM_ACTIVE 1
263#define BOND_FOM_FOLLOW 2 267#define BOND_FOM_FOLLOW 2
@@ -321,12 +325,11 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
321 325
322struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 326struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
323int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 327int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
324int bond_create(const char *name); 328int bond_create(struct net *net, const char *name);
325int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); 329int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
326int bond_create_sysfs(void); 330int bond_create_sysfs(void);
327void bond_destroy_sysfs(void); 331void bond_destroy_sysfs(void);
328void bond_destroy_sysfs_entry(struct bonding *bond); 332void bond_prepare_sysfs_group(struct bonding *bond);
329int bond_create_sysfs_entry(struct bonding *bond);
330int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave); 333int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
331void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave); 334void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
332int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 335int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
@@ -341,13 +344,22 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
341void bond_register_arp(struct bonding *); 344void bond_register_arp(struct bonding *);
342void bond_unregister_arp(struct bonding *); 345void bond_unregister_arp(struct bonding *);
343 346
347struct bond_net {
348 struct net * net; /* Associated network namespace */
349 struct list_head dev_list;
350#ifdef CONFIG_PROC_FS
351 struct proc_dir_entry * proc_dir;
352#endif
353};
354
344/* exported from bond_main.c */ 355/* exported from bond_main.c */
345extern struct list_head bond_dev_list; 356extern int bond_net_id;
346extern const struct bond_parm_tbl bond_lacp_tbl[]; 357extern const struct bond_parm_tbl bond_lacp_tbl[];
347extern const struct bond_parm_tbl bond_mode_tbl[]; 358extern const struct bond_parm_tbl bond_mode_tbl[];
348extern const struct bond_parm_tbl xmit_hashtype_tbl[]; 359extern const struct bond_parm_tbl xmit_hashtype_tbl[];
349extern const struct bond_parm_tbl arp_validate_tbl[]; 360extern const struct bond_parm_tbl arp_validate_tbl[];
350extern const struct bond_parm_tbl fail_over_mac_tbl[]; 361extern const struct bond_parm_tbl fail_over_mac_tbl[];
362extern const struct bond_parm_tbl pri_reselect_tbl[];
351extern struct bond_parm_tbl ad_select_tbl[]; 363extern struct bond_parm_tbl ad_select_tbl[];
352 364
353#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 365#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -370,4 +382,3 @@ static inline void bond_unregister_ipv6_notifier(void)
370#endif 382#endif
371 383
372#endif /* _LINUX_BONDING_H */ 384#endif /* _LINUX_BONDING_H */
373
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 772f6d2489ce..bb803fa1e6a7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,6 +41,21 @@ config CAN_AT91
41 ---help--- 41 ---help---
42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. 42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
43 43
44config CAN_TI_HECC
45 depends on CAN_DEV && ARCH_OMAP3
46 tristate "TI High End CAN Controller"
47 ---help---
48 Driver for TI HECC (High End CAN Controller) module found on many
49 TI devices. The device specifications are available from www.ti.com
50
51config CAN_MCP251X
52 tristate "Microchip MCP251x SPI CAN controllers"
53 depends on CAN_DEV && SPI
54 ---help---
55 Driver for the Microchip MCP251x SPI CAN controllers.
56
57source "drivers/net/can/mscan/Kconfig"
58
44source "drivers/net/can/sja1000/Kconfig" 59source "drivers/net/can/sja1000/Kconfig"
45 60
46source "drivers/net/can/usb/Kconfig" 61source "drivers/net/can/usb/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0dea62721f2f..56899fef1c6a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,9 @@ can-dev-y := dev.o
10obj-y += usb/ 10obj-y += usb/
11 11
12obj-$(CONFIG_CAN_SJA1000) += sja1000/ 12obj-$(CONFIG_CAN_SJA1000) += sja1000/
13obj-$(CONFIG_CAN_MSCAN) += mscan/
13obj-$(CONFIG_CAN_AT91) += at91_can.o 14obj-$(CONFIG_CAN_AT91) += at91_can.o
15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
14 17
15ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 18ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f67ae285a35a..cbe3fce53e3b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -221,38 +221,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
221 set_mb_mode_prio(priv, mb, mode, 0); 221 set_mb_mode_prio(priv, mb, mode, 0);
222} 222}
223 223
224static struct sk_buff *alloc_can_skb(struct net_device *dev,
225 struct can_frame **cf)
226{
227 struct sk_buff *skb;
228
229 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
230 if (unlikely(!skb))
231 return NULL;
232
233 skb->protocol = htons(ETH_P_CAN);
234 skb->ip_summed = CHECKSUM_UNNECESSARY;
235 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
236
237 return skb;
238}
239
240static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
241 struct can_frame **cf)
242{
243 struct sk_buff *skb;
244
245 skb = alloc_can_skb(dev, cf);
246 if (unlikely(!skb))
247 return NULL;
248
249 memset(*cf, 0, sizeof(struct can_frame));
250 (*cf)->can_id = CAN_ERR_FLAG;
251 (*cf)->can_dlc = CAN_ERR_DLC;
252
253 return skb;
254}
255
256/* 224/*
257 * Swtich transceiver on or off 225 * Swtich transceiver on or off
258 */ 226 */
@@ -1087,7 +1055,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1087 goto exit_release; 1055 goto exit_release;
1088 } 1056 }
1089 1057
1090 dev = alloc_candev(sizeof(struct at91_priv)); 1058 dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
1091 if (!dev) { 1059 if (!dev) {
1092 err = -ENOMEM; 1060 err = -ENOMEM;
1093 goto exit_iounmap; 1061 goto exit_iounmap;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 2868fe842a41..c1bb29f0322b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -245,7 +245,7 @@ static void can_flush_echo_skb(struct net_device *dev)
245 struct net_device_stats *stats = &dev->stats; 245 struct net_device_stats *stats = &dev->stats;
246 int i; 246 int i;
247 247
248 for (i = 0; i < CAN_ECHO_SKB_MAX; i++) { 248 for (i = 0; i < priv->echo_skb_max; i++) {
249 if (priv->echo_skb[i]) { 249 if (priv->echo_skb[i]) {
250 kfree_skb(priv->echo_skb[i]); 250 kfree_skb(priv->echo_skb[i]);
251 priv->echo_skb[i] = NULL; 251 priv->echo_skb[i] = NULL;
@@ -262,10 +262,13 @@ static void can_flush_echo_skb(struct net_device *dev)
262 * of the device driver. The driver must protect access to 262 * of the device driver. The driver must protect access to
263 * priv->echo_skb, if necessary. 263 * priv->echo_skb, if necessary.
264 */ 264 */
265void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx) 265void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
266 unsigned int idx)
266{ 267{
267 struct can_priv *priv = netdev_priv(dev); 268 struct can_priv *priv = netdev_priv(dev);
268 269
270 BUG_ON(idx >= priv->echo_skb_max);
271
269 /* check flag whether this packet has to be looped back */ 272 /* check flag whether this packet has to be looped back */
270 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) { 273 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
271 kfree_skb(skb); 274 kfree_skb(skb);
@@ -311,10 +314,12 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
311 * is handled in the device driver. The driver must protect 314 * is handled in the device driver. The driver must protect
312 * access to priv->echo_skb, if necessary. 315 * access to priv->echo_skb, if necessary.
313 */ 316 */
314void can_get_echo_skb(struct net_device *dev, int idx) 317void can_get_echo_skb(struct net_device *dev, unsigned int idx)
315{ 318{
316 struct can_priv *priv = netdev_priv(dev); 319 struct can_priv *priv = netdev_priv(dev);
317 320
321 BUG_ON(idx >= priv->echo_skb_max);
322
318 if (priv->echo_skb[idx]) { 323 if (priv->echo_skb[idx]) {
319 netif_rx(priv->echo_skb[idx]); 324 netif_rx(priv->echo_skb[idx]);
320 priv->echo_skb[idx] = NULL; 325 priv->echo_skb[idx] = NULL;
@@ -327,10 +332,12 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb);
327 * 332 *
328 * The function is typically called when TX failed. 333 * The function is typically called when TX failed.
329 */ 334 */
330void can_free_echo_skb(struct net_device *dev, int idx) 335void can_free_echo_skb(struct net_device *dev, unsigned int idx)
331{ 336{
332 struct can_priv *priv = netdev_priv(dev); 337 struct can_priv *priv = netdev_priv(dev);
333 338
339 BUG_ON(idx >= priv->echo_skb_max);
340
334 if (priv->echo_skb[idx]) { 341 if (priv->echo_skb[idx]) {
335 kfree_skb(priv->echo_skb[idx]); 342 kfree_skb(priv->echo_skb[idx]);
336 priv->echo_skb[idx] = NULL; 343 priv->echo_skb[idx] = NULL;
@@ -359,17 +366,12 @@ void can_restart(unsigned long data)
359 can_flush_echo_skb(dev); 366 can_flush_echo_skb(dev);
360 367
361 /* send restart message upstream */ 368 /* send restart message upstream */
362 skb = dev_alloc_skb(sizeof(struct can_frame)); 369 skb = alloc_can_err_skb(dev, &cf);
363 if (skb == NULL) { 370 if (skb == NULL) {
364 err = -ENOMEM; 371 err = -ENOMEM;
365 goto restart; 372 goto restart;
366 } 373 }
367 skb->dev = dev; 374 cf->can_id |= CAN_ERR_RESTARTED;
368 skb->protocol = htons(ETH_P_CAN);
369 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
370 memset(cf, 0, sizeof(struct can_frame));
371 cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
372 cf->can_dlc = CAN_ERR_DLC;
373 375
374 netif_rx(skb); 376 netif_rx(skb);
375 377
@@ -442,20 +444,66 @@ static void can_setup(struct net_device *dev)
442 dev->features = NETIF_F_NO_CSUM; 444 dev->features = NETIF_F_NO_CSUM;
443} 445}
444 446
447struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
448{
449 struct sk_buff *skb;
450
451 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
452 if (unlikely(!skb))
453 return NULL;
454
455 skb->protocol = htons(ETH_P_CAN);
456 skb->pkt_type = PACKET_BROADCAST;
457 skb->ip_summed = CHECKSUM_UNNECESSARY;
458 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
459 memset(*cf, 0, sizeof(struct can_frame));
460
461 return skb;
462}
463EXPORT_SYMBOL_GPL(alloc_can_skb);
464
465struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
466{
467 struct sk_buff *skb;
468
469 skb = alloc_can_skb(dev, cf);
470 if (unlikely(!skb))
471 return NULL;
472
473 (*cf)->can_id = CAN_ERR_FLAG;
474 (*cf)->can_dlc = CAN_ERR_DLC;
475
476 return skb;
477}
478EXPORT_SYMBOL_GPL(alloc_can_err_skb);
479
445/* 480/*
446 * Allocate and setup space for the CAN network device 481 * Allocate and setup space for the CAN network device
447 */ 482 */
448struct net_device *alloc_candev(int sizeof_priv) 483struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
449{ 484{
450 struct net_device *dev; 485 struct net_device *dev;
451 struct can_priv *priv; 486 struct can_priv *priv;
487 int size;
452 488
453 dev = alloc_netdev(sizeof_priv, "can%d", can_setup); 489 if (echo_skb_max)
490 size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) +
491 echo_skb_max * sizeof(struct sk_buff *);
492 else
493 size = sizeof_priv;
494
495 dev = alloc_netdev(size, "can%d", can_setup);
454 if (!dev) 496 if (!dev)
455 return NULL; 497 return NULL;
456 498
457 priv = netdev_priv(dev); 499 priv = netdev_priv(dev);
458 500
501 if (echo_skb_max) {
502 priv->echo_skb_max = echo_skb_max;
503 priv->echo_skb = (void *)priv +
504 ALIGN(sizeof_priv, sizeof(struct sk_buff *));
505 }
506
459 priv->state = CAN_STATE_STOPPED; 507 priv->state = CAN_STATE_STOPPED;
460 508
461 init_timer(&priv->restart_timer); 509 init_timer(&priv->restart_timer);
@@ -647,7 +695,7 @@ nla_put_failure:
647 return -EMSGSIZE; 695 return -EMSGSIZE;
648} 696}
649 697
650static int can_newlink(struct net_device *dev, 698static int can_newlink(struct net *src_net, struct net_device *dev,
651 struct nlattr *tb[], struct nlattr *data[]) 699 struct nlattr *tb[], struct nlattr *data[])
652{ 700{
653 return -EOPNOTSUPP; 701 return -EOPNOTSUPP;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
new file mode 100644
index 000000000000..78b1b69b2921
--- /dev/null
+++ b/drivers/net/can/mcp251x.c
@@ -0,0 +1,1166 @@
1/*
2 * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
3 *
4 * MCP2510 support and bug fixes by Christian Pellegrin
5 * <chripell@evolware.org>
6 *
7 * Copyright 2009 Christian Pellegrin EVOL S.r.l.
8 *
9 * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
10 * Written under contract by:
11 * Chris Elston, Katalix Systems, Ltd.
12 *
13 * Based on Microchip MCP251x CAN controller driver written by
14 * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
15 *
16 * Based on CAN bus driver for the CCAN controller written by
17 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
18 * - Simon Kallweit, intefo AG
19 * Copyright 2007
20 *
21 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the version 2 of the GNU General Public License
23 * as published by the Free Software Foundation
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 *
34 *
35 *
36 * Your platform definition file should specify something like:
37 *
38 * static struct mcp251x_platform_data mcp251x_info = {
39 * .oscillator_frequency = 8000000,
40 * .board_specific_setup = &mcp251x_setup,
41 * .model = CAN_MCP251X_MCP2510,
42 * .power_enable = mcp251x_power_enable,
43 * .transceiver_enable = NULL,
44 * };
45 *
46 * static struct spi_board_info spi_board_info[] = {
47 * {
48 * .modalias = "mcp251x",
49 * .platform_data = &mcp251x_info,
50 * .irq = IRQ_EINT13,
51 * .max_speed_hz = 2*1000*1000,
52 * .chip_select = 2,
53 * },
54 * };
55 *
56 * Please see mcp251x.h for a description of the fields in
57 * struct mcp251x_platform_data.
58 *
59 */
60
61#include <linux/can.h>
62#include <linux/can/core.h>
63#include <linux/can/dev.h>
64#include <linux/can/platform/mcp251x.h>
65#include <linux/completion.h>
66#include <linux/delay.h>
67#include <linux/device.h>
68#include <linux/dma-mapping.h>
69#include <linux/freezer.h>
70#include <linux/interrupt.h>
71#include <linux/io.h>
72#include <linux/kernel.h>
73#include <linux/module.h>
74#include <linux/netdevice.h>
75#include <linux/platform_device.h>
76#include <linux/spi/spi.h>
77#include <linux/uaccess.h>
78
79/* SPI interface instruction set */
80#define INSTRUCTION_WRITE 0x02
81#define INSTRUCTION_READ 0x03
82#define INSTRUCTION_BIT_MODIFY 0x05
83#define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
84#define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
85#define INSTRUCTION_RESET 0xC0
86
87/* MPC251x registers */
88#define CANSTAT 0x0e
89#define CANCTRL 0x0f
90# define CANCTRL_REQOP_MASK 0xe0
91# define CANCTRL_REQOP_CONF 0x80
92# define CANCTRL_REQOP_LISTEN_ONLY 0x60
93# define CANCTRL_REQOP_LOOPBACK 0x40
94# define CANCTRL_REQOP_SLEEP 0x20
95# define CANCTRL_REQOP_NORMAL 0x00
96# define CANCTRL_OSM 0x08
97# define CANCTRL_ABAT 0x10
98#define TEC 0x1c
99#define REC 0x1d
100#define CNF1 0x2a
101# define CNF1_SJW_SHIFT 6
102#define CNF2 0x29
103# define CNF2_BTLMODE 0x80
104# define CNF2_SAM 0x40
105# define CNF2_PS1_SHIFT 3
106#define CNF3 0x28
107# define CNF3_SOF 0x08
108# define CNF3_WAKFIL 0x04
109# define CNF3_PHSEG2_MASK 0x07
110#define CANINTE 0x2b
111# define CANINTE_MERRE 0x80
112# define CANINTE_WAKIE 0x40
113# define CANINTE_ERRIE 0x20
114# define CANINTE_TX2IE 0x10
115# define CANINTE_TX1IE 0x08
116# define CANINTE_TX0IE 0x04
117# define CANINTE_RX1IE 0x02
118# define CANINTE_RX0IE 0x01
119#define CANINTF 0x2c
120# define CANINTF_MERRF 0x80
121# define CANINTF_WAKIF 0x40
122# define CANINTF_ERRIF 0x20
123# define CANINTF_TX2IF 0x10
124# define CANINTF_TX1IF 0x08
125# define CANINTF_TX0IF 0x04
126# define CANINTF_RX1IF 0x02
127# define CANINTF_RX0IF 0x01
128#define EFLG 0x2d
129# define EFLG_EWARN 0x01
130# define EFLG_RXWAR 0x02
131# define EFLG_TXWAR 0x04
132# define EFLG_RXEP 0x08
133# define EFLG_TXEP 0x10
134# define EFLG_TXBO 0x20
135# define EFLG_RX0OVR 0x40
136# define EFLG_RX1OVR 0x80
137#define TXBCTRL(n) (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
138# define TXBCTRL_ABTF 0x40
139# define TXBCTRL_MLOA 0x20
140# define TXBCTRL_TXERR 0x10
141# define TXBCTRL_TXREQ 0x08
142#define TXBSIDH(n) (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
143# define SIDH_SHIFT 3
144#define TXBSIDL(n) (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
145# define SIDL_SID_MASK 7
146# define SIDL_SID_SHIFT 5
147# define SIDL_EXIDE_SHIFT 3
148# define SIDL_EID_SHIFT 16
149# define SIDL_EID_MASK 3
150#define TXBEID8(n) (((n) * 0x10) + 0x30 + TXBEID8_OFF)
151#define TXBEID0(n) (((n) * 0x10) + 0x30 + TXBEID0_OFF)
152#define TXBDLC(n) (((n) * 0x10) + 0x30 + TXBDLC_OFF)
153# define DLC_RTR_SHIFT 6
154#define TXBCTRL_OFF 0
155#define TXBSIDH_OFF 1
156#define TXBSIDL_OFF 2
157#define TXBEID8_OFF 3
158#define TXBEID0_OFF 4
159#define TXBDLC_OFF 5
160#define TXBDAT_OFF 6
161#define RXBCTRL(n) (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
162# define RXBCTRL_BUKT 0x04
163# define RXBCTRL_RXM0 0x20
164# define RXBCTRL_RXM1 0x40
165#define RXBSIDH(n) (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
166# define RXBSIDH_SHIFT 3
167#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
168# define RXBSIDL_IDE 0x08
169# define RXBSIDL_EID 3
170# define RXBSIDL_SHIFT 5
171#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
172#define RXBEID0(n) (((n) * 0x10) + 0x60 + RXBEID0_OFF)
173#define RXBDLC(n) (((n) * 0x10) + 0x60 + RXBDLC_OFF)
174# define RXBDLC_LEN_MASK 0x0f
175# define RXBDLC_RTR 0x40
176#define RXBCTRL_OFF 0
177#define RXBSIDH_OFF 1
178#define RXBSIDL_OFF 2
179#define RXBEID8_OFF 3
180#define RXBEID0_OFF 4
181#define RXBDLC_OFF 5
182#define RXBDAT_OFF 6
183
184#define GET_BYTE(val, byte) \
185 (((val) >> ((byte) * 8)) & 0xff)
186#define SET_BYTE(val, byte) \
187 (((val) & 0xff) << ((byte) * 8))
188
189/*
190 * Buffer size required for the largest SPI transfer (i.e., reading a
191 * frame)
192 */
193#define CAN_FRAME_MAX_DATA_LEN 8
194#define SPI_TRANSFER_BUF_LEN (6 + CAN_FRAME_MAX_DATA_LEN)
195#define CAN_FRAME_MAX_BITS 128
196
197#define TX_ECHO_SKB_MAX 1
198
199#define DEVICE_NAME "mcp251x"
200
201static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
202module_param(mcp251x_enable_dma, int, S_IRUGO);
203MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
204
205static struct can_bittiming_const mcp251x_bittiming_const = {
206 .name = DEVICE_NAME,
207 .tseg1_min = 3,
208 .tseg1_max = 16,
209 .tseg2_min = 2,
210 .tseg2_max = 8,
211 .sjw_max = 4,
212 .brp_min = 1,
213 .brp_max = 64,
214 .brp_inc = 1,
215};
216
217struct mcp251x_priv {
218 struct can_priv can;
219 struct net_device *net;
220 struct spi_device *spi;
221
222 struct mutex spi_lock; /* SPI buffer lock */
223 u8 *spi_tx_buf;
224 u8 *spi_rx_buf;
225 dma_addr_t spi_tx_dma;
226 dma_addr_t spi_rx_dma;
227
228 struct sk_buff *tx_skb;
229 int tx_len;
230 struct workqueue_struct *wq;
231 struct work_struct tx_work;
232 struct work_struct irq_work;
233 struct completion awake;
234 int wake;
235 int force_quit;
236 int after_suspend;
237#define AFTER_SUSPEND_UP 1
238#define AFTER_SUSPEND_DOWN 2
239#define AFTER_SUSPEND_POWER 4
240#define AFTER_SUSPEND_RESTART 8
241 int restart_tx;
242};
243
244static void mcp251x_clean(struct net_device *net)
245{
246 struct mcp251x_priv *priv = netdev_priv(net);
247
248 net->stats.tx_errors++;
249 if (priv->tx_skb)
250 dev_kfree_skb(priv->tx_skb);
251 if (priv->tx_len)
252 can_free_echo_skb(priv->net, 0);
253 priv->tx_skb = NULL;
254 priv->tx_len = 0;
255}
256
257/*
258 * Note about handling of error return of mcp251x_spi_trans: accessing
259 * registers via SPI is not really different conceptually than using
260 * normal I/O assembler instructions, although it's much more
261 * complicated from a practical POV. So it's not advisable to always
262 * check the return value of this function. Imagine that every
263 * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
264 * error();", it would be a great mess (well there are some situation
265 * when exception handling C++ like could be useful after all). So we
266 * just check that transfers are OK at the beginning of our
267 * conversation with the chip and to avoid doing really nasty things
268 * (like injecting bogus packets in the network stack).
269 */
270static int mcp251x_spi_trans(struct spi_device *spi, int len)
271{
272 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
273 struct spi_transfer t = {
274 .tx_buf = priv->spi_tx_buf,
275 .rx_buf = priv->spi_rx_buf,
276 .len = len,
277 .cs_change = 0,
278 };
279 struct spi_message m;
280 int ret;
281
282 spi_message_init(&m);
283
284 if (mcp251x_enable_dma) {
285 t.tx_dma = priv->spi_tx_dma;
286 t.rx_dma = priv->spi_rx_dma;
287 m.is_dma_mapped = 1;
288 }
289
290 spi_message_add_tail(&t, &m);
291
292 ret = spi_sync(spi, &m);
293 if (ret)
294 dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
295 return ret;
296}
297
298static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
299{
300 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
301 u8 val = 0;
302
303 mutex_lock(&priv->spi_lock);
304
305 priv->spi_tx_buf[0] = INSTRUCTION_READ;
306 priv->spi_tx_buf[1] = reg;
307
308 mcp251x_spi_trans(spi, 3);
309 val = priv->spi_rx_buf[2];
310
311 mutex_unlock(&priv->spi_lock);
312
313 return val;
314}
315
316static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
317{
318 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
319
320 mutex_lock(&priv->spi_lock);
321
322 priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
323 priv->spi_tx_buf[1] = reg;
324 priv->spi_tx_buf[2] = val;
325
326 mcp251x_spi_trans(spi, 3);
327
328 mutex_unlock(&priv->spi_lock);
329}
330
331static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
332 u8 mask, uint8_t val)
333{
334 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
335
336 mutex_lock(&priv->spi_lock);
337
338 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
339 priv->spi_tx_buf[1] = reg;
340 priv->spi_tx_buf[2] = mask;
341 priv->spi_tx_buf[3] = val;
342
343 mcp251x_spi_trans(spi, 4);
344
345 mutex_unlock(&priv->spi_lock);
346}
347
348static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
349 int len, int tx_buf_idx)
350{
351 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
352 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
353
354 if (pdata->model == CAN_MCP251X_MCP2510) {
355 int i;
356
357 for (i = 1; i < TXBDAT_OFF + len; i++)
358 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
359 buf[i]);
360 } else {
361 mutex_lock(&priv->spi_lock);
362 memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
363 mcp251x_spi_trans(spi, TXBDAT_OFF + len);
364 mutex_unlock(&priv->spi_lock);
365 }
366}
367
368static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
369 int tx_buf_idx)
370{
371 u32 sid, eid, exide, rtr;
372 u8 buf[SPI_TRANSFER_BUF_LEN];
373
374 exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
375 if (exide)
376 sid = (frame->can_id & CAN_EFF_MASK) >> 18;
377 else
378 sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
379 eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
380 rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
381
382 buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
383 buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
384 buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
385 (exide << SIDL_EXIDE_SHIFT) |
386 ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
387 buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
388 buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
389 buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
390 memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
391 mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
392 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
393}
394
395static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
396 int buf_idx)
397{
398 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
399 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
400
401 if (pdata->model == CAN_MCP251X_MCP2510) {
402 int i, len;
403
404 for (i = 1; i < RXBDAT_OFF; i++)
405 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
406 len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
407 if (len > 8)
408 len = 8;
409 for (; i < (RXBDAT_OFF + len); i++)
410 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
411 } else {
412 mutex_lock(&priv->spi_lock);
413
414 priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
415 mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
416 memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
417
418 mutex_unlock(&priv->spi_lock);
419 }
420}
421
422static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
423{
424 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
425 struct sk_buff *skb;
426 struct can_frame *frame;
427 u8 buf[SPI_TRANSFER_BUF_LEN];
428
429 skb = alloc_can_skb(priv->net, &frame);
430 if (!skb) {
431 dev_err(&spi->dev, "cannot allocate RX skb\n");
432 priv->net->stats.rx_dropped++;
433 return;
434 }
435
436 mcp251x_hw_rx_frame(spi, buf, buf_idx);
437 if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
438 /* Extended ID format */
439 frame->can_id = CAN_EFF_FLAG;
440 frame->can_id |=
441 /* Extended ID part */
442 SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
443 SET_BYTE(buf[RXBEID8_OFF], 1) |
444 SET_BYTE(buf[RXBEID0_OFF], 0) |
445 /* Standard ID part */
446 (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
447 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
448 /* Remote transmission request */
449 if (buf[RXBDLC_OFF] & RXBDLC_RTR)
450 frame->can_id |= CAN_RTR_FLAG;
451 } else {
452 /* Standard ID format */
453 frame->can_id =
454 (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
455 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
456 }
457 /* Data length */
458 frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
459 if (frame->can_dlc > 8) {
460 dev_warn(&spi->dev, "invalid frame recevied\n");
461 priv->net->stats.rx_errors++;
462 dev_kfree_skb(skb);
463 return;
464 }
465 memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
466
467 priv->net->stats.rx_packets++;
468 priv->net->stats.rx_bytes += frame->can_dlc;
469 netif_rx(skb);
470}
471
472static void mcp251x_hw_sleep(struct spi_device *spi)
473{
474 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
475}
476
477static void mcp251x_hw_wakeup(struct spi_device *spi)
478{
479 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
480
481 priv->wake = 1;
482
483 /* Can only wake up by generating a wake-up interrupt. */
484 mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
485 mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
486
487 /* Wait until the device is awake */
488 if (!wait_for_completion_timeout(&priv->awake, HZ))
489 dev_err(&spi->dev, "MCP251x didn't wake-up\n");
490}
491
492static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
493 struct net_device *net)
494{
495 struct mcp251x_priv *priv = netdev_priv(net);
496 struct spi_device *spi = priv->spi;
497
498 if (priv->tx_skb || priv->tx_len) {
499 dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
500 netif_stop_queue(net);
501 return NETDEV_TX_BUSY;
502 }
503
504 if (skb->len != sizeof(struct can_frame)) {
505 dev_err(&spi->dev, "dropping packet - bad length\n");
506 dev_kfree_skb(skb);
507 net->stats.tx_dropped++;
508 return NETDEV_TX_OK;
509 }
510
511 netif_stop_queue(net);
512 priv->tx_skb = skb;
513 net->trans_start = jiffies;
514 queue_work(priv->wq, &priv->tx_work);
515
516 return NETDEV_TX_OK;
517}
518
519static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
520{
521 struct mcp251x_priv *priv = netdev_priv(net);
522
523 switch (mode) {
524 case CAN_MODE_START:
525 /* We have to delay work since SPI I/O may sleep */
526 priv->can.state = CAN_STATE_ERROR_ACTIVE;
527 priv->restart_tx = 1;
528 if (priv->can.restart_ms == 0)
529 priv->after_suspend = AFTER_SUSPEND_RESTART;
530 queue_work(priv->wq, &priv->irq_work);
531 break;
532 default:
533 return -EOPNOTSUPP;
534 }
535
536 return 0;
537}
538
539static void mcp251x_set_normal_mode(struct spi_device *spi)
540{
541 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
542 unsigned long timeout;
543
544 /* Enable interrupts */
545 mcp251x_write_reg(spi, CANINTE,
546 CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
547 CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
548 CANINTF_MERRF);
549
550 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
551 /* Put device into loopback mode */
552 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
553 } else {
554 /* Put device into normal mode */
555 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
556
557 /* Wait for the device to enter normal mode */
558 timeout = jiffies + HZ;
559 while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
560 schedule();
561 if (time_after(jiffies, timeout)) {
562 dev_err(&spi->dev, "MCP251x didn't"
563 " enter in normal mode\n");
564 return;
565 }
566 }
567 }
568 priv->can.state = CAN_STATE_ERROR_ACTIVE;
569}
570
571static int mcp251x_do_set_bittiming(struct net_device *net)
572{
573 struct mcp251x_priv *priv = netdev_priv(net);
574 struct can_bittiming *bt = &priv->can.bittiming;
575 struct spi_device *spi = priv->spi;
576
577 mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
578 (bt->brp - 1));
579 mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
580 (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
581 CNF2_SAM : 0) |
582 ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
583 (bt->prop_seg - 1));
584 mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
585 (bt->phase_seg2 - 1));
586 dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
587 mcp251x_read_reg(spi, CNF1),
588 mcp251x_read_reg(spi, CNF2),
589 mcp251x_read_reg(spi, CNF3));
590
591 return 0;
592}
593
594static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
595 struct spi_device *spi)
596{
597 mcp251x_do_set_bittiming(net);
598
599 /* Enable RX0->RX1 buffer roll over and disable filters */
600 mcp251x_write_bits(spi, RXBCTRL(0),
601 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
602 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
603 mcp251x_write_bits(spi, RXBCTRL(1),
604 RXBCTRL_RXM0 | RXBCTRL_RXM1,
605 RXBCTRL_RXM0 | RXBCTRL_RXM1);
606 return 0;
607}
608
/*
 * Issue the single-byte RESET instruction over SPI and give the chip
 * time to come back up.  spi_lock serializes use of the shared SPI
 * transfer buffers.  A failed SPI write is only logged; the mdelay()
 * still runs so callers see consistent timing.
 */
static void mcp251x_hw_reset(struct spi_device *spi)
{
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
	int ret;

	mutex_lock(&priv->spi_lock);

	priv->spi_tx_buf[0] = INSTRUCTION_RESET;

	ret = spi_write(spi, priv->spi_tx_buf, 1);

	mutex_unlock(&priv->spi_lock);

	if (ret)
		dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
	/* Wait for reset to finish */
	mdelay(10);
}
627
628static int mcp251x_hw_probe(struct spi_device *spi)
629{
630 int st1, st2;
631
632 mcp251x_hw_reset(spi);
633
634 /*
635 * Please note that these are "magic values" based on after
636 * reset defaults taken from data sheet which allows us to see
637 * if we really have a chip on the bus (we avoid common all
638 * zeroes or all ones situations)
639 */
640 st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
641 st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
642
643 dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
644
645 /* Check for power up default values */
646 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
647}
648
649static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
650{
651 struct net_device *net = (struct net_device *)dev_id;
652 struct mcp251x_priv *priv = netdev_priv(net);
653
654 /* Schedule bottom half */
655 if (!work_pending(&priv->irq_work))
656 queue_work(priv->wq, &priv->irq_work);
657
658 return IRQ_HANDLED;
659}
660
/*
 * net_device open callback: power the transceiver, grab the IRQ, wake
 * and reset the controller, configure it and start the TX queue.
 * Returns 0 or a negative errno; on failure everything acquired so far
 * is released again in reverse order.
 */
static int mcp251x_open(struct net_device *net)
{
	struct mcp251x_priv *priv = netdev_priv(net);
	struct spi_device *spi = priv->spi;
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	int ret;

	ret = open_candev(net);
	if (ret) {
		dev_err(&spi->dev, "unable to set initial baudrate!\n");
		return ret;
	}

	/* Power up the external CAN transceiver, if the board has one */
	if (pdata->transceiver_enable)
		pdata->transceiver_enable(1);

	priv->force_quit = 0;
	priv->tx_skb = NULL;
	priv->tx_len = 0;

	/* The MCP251x INT line is active low, hence the falling edge */
	ret = request_irq(spi->irq, mcp251x_can_isr,
			  IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
	if (ret) {
		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		close_candev(net);
		return ret;
	}

	/* Start from a clean, freshly reset chip state */
	mcp251x_hw_wakeup(spi);
	mcp251x_hw_reset(spi);
	ret = mcp251x_setup(net, priv, spi);
	if (ret) {
		free_irq(spi->irq, net);
		mcp251x_hw_sleep(spi);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		close_candev(net);
		return ret;
	}
	mcp251x_set_normal_mode(spi);
	netif_wake_queue(net);

	return 0;
}
707
/*
 * net_device stop callback: tear down in roughly the reverse order of
 * mcp251x_open() and put the controller back to sleep.
 */
static int mcp251x_stop(struct net_device *net)
{
	struct mcp251x_priv *priv = netdev_priv(net);
	struct spi_device *spi = priv->spi;
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;

	close_candev(net);

	/* Disable and clear pending interrupts */
	mcp251x_write_reg(spi, CANINTE, 0x00);
	mcp251x_write_reg(spi, CANINTF, 0x00);

	/* Tell the bottom half to stop before cutting off its IRQ feed */
	priv->force_quit = 1;
	free_irq(spi->irq, net);
	flush_workqueue(priv->wq);

	/* Abort any transmission still pending in TX buffer 0 */
	mcp251x_write_reg(spi, TXBCTRL(0), 0);
	if (priv->tx_skb || priv->tx_len)
		mcp251x_clean(net);

	mcp251x_hw_sleep(spi);

	if (pdata->transceiver_enable)
		pdata->transceiver_enable(0);

	priv->can.state = CAN_STATE_STOPPED;

	return 0;
}
737
/*
 * TX bottom half: push the skb queued by the xmit path into hardware
 * TX buffer 0 and keep it as echo skb for loopback on completion.
 * If the bus is off, the pending frame is discarded instead.
 */
static void mcp251x_tx_work_handler(struct work_struct *ws)
{
	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
						 tx_work);
	struct spi_device *spi = priv->spi;
	struct net_device *net = priv->net;
	struct can_frame *frame;

	if (priv->tx_skb) {
		frame = (struct can_frame *)priv->tx_skb->data;

		if (priv->can.state == CAN_STATE_BUS_OFF) {
			mcp251x_clean(net);
			netif_wake_queue(net);
			return;
		}
		/* Clamp the DLC to what the hardware buffer can hold */
		if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
			frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
		mcp251x_hw_tx(spi, frame, 0);
		/* Non-zero tx_len marks a transmission in flight */
		priv->tx_len = 1 + frame->can_dlc;
		can_put_echo_skb(priv->tx_skb, net, 0);
		priv->tx_skb = NULL;
	}
}
762
/*
 * IRQ bottom half: handles resume re-initialization, bus error state
 * tracking, error frame generation, TX completion and RX for both
 * hardware receive buffers.  Loops until the interrupt flag register
 * reads back empty, the driver is shutting down, or a freeze is
 * pending.
 */
static void mcp251x_irq_work_handler(struct work_struct *ws)
{
	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
						 irq_work);
	struct spi_device *spi = priv->spi;
	struct net_device *net = priv->net;
	u8 txbnctrl;
	u8 intf;
	enum can_state new_state;

	/* Re-initialize the chip after a suspend/resume cycle */
	if (priv->after_suspend) {
		mdelay(10);
		mcp251x_hw_reset(spi);
		mcp251x_setup(net, priv, spi);
		if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
			mcp251x_set_normal_mode(spi);
		} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
			netif_device_attach(net);
			/* Clean since we lost tx buffer */
			if (priv->tx_skb || priv->tx_len) {
				mcp251x_clean(net);
				netif_wake_queue(net);
			}
			mcp251x_set_normal_mode(spi);
		} else {
			mcp251x_hw_sleep(spi);
		}
		priv->after_suspend = 0;
	}

	/* Bus-off with no automatic restart: nothing more to do here */
	if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
		return;

	while (!priv->force_quit && !freezing(current)) {
		/* Read and clear the error flags first */
		u8 eflag = mcp251x_read_reg(spi, EFLG);
		int can_id = 0, data1 = 0;

		mcp251x_write_reg(spi, EFLG, 0x00);

		/* A manual restart was requested (e.g. via restart_ms) */
		if (priv->restart_tx) {
			priv->restart_tx = 0;
			mcp251x_write_reg(spi, TXBCTRL(0), 0);
			if (priv->tx_skb || priv->tx_len)
				mcp251x_clean(net);
			netif_wake_queue(net);
			can_id |= CAN_ERR_RESTARTED;
		}

		if (priv->wake) {
			/* Wait whilst the device wakes up */
			mdelay(10);
			priv->wake = 0;
		}

		/* Clear exactly the interrupt bits we have read */
		intf = mcp251x_read_reg(spi, CANINTF);
		mcp251x_write_bits(spi, CANINTF, intf, 0x00);

		/* Update can state (worst error condition wins) */
		if (eflag & EFLG_TXBO) {
			new_state = CAN_STATE_BUS_OFF;
			can_id |= CAN_ERR_BUSOFF;
		} else if (eflag & EFLG_TXEP) {
			new_state = CAN_STATE_ERROR_PASSIVE;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_TX_PASSIVE;
		} else if (eflag & EFLG_RXEP) {
			new_state = CAN_STATE_ERROR_PASSIVE;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_RX_PASSIVE;
		} else if (eflag & EFLG_TXWAR) {
			new_state = CAN_STATE_ERROR_WARNING;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_TX_WARNING;
		} else if (eflag & EFLG_RXWAR) {
			new_state = CAN_STATE_ERROR_WARNING;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_RX_WARNING;
		} else {
			new_state = CAN_STATE_ERROR_ACTIVE;
		}

		/*
		 * Update can state statistics.  The missing break after
		 * ERROR_ACTIVE is intentional: a jump straight from
		 * active to passive/bus-off counts as a warning too.
		 */
		switch (priv->can.state) {
		case CAN_STATE_ERROR_ACTIVE:
			if (new_state >= CAN_STATE_ERROR_WARNING &&
			    new_state <= CAN_STATE_BUS_OFF)
				priv->can.can_stats.error_warning++;
		case CAN_STATE_ERROR_WARNING:	/* fallthrough */
			if (new_state >= CAN_STATE_ERROR_PASSIVE &&
			    new_state <= CAN_STATE_BUS_OFF)
				priv->can.can_stats.error_passive++;
			break;
		default:
			break;
		}
		priv->can.state = new_state;

		if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
			struct sk_buff *skb;
			struct can_frame *frame;

			/* Create error frame */
			skb = alloc_can_err_skb(net, &frame);
			if (skb) {
				/* Set error frame flags based on bus state */
				frame->can_id = can_id;
				frame->data[1] = data1;

				/* Update net stats for overflows */
				if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
					if (eflag & EFLG_RX0OVR)
						net->stats.rx_over_errors++;
					if (eflag & EFLG_RX1OVR)
						net->stats.rx_over_errors++;
					frame->can_id |= CAN_ERR_CRTL;
					frame->data[1] |=
						CAN_ERR_CRTL_RX_OVERFLOW;
				}

				netif_rx(skb);
			} else {
				dev_info(&spi->dev,
					 "cannot allocate error skb\n");
			}
		}

		/* Bus-off without auto-restart: park the chip and stop */
		if (priv->can.state == CAN_STATE_BUS_OFF) {
			if (priv->can.restart_ms == 0) {
				can_bus_off(net);
				mcp251x_hw_sleep(spi);
				return;
			}
		}

		if (intf == 0)
			break;

		if (intf & CANINTF_WAKIF)
			complete(&priv->awake);

		if (intf & CANINTF_MERRF) {
			/* If there are no pending Tx buffers, restart queue */
			txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
			if (!(txbnctrl & TXBCTRL_TXREQ)) {
				if (priv->tx_skb || priv->tx_len)
					mcp251x_clean(net);
				netif_wake_queue(net);
			}
		}

		/* TX complete: account, release the echo skb, restart queue */
		if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
			net->stats.tx_packets++;
			net->stats.tx_bytes += priv->tx_len - 1;
			if (priv->tx_len) {
				can_get_echo_skb(net, 0);
				priv->tx_len = 0;
			}
			netif_wake_queue(net);
		}

		if (intf & CANINTF_RX0IF)
			mcp251x_hw_rx(spi, 0);

		if (intf & CANINTF_RX1IF)
			mcp251x_hw_rx(spi, 1);
	}
}
930
/* net_device callbacks; actual TX/RX work runs in the workqueue */
static const struct net_device_ops mcp251x_netdev_ops = {
	.ndo_open = mcp251x_open,
	.ndo_stop = mcp251x_stop,
	.ndo_start_xmit = mcp251x_hard_start_xmit,
};
936
937static int __devinit mcp251x_can_probe(struct spi_device *spi)
938{
939 struct net_device *net;
940 struct mcp251x_priv *priv;
941 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
942 int ret = -ENODEV;
943
944 if (!pdata)
945 /* Platform data is required for osc freq */
946 goto error_out;
947
948 /* Allocate can/net device */
949 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
950 if (!net) {
951 ret = -ENOMEM;
952 goto error_alloc;
953 }
954
955 net->netdev_ops = &mcp251x_netdev_ops;
956 net->flags |= IFF_ECHO;
957
958 priv = netdev_priv(net);
959 priv->can.bittiming_const = &mcp251x_bittiming_const;
960 priv->can.do_set_mode = mcp251x_do_set_mode;
961 priv->can.clock.freq = pdata->oscillator_frequency / 2;
962 priv->net = net;
963 dev_set_drvdata(&spi->dev, priv);
964
965 priv->spi = spi;
966 mutex_init(&priv->spi_lock);
967
968 /* If requested, allocate DMA buffers */
969 if (mcp251x_enable_dma) {
970 spi->dev.coherent_dma_mask = ~0;
971
972 /*
973 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
974 * that much and share it between Tx and Rx DMA buffers.
975 */
976 priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
977 PAGE_SIZE,
978 &priv->spi_tx_dma,
979 GFP_DMA);
980
981 if (priv->spi_tx_buf) {
982 priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
983 (PAGE_SIZE / 2));
984 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
985 (PAGE_SIZE / 2));
986 } else {
987 /* Fall back to non-DMA */
988 mcp251x_enable_dma = 0;
989 }
990 }
991
992 /* Allocate non-DMA buffers */
993 if (!mcp251x_enable_dma) {
994 priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
995 if (!priv->spi_tx_buf) {
996 ret = -ENOMEM;
997 goto error_tx_buf;
998 }
999 priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
1000 if (!priv->spi_tx_buf) {
1001 ret = -ENOMEM;
1002 goto error_rx_buf;
1003 }
1004 }
1005
1006 if (pdata->power_enable)
1007 pdata->power_enable(1);
1008
1009 /* Call out to platform specific setup */
1010 if (pdata->board_specific_setup)
1011 pdata->board_specific_setup(spi);
1012
1013 SET_NETDEV_DEV(net, &spi->dev);
1014
1015 priv->wq = create_freezeable_workqueue("mcp251x_wq");
1016
1017 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
1018 INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
1019
1020 init_completion(&priv->awake);
1021
1022 /* Configure the SPI bus */
1023 spi->mode = SPI_MODE_0;
1024 spi->bits_per_word = 8;
1025 spi_setup(spi);
1026
1027 if (!mcp251x_hw_probe(spi)) {
1028 dev_info(&spi->dev, "Probe failed\n");
1029 goto error_probe;
1030 }
1031 mcp251x_hw_sleep(spi);
1032
1033 if (pdata->transceiver_enable)
1034 pdata->transceiver_enable(0);
1035
1036 ret = register_candev(net);
1037 if (!ret) {
1038 dev_info(&spi->dev, "probed\n");
1039 return ret;
1040 }
1041error_probe:
1042 if (!mcp251x_enable_dma)
1043 kfree(priv->spi_rx_buf);
1044error_rx_buf:
1045 if (!mcp251x_enable_dma)
1046 kfree(priv->spi_tx_buf);
1047error_tx_buf:
1048 free_candev(net);
1049 if (mcp251x_enable_dma)
1050 dma_free_coherent(&spi->dev, PAGE_SIZE,
1051 priv->spi_tx_buf, priv->spi_tx_dma);
1052error_alloc:
1053 if (pdata->power_enable)
1054 pdata->power_enable(0);
1055 dev_err(&spi->dev, "probe failed\n");
1056error_out:
1057 return ret;
1058}
1059
1060static int __devexit mcp251x_can_remove(struct spi_device *spi)
1061{
1062 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1063 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1064 struct net_device *net = priv->net;
1065
1066 unregister_candev(net);
1067 free_candev(net);
1068
1069 priv->force_quit = 1;
1070 flush_workqueue(priv->wq);
1071 destroy_workqueue(priv->wq);
1072
1073 if (mcp251x_enable_dma) {
1074 dma_free_coherent(&spi->dev, PAGE_SIZE,
1075 priv->spi_tx_buf, priv->spi_tx_dma);
1076 } else {
1077 kfree(priv->spi_tx_buf);
1078 kfree(priv->spi_rx_buf);
1079 }
1080
1081 if (pdata->power_enable)
1082 pdata->power_enable(0);
1083
1084 return 0;
1085}
1086
1087#ifdef CONFIG_PM
/*
 * Suspend: park the chip and record in priv->after_suspend what must be
 * redone on resume (the irq_work handler performs the actual
 * re-initialization based on these flags).
 */
static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
{
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
	struct net_device *net = priv->net;

	if (netif_running(net)) {
		netif_device_detach(net);

		mcp251x_hw_sleep(spi);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		priv->after_suspend = AFTER_SUSPEND_UP;
	} else {
		priv->after_suspend = AFTER_SUSPEND_DOWN;
	}

	/* Only set POWER when the board can actually re-power the chip */
	if (pdata->power_enable) {
		pdata->power_enable(0);
		priv->after_suspend |= AFTER_SUSPEND_POWER;
	}

	return 0;
}
1112
/*
 * Resume: restore power/transceiver state and let the irq_work handler
 * re-initialize the chip according to priv->after_suspend.
 * AFTER_SUSPEND_POWER is only ever set when pdata->power_enable exists
 * (see mcp251x_can_suspend), so the direct call below is safe.
 */
static int mcp251x_can_resume(struct spi_device *spi)
{
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);

	if (priv->after_suspend & AFTER_SUSPEND_POWER) {
		pdata->power_enable(1);
		queue_work(priv->wq, &priv->irq_work);
	} else {
		if (priv->after_suspend & AFTER_SUSPEND_UP) {
			if (pdata->transceiver_enable)
				pdata->transceiver_enable(1);
			queue_work(priv->wq, &priv->irq_work);
		} else {
			/* Interface was down and power never cut: no-op */
			priv->after_suspend = 0;
		}
	}
	return 0;
}
1132#else
1133#define mcp251x_can_suspend NULL
1134#define mcp251x_can_resume NULL
1135#endif
1136
/* SPI driver glue; remove is discarded when hotplug is compiled out */
static struct spi_driver mcp251x_can_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},

	.probe = mcp251x_can_probe,
	.remove = __devexit_p(mcp251x_can_remove),
	.suspend = mcp251x_can_suspend,
	.resume = mcp251x_can_resume,
};
1149
1150static int __init mcp251x_can_init(void)
1151{
1152 return spi_register_driver(&mcp251x_can_driver);
1153}
1154
1155static void __exit mcp251x_can_exit(void)
1156{
1157 spi_unregister_driver(&mcp251x_can_driver);
1158}
1159
1160module_init(mcp251x_can_init);
1161module_exit(mcp251x_can_exit);
1162
1163MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
1164 "Christian Pellegrin <chripell@evolware.org>");
1165MODULE_DESCRIPTION("Microchip 251x CAN driver");
1166MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
new file mode 100644
index 000000000000..cd0f2d6f375d
--- /dev/null
+++ b/drivers/net/can/mscan/Kconfig
@@ -0,0 +1,23 @@
1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
3 tristate "Support for Freescale MSCAN based chips"
4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition
6 is based on the MSCAN12 definition which is the specific
7 implementation of the Motorola Scalable CAN concept targeted for
8 the Motorola MC68HC12 Microcontroller Family.
9
10if CAN_MSCAN
11
12config CAN_MPC5XXX
13 tristate "Freescale MPC5xxx onboard CAN controller"
14 depends on PPC_MPC52xx
15 ---help---
16 If you say yes here you get support for Freescale's MPC5xxx
17 onboard CAN controller.
18
19 This driver can also be built as a module. If so, the module
20 will be called mscan-mpc5xxx.ko.
21
22endif
23
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
new file mode 100644
index 000000000000..c9fab17cd8b4
--- /dev/null
+++ b/drivers/net/can/mscan/Makefile
@@ -0,0 +1,5 @@
1
2obj-$(CONFIG_CAN_MPC5XXX) += mscan-mpc5xxx.o
3mscan-mpc5xxx-objs := mscan.o mpc5xxx_can.o
4
5ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
new file mode 100644
index 000000000000..1de6f6349b16
--- /dev/null
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -0,0 +1,259 @@
1/*
2 * CAN bus driver for the Freescale MPC5xxx embedded CPU.
3 *
4 * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h>
30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h>
32#include <linux/io.h>
33#include <asm/mpc52xx.h>
34
35#include "mscan.h"
36
37#define DRV_NAME "mpc5xxx_can"
38
/* Match table for the MPC5200 clock distribution module (CDM) node */
static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
	{ .compatible = "fsl,mpc5200-cdm", },
	{}
};
43
44/*
45 * Get frequency of the MSCAN clock source
46 *
47 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
48 * can be selected. According to the MPC5200 user's manual, the oscillator
49 * clock is the better choice as it has less jitter but due to a hardware
50 * bug, it can not be selected for the old MPC5200 Rev. A chips.
51 */
52
53static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
54 int clock_src)
55{
56 unsigned int pvr;
57 struct mpc52xx_cdm __iomem *cdm;
58 struct device_node *np_cdm;
59 unsigned int freq;
60 u32 val;
61
62 pvr = mfspr(SPRN_PVR);
63
64 freq = mpc5xxx_get_bus_frequency(of->node);
65 if (!freq)
66 return 0;
67
68 if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
69 return freq;
70
71 /* Determine SYS_XTAL_IN frequency from the clock domain settings */
72 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
73 if (!np_cdm) {
74 dev_err(&of->dev, "can't get clock node!\n");
75 return 0;
76 }
77 cdm = of_iomap(np_cdm, 0);
78 of_node_put(np_cdm);
79
80 if (in_8(&cdm->ipb_clk_sel) & 0x1)
81 freq *= 2;
82 val = in_be32(&cdm->rstcfg);
83
84 freq *= (val & (1 << 5)) ? 8 : 4;
85 freq /= (val & (1 << 6)) ? 12 : 16;
86
87 iounmap(cdm);
88
89 return freq;
90}
91
/*
 * OF platform probe: map the MSCAN register block, resolve the IRQ,
 * select the clock source from the optional "fsl,mscan-clock-source"
 * property ("ip" = IP bus clock, anything else = oscillator) and
 * register the CAN device.  Returns 0 or a negative errno; resources
 * are unwound in reverse order on failure.
 */
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
				       const struct of_device_id *id)
{
	struct device_node *np = ofdev->node;
	struct net_device *dev;
	struct mscan_priv *priv;
	void __iomem *base;
	const char *clk_src;
	int err, irq, clock_src;

	base = of_iomap(ofdev->node, 0);
	if (!base) {
		dev_err(&ofdev->dev, "couldn't ioremap\n");
		err = -ENOMEM;
		goto exit_release_mem;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(&ofdev->dev, "no irq found\n");
		err = -ENODEV;
		goto exit_unmap_mem;
	}

	dev = alloc_mscandev();
	if (!dev) {
		err = -ENOMEM;
		goto exit_dispose_irq;
	}

	priv = netdev_priv(dev);
	priv->reg_base = base;
	dev->irq = irq;

	/*
	 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
	 * (IP_CLK) can be selected as MSCAN clock source. According to
	 * the MPC5200 user's manual, the oscillator clock is the better
	 * choice as it has less jitter. For this reason, it is selected
	 * by default.
	 */
	clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
	if (clk_src && strcmp(clk_src, "ip") == 0)
		clock_src = MSCAN_CLKSRC_BUS;
	else
		clock_src = MSCAN_CLKSRC_XTAL;
	priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
	if (!priv->can.clock.freq) {
		dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
		err = -ENODEV;
		goto exit_free_mscan;
	}

	SET_NETDEV_DEV(dev, &ofdev->dev);

	err = register_mscandev(dev, clock_src);
	if (err) {
		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
			DRV_NAME, err);
		goto exit_free_mscan;
	}

	dev_set_drvdata(&ofdev->dev, dev);

	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
		 priv->reg_base, dev->irq, priv->can.clock.freq);

	return 0;

exit_free_mscan:
	free_candev(dev);
exit_dispose_irq:
	irq_dispose_mapping(irq);
exit_unmap_mem:
	iounmap(base);
exit_release_mem:
	return err;
}
170
/*
 * OF platform remove: unregister the CAN device and release the
 * register mapping and IRQ mapping acquired in probe, then free the
 * net device last (priv lives inside it).
 */
static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct mscan_priv *priv = netdev_priv(dev);

	dev_set_drvdata(&ofdev->dev, NULL);

	unregister_mscandev(dev);
	iounmap(priv->reg_base);
	irq_dispose_mapping(dev->irq);
	free_candev(dev);

	return 0;
}
185
186#ifdef CONFIG_PM
187static struct mscan_regs saved_regs;
/*
 * Suspend: snapshot the whole MSCAN register block into the static
 * saved_regs buffer so resume can restore it.
 * NOTE(review): a single static buffer supports only one MSCAN
 * instance being suspended at a time — confirm against board usage.
 */
static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	_memcpy_fromio(&saved_regs, regs, sizeof(*regs));

	return 0;
}
198
/*
 * Resume: put the controller into init mode, restore the configuration
 * registers saved in mpc5xxx_can_suspend(), then leave init mode and
 * restore the runtime registers.  The ordering matters: most MSCAN
 * configuration registers are only writable while INITAK is set.
 */
static int mpc5xxx_can_resume(struct of_device *ofdev)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	/* Request init mode and wait for the acknowledge */
	regs->canctl0 |= MSCAN_INITRQ;
	while (!(regs->canctl1 & MSCAN_INITAK))
		udelay(10);

	regs->canctl1 = saved_regs.canctl1;
	regs->canbtr0 = saved_regs.canbtr0;
	regs->canbtr1 = saved_regs.canbtr1;
	regs->canidac = saved_regs.canidac;

	/* restore masks, buffers etc. */
	_memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0,
		     sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0));

	/* Leave init mode, then restore the runtime registers */
	regs->canctl0 &= ~MSCAN_INITRQ;
	regs->cantbsel = saved_regs.cantbsel;
	regs->canrier = saved_regs.canrier;
	regs->cantier = saved_regs.cantier;
	regs->canctl0 = saved_regs.canctl0;

	return 0;
}
226#endif
227
/* Device tree compatible strings this driver binds to */
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
	{.compatible = "fsl,mpc5200-mscan"},
	{},
};
232
/* OF platform driver glue; PM callbacks only built with CONFIG_PM */
static struct of_platform_driver mpc5xxx_can_driver = {
	.owner = THIS_MODULE,
	.name = "mpc5xxx_can",
	.probe = mpc5xxx_can_probe,
	.remove = __devexit_p(mpc5xxx_can_remove),
#ifdef CONFIG_PM
	.suspend = mpc5xxx_can_suspend,
	.resume = mpc5xxx_can_resume,
#endif
	.match_table = mpc5xxx_can_table,
};
244
245static int __init mpc5xxx_can_init(void)
246{
247 return of_register_platform_driver(&mpc5xxx_can_driver);
248}
249module_init(mpc5xxx_can_init);
250
251static void __exit mpc5xxx_can_exit(void)
252{
253 return of_unregister_platform_driver(&mpc5xxx_can_driver);
254};
255module_exit(mpc5xxx_can_exit);
256
257MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
258MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
259MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
new file mode 100644
index 000000000000..bb06dfb58f25
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.c
@@ -0,0 +1,668 @@
1/*
2 * CAN bus driver for the alone generic (as possible as) MSCAN controller.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 7 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/io.h>
35
36#include "mscan.h"
37
/* Bit timing limits of the MSCAN cell, handed to the CAN core */
static struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};
49
/* Saved controller mode and interrupt-enable registers */
struct mscan_state {
	u8 mode;	/* CANCTL0/1 derived mode */
	u8 canrier;	/* receiver interrupt enables */
	u8 cantier;	/* transmitter interrupt enables */
};
55
/* Maps the 2-bit MSCAN RX/TX status field to the CAN core state */
static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};
62
63static int mscan_set_mode(struct net_device *dev, u8 mode)
64{
65 struct mscan_priv *priv = netdev_priv(dev);
66 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
67 int ret = 0;
68 int i;
69 u8 canctl1;
70
71 if (mode != MSCAN_NORMAL_MODE) {
72 if (priv->tx_active) {
73 /* Abort transfers before going to sleep */#
74 out_8(&regs->cantarq, priv->tx_active);
75 /* Suppress TX done interrupts */
76 out_8(&regs->cantier, 0);
77 }
78
79 canctl1 = in_8(&regs->canctl1);
80 if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
81 setbits8(&regs->canctl0, MSCAN_SLPRQ);
82 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
83 if (in_8(&regs->canctl1) & MSCAN_SLPAK)
84 break;
85 udelay(100);
86 }
87 /*
88 * The mscan controller will fail to enter sleep mode,
89 * while there are irregular activities on bus, like
90 * somebody keeps retransmitting. This behavior is
91 * undocumented and seems to differ between mscan built
92 * in mpc5200b and mpc5200. We proceed in that case,
93 * since otherwise the slprq will be kept set and the
94 * controller will get stuck. NOTE: INITRQ or CSWAI
95 * will abort all active transmit actions, if still
96 * any, at once.
97 */
98 if (i >= MSCAN_SET_MODE_RETRIES)
99 dev_dbg(dev->dev.parent,
100 "device failed to enter sleep mode. "
101 "We proceed anyhow.\n");
102 else
103 priv->can.state = CAN_STATE_SLEEPING;
104 }
105
106 if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
107 setbits8(&regs->canctl0, MSCAN_INITRQ);
108 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
109 if (in_8(&regs->canctl1) & MSCAN_INITAK)
110 break;
111 }
112 if (i >= MSCAN_SET_MODE_RETRIES)
113 ret = -ENODEV;
114 }
115 if (!ret)
116 priv->can.state = CAN_STATE_STOPPED;
117
118 if (mode & MSCAN_CSWAI)
119 setbits8(&regs->canctl0, MSCAN_CSWAI);
120
121 } else {
122 canctl1 = in_8(&regs->canctl1);
123 if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
124 clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
125 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
126 canctl1 = in_8(&regs->canctl1);
127 if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
128 break;
129 }
130 if (i >= MSCAN_SET_MODE_RETRIES)
131 ret = -ENODEV;
132 else
133 priv->can.state = CAN_STATE_ERROR_ACTIVE;
134 }
135 }
136 return ret;
137}
138
/*
 * (Re)start the controller: reset the driver's TX bookkeeping, switch
 * to normal mode, derive the initial CAN state from CANRFLG and enable
 * the receive/status-change interrupts.  Returns 0 or a negative errno
 * from mscan_set_mode().
 */
static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u8 canrflg;
	int err;

	/* Mask all receiver interrupts while reconfiguring */
	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	/* Initial state is whichever of RX/TX status is worse */
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
				    MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
	      MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);

	return 0;
}
171
/*
 * ndo_start_xmit: pick a free hardware TX buffer, serialize the CAN
 * frame into it and start transmission.  Buffers with a lower id have
 * higher on-wire priority, so cur_pri/prev_buf_id are used to keep
 * frames in submission order.
 *
 * NOTE(review): returning -EINVAL from a netdev_tx_t handler leaks the
 * skb to the stack's interpretation of the return code — confirm
 * against the netdev xmit contract of this kernel version.
 */
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (frame->can_dlc > 8)
		return -EINVAL;

	out_8(&regs->cantier, 0);

	/* Bitmask of empty TX buffers; pick the lowest-numbered one */
	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then current frame will be send out of order,
		 * since buffer with lower id have higher priority (hell..)
		 */
		netif_stop_queue(dev);
		/* fallthrough: the last buffer is being consumed now */
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				/* Priority byte exhausted: drain all TX first */
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are inbetween the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		/* It is safe to write into dsr[dlc+1] */
		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	out_8(&regs->tx.dlr, frame->can_dlc);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		dev->trans_start = jiffies;

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}
264
265/* This function returns the old state to see where we came from */
266static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
267{
268 struct mscan_priv *priv = netdev_priv(dev);
269 enum can_state state, old_state = priv->can.state;
270
271 if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
272 state = state_map[max(MSCAN_STATE_RX(canrflg),
273 MSCAN_STATE_TX(canrflg))];
274 priv->can.state = state;
275 }
276 return old_state;
277}
278
/*
 * Read one received frame out of the RX foreground buffer into *frame,
 * decoding the MSCAN on-chip ID layout (bit 3 of IDR1_0 distinguishes
 * extended from standard frames, the RTR bit sits below the ID bits)
 * and acknowledge the buffer so the hardware can reuse it.
 */
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
	if (can_id & (1 << 3)) {
		/* Extended frame: stitch the 29-bit ID back together */
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		can_id >>= 4;
		frame->can_id = 0;
	}

	/* Lowest bit is RTR, the ID starts one bit above it */
	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;
	frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		/* Data registers are 16-bit with reserved gaps between */
		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	/* Release the foreground RX buffer back to the hardware */
	out_8(&regs->canrflg, MSCAN_RXF);
}
314
/*
 * Build a CAN error frame from the CANRFLG error/status bits.
 *
 * Fills in *frame with CAN_ERR_* class information, updates the device
 * error statistics and the CAN state machine, and finally acknowledges
 * the error interrupt sources by writing MSCAN_ERR_IF back to CANRFLG.
 */
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state old_state;

	dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	if (canrflg & MSCAN_OVRIF) {
		/* RX overrun: the hardware dropped at least one frame */
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	old_state = check_set_state(dev, canrflg);
	/* State changed */
	if (old_state != priv->can.state) {
		switch (priv->can.state) {
		case CAN_STATE_ERROR_WARNING:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_warning++;
			/*
			 * Compare against the previously seen RSTAT/TSTAT
			 * bits to tell whether RX, TX, or both crossed the
			 * warning threshold.
			 */
			if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
			    (canrflg & MSCAN_RSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;
			if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
			    (canrflg & MSCAN_TSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			break;
		case CAN_STATE_ERROR_PASSIVE:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_passive++;
			frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
			break;
		case CAN_STATE_BUS_OFF:
			frame->can_id |= CAN_ERR_BUSOFF;
			/*
			 * The MSCAN on the MPC5200 does recover from bus-off
			 * automatically. To avoid that we stop the chip doing
			 * a light-weight stop (we are in irq-context).
			 */
			out_8(&regs->cantier, 0);
			out_8(&regs->canrier, 0);
			setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			can_bus_off(dev);
			break;
		default:
			break;
		}
	}
	/* Remember current status bits for the next threshold comparison */
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->can_dlc = CAN_ERR_DLC;
	/* Acknowledge the error interrupt sources */
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}
374
/*
 * NAPI poll handler: drain received frames and pending error events.
 *
 * Returns 1 while more work may remain (quota exhausted) and 0 after
 * completing NAPI and restoring the RX interrupt enables that the ISR
 * saved in priv->shadow_canrier.
 */
static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int npackets = 0;
	int ret = 1;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (npackets < quota) {
		canrflg = in_8(&regs->canrflg);
		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
			break;

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				dev_notice(dev->dev.parent, "packet dropped\n");
			stats->rx_dropped++;
			/* acknowledge the event anyway so we don't spin on it */
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		/* A pending data frame takes precedence over an error event */
		if (canrflg & MSCAN_RXF)
			mscan_get_rx_frame(dev, frame);
		else if (canrflg & MSCAN_ERR_IF)
			mscan_get_err_frame(dev, frame, canrflg);

		/* NOTE(review): error frames are counted in rx stats as well */
		stats->rx_packets++;
		stats->rx_bytes += frame->can_dlc;
		npackets++;
		netif_receive_skb(skb);
	}

	/* Nothing left: complete NAPI and restore RX interrupt enables */
	if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
		napi_complete(&priv->napi);
		clear_bit(F_RX_PROGRESS, &priv->flags);
		/* keep interrupts masked once we have gone bus-off */
		if (priv->can.state < CAN_STATE_BUS_OFF)
			out_8(&regs->canrier, priv->shadow_canrier);
		ret = 0;
	}
	return ret;
}
421
422static irqreturn_t mscan_isr(int irq, void *dev_id)
423{
424 struct net_device *dev = (struct net_device *)dev_id;
425 struct mscan_priv *priv = netdev_priv(dev);
426 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
427 struct net_device_stats *stats = &dev->stats;
428 u8 cantier, cantflg, canrflg;
429 irqreturn_t ret = IRQ_NONE;
430
431 cantier = in_8(&regs->cantier) & MSCAN_TXE;
432 cantflg = in_8(&regs->cantflg) & cantier;
433
434 if (cantier && cantflg) {
435 struct list_head *tmp, *pos;
436
437 list_for_each_safe(pos, tmp, &priv->tx_head) {
438 struct tx_queue_entry *entry =
439 list_entry(pos, struct tx_queue_entry, list);
440 u8 mask = entry->mask;
441
442 if (!(cantflg & mask))
443 continue;
444
445 out_8(&regs->cantbsel, mask);
446 stats->tx_bytes += in_8(&regs->tx.dlr);
447 stats->tx_packets++;
448 can_get_echo_skb(dev, entry->id);
449 priv->tx_active &= ~mask;
450 list_del(pos);
451 }
452
453 if (list_empty(&priv->tx_head)) {
454 clear_bit(F_TX_WAIT_ALL, &priv->flags);
455 clear_bit(F_TX_PROGRESS, &priv->flags);
456 priv->cur_pri = 0;
457 } else {
458 dev->trans_start = jiffies;
459 }
460
461 if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
462 netif_wake_queue(dev);
463
464 out_8(&regs->cantier, priv->tx_active);
465 ret = IRQ_HANDLED;
466 }
467
468 canrflg = in_8(&regs->canrflg);
469 if ((canrflg & ~MSCAN_STAT_MSK) &&
470 !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
471 if (canrflg & ~MSCAN_STAT_MSK) {
472 priv->shadow_canrier = in_8(&regs->canrier);
473 out_8(&regs->canrier, 0);
474 napi_schedule(&priv->napi);
475 ret = IRQ_HANDLED;
476 } else {
477 clear_bit(F_RX_PROGRESS, &priv->flags);
478 }
479 }
480 return ret;
481}
482
483static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
484{
485 struct mscan_priv *priv = netdev_priv(dev);
486 int ret = 0;
487
488 if (!priv->open_time)
489 return -EINVAL;
490
491 switch (mode) {
492 case CAN_MODE_START:
493 if (priv->can.state <= CAN_STATE_BUS_OFF)
494 mscan_set_mode(dev, MSCAN_INIT_MODE);
495 ret = mscan_start(dev);
496 if (ret)
497 break;
498 if (netif_queue_stopped(dev))
499 netif_wake_queue(dev);
500 break;
501
502 default:
503 ret = -EOPNOTSUPP;
504 break;
505 }
506 return ret;
507}
508
509static int mscan_do_set_bittiming(struct net_device *dev)
510{
511 struct mscan_priv *priv = netdev_priv(dev);
512 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
513 struct can_bittiming *bt = &priv->can.bittiming;
514 u8 btr0, btr1;
515
516 btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
517 btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
518 BTR1_SET_TSEG2(bt->phase_seg2) |
519 BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
520
521 dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
522 btr0, btr1);
523
524 out_8(&regs->canbtr0, btr0);
525 out_8(&regs->canbtr1, btr1);
526
527 return 0;
528}
529
530static int mscan_open(struct net_device *dev)
531{
532 int ret;
533 struct mscan_priv *priv = netdev_priv(dev);
534 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
535
536 /* common open */
537 ret = open_candev(dev);
538 if (ret)
539 return ret;
540
541 napi_enable(&priv->napi);
542
543 ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
544 if (ret < 0) {
545 dev_err(dev->dev.parent, "failed to attach interrupt\n");
546 goto exit_napi_disable;
547 }
548
549 priv->open_time = jiffies;
550
551 clrbits8(&regs->canctl1, MSCAN_LISTEN);
552
553 ret = mscan_start(dev);
554 if (ret)
555 goto exit_free_irq;
556
557 netif_start_queue(dev);
558
559 return 0;
560
561exit_free_irq:
562 priv->open_time = 0;
563 free_irq(dev->irq, dev);
564exit_napi_disable:
565 napi_disable(&priv->napi);
566 close_candev(dev);
567 return ret;
568}
569
570static int mscan_close(struct net_device *dev)
571{
572 struct mscan_priv *priv = netdev_priv(dev);
573 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
574
575 netif_stop_queue(dev);
576 napi_disable(&priv->napi);
577
578 out_8(&regs->cantier, 0);
579 out_8(&regs->canrier, 0);
580 mscan_set_mode(dev, MSCAN_INIT_MODE);
581 close_candev(dev);
582 free_irq(dev->irq, dev);
583 priv->open_time = 0;
584
585 return 0;
586}
587
/* netdevice callbacks: up/down handling and frame transmission */
static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open	= mscan_open,
	.ndo_stop	= mscan_close,
	.ndo_start_xmit	= mscan_start_xmit,
};
593
/*
 * Finish hardware setup and register the CAN netdevice.
 *
 * clock_src selects the MSCAN clock source (1 = onchip bus clock,
 * 0 = chip oscillator clock).  The controller is enabled, the
 * acceptance filters are programmed to accept every frame, the chip is
 * parked in init mode, and the device is registered with the CAN core.
 */
int register_mscandev(struct net_device *dev, int clock_src)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (clock_src)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	ctl1 |= MSCAN_CANE;
	out_8(&regs->canctl1, ctl1);
	/* short delay after setting the enable bit (CANE) */
	udelay(100);

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	/* all-ones mask bits mark the corresponding ID bits "don't care" */
	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}
627
/* Disable the controller and unregister the CAN netdevice. */
void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	/* enter init mode first, then drop the enable bit (CANE) */
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	clrbits8(&regs->canctl1, MSCAN_CANE);
	unregister_candev(dev);
}
636
637struct net_device *alloc_mscandev(void)
638{
639 struct net_device *dev;
640 struct mscan_priv *priv;
641 int i;
642
643 dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
644 if (!dev)
645 return NULL;
646 priv = netdev_priv(dev);
647
648 dev->netdev_ops = &mscan_netdev_ops;
649
650 dev->flags |= IFF_ECHO; /* we support local echo */
651
652 netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
653
654 priv->can.bittiming_const = &mscan_bittiming_const;
655 priv->can.do_set_bittiming = mscan_do_set_bittiming;
656 priv->can.do_set_mode = mscan_do_set_mode;
657
658 for (i = 0; i < TX_QUEUE_SIZE; i++) {
659 priv->tx_queue[i].id = i;
660 priv->tx_queue[i].mask = 1 << i;
661 }
662
663 return dev;
664}
665
666MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
667MODULE_LICENSE("GPL v2");
668MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
new file mode 100644
index 000000000000..00fc4aaf1ed8
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.h
@@ -0,0 +1,296 @@
1/*
2 * Definitions of consts/structs to drive the Freescale MSCAN.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the version 2 of the GNU General Public License
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __MSCAN_H__
22#define __MSCAN_H__
23
24#include <linux/types.h>
25
26/* MSCAN control register 0 (CANCTL0) bits */
27#define MSCAN_RXFRM 0x80
28#define MSCAN_RXACT 0x40
29#define MSCAN_CSWAI 0x20
30#define MSCAN_SYNCH 0x10
31#define MSCAN_TIME 0x08
32#define MSCAN_WUPE 0x04
33#define MSCAN_SLPRQ 0x02
34#define MSCAN_INITRQ 0x01
35
36/* MSCAN control register 1 (CANCTL1) bits */
37#define MSCAN_CANE 0x80
38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10
41#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01
44
45/* Use the MPC5200 MSCAN variant? */
46#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200
48#endif
49
50#ifdef MSCAN_FOR_MPC5200
51#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
53#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0
56#endif
57
58/* MSCAN receiver flag register (CANRFLG) bits */
59#define MSCAN_WUPIF 0x80
60#define MSCAN_CSCIF 0x40
61#define MSCAN_RSTAT1 0x20
62#define MSCAN_RSTAT0 0x10
63#define MSCAN_TSTAT1 0x08
64#define MSCAN_TSTAT0 0x04
65#define MSCAN_OVRIF 0x02
66#define MSCAN_RXF 0x01
67#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF)
68#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0)
69#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0)
70#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK)
71
72#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \
73 MSCAN_TSTAT1 | MSCAN_TSTAT0)
74#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2)
75#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4)
76#define MSCAN_STATE_ACTIVE 0
77#define MSCAN_STATE_WARNING 1
78#define MSCAN_STATE_PASSIVE 2
79#define MSCAN_STATE_BUSOFF 3
80
81/* MSCAN receiver interrupt enable register (CANRIER) bits */
82#define MSCAN_WUPIE 0x80
83#define MSCAN_CSCIE 0x40
84#define MSCAN_RSTATE1 0x20
85#define MSCAN_RSTATE0 0x10
86#define MSCAN_TSTATE1 0x08
87#define MSCAN_TSTATE0 0x04
88#define MSCAN_OVRIE 0x02
89#define MSCAN_RXFIE 0x01
90
91/* MSCAN transmitter flag register (CANTFLG) bits */
92#define MSCAN_TXE2 0x04
93#define MSCAN_TXE1 0x02
94#define MSCAN_TXE0 0x01
95#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
96
97/* MSCAN transmitter interrupt enable register (CANTIER) bits */
98#define MSCAN_TXIE2 0x04
99#define MSCAN_TXIE1 0x02
100#define MSCAN_TXIE0 0x01
101#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
102
103/* MSCAN transmitter message abort request (CANTARQ) bits */
104#define MSCAN_ABTRQ2 0x04
105#define MSCAN_ABTRQ1 0x02
106#define MSCAN_ABTRQ0 0x01
107
108/* MSCAN transmitter message abort ack (CANTAAK) bits */
109#define MSCAN_ABTAK2 0x04
110#define MSCAN_ABTAK1 0x02
111#define MSCAN_ABTAK0 0x01
112
113/* MSCAN transmit buffer selection (CANTBSEL) bits */
114#define MSCAN_TX2 0x04
115#define MSCAN_TX1 0x02
116#define MSCAN_TX0 0x01
117
118/* MSCAN ID acceptance control register (CANIDAC) bits */
119#define MSCAN_IDAM1 0x20
120#define MSCAN_IDAM0 0x10
121#define MSCAN_IDHIT2 0x04
122#define MSCAN_IDHIT1 0x02
123#define MSCAN_IDHIT0 0x01
124
125#define MSCAN_AF_32BIT 0x00
126#define MSCAN_AF_16BIT MSCAN_IDAM0
127#define MSCAN_AF_8BIT MSCAN_IDAM1
128#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1)
129#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1))
130
131/* MSCAN Miscellaneous Register (CANMISC) bits */
132#define MSCAN_BOHOLD 0x01
133
134/* MSCAN Identifier Register (IDR) bits */
135#define MSCAN_SFF_RTR_SHIFT 4
136#define MSCAN_EFF_RTR_SHIFT 0
137#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
138
139#ifdef MSCAN_FOR_MPC5200
140#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
141#define _MSCAN_RESERVED_DSR_SIZE 2
142#else
143#define _MSCAN_RESERVED_(n, num)
144#define _MSCAN_RESERVED_DSR_SIZE 0
145#endif
146
/*
 * Structure of the hardware registers.
 *
 * The layout serves both the standalone S12 MSCAN (doc S12MSCANV3/D)
 * and the MPC5200 variant.  On the MPC5200 every other 16-bit slot is
 * reserved; _MSCAN_RESERVED_() (defined above) inserts that padding
 * and expands to nothing for the plain MSCAN.  The comment columns
 * give the MPC5200 offset first, then the plain MSCAN offset.
 */
struct mscan_regs {
	/* (see doc S12MSCANV3/D) MPC5200 MSCAN */
	u8 canctl0;				/* + 0x00     0x00 */
	u8 canctl1;				/* + 0x01     0x01 */
	_MSCAN_RESERVED_(1, 2);			/* + 0x02          */
	u8 canbtr0;				/* + 0x04     0x02 */
	u8 canbtr1;				/* + 0x05     0x03 */
	_MSCAN_RESERVED_(2, 2);			/* + 0x06          */
	u8 canrflg;				/* + 0x08     0x04 */
	u8 canrier;				/* + 0x09     0x05 */
	_MSCAN_RESERVED_(3, 2);			/* + 0x0a          */
	u8 cantflg;				/* + 0x0c     0x06 */
	u8 cantier;				/* + 0x0d     0x07 */
	_MSCAN_RESERVED_(4, 2);			/* + 0x0e          */
	u8 cantarq;				/* + 0x10     0x08 */
	u8 cantaak;				/* + 0x11     0x09 */
	_MSCAN_RESERVED_(5, 2);			/* + 0x12          */
	u8 cantbsel;				/* + 0x14     0x0a */
	u8 canidac;				/* + 0x15     0x0b */
	u8 reserved;				/* + 0x16     0x0c */
	_MSCAN_RESERVED_(6, 5);			/* + 0x17          */
#ifndef MSCAN_FOR_MPC5200
	u8 canmisc;				/*            0x0d */
#endif
	u8 canrxerr;				/* + 0x1c     0x0e */
	u8 cantxerr;				/* + 0x1d     0x0f */
	_MSCAN_RESERVED_(7, 2);			/* + 0x1e          */
	u16 canidar1_0;				/* + 0x20     0x10 */
	_MSCAN_RESERVED_(8, 2);			/* + 0x22          */
	u16 canidar3_2;				/* + 0x24     0x12 */
	_MSCAN_RESERVED_(9, 2);			/* + 0x26          */
	u16 canidmr1_0;				/* + 0x28     0x14 */
	_MSCAN_RESERVED_(10, 2);		/* + 0x2a          */
	u16 canidmr3_2;				/* + 0x2c     0x16 */
	_MSCAN_RESERVED_(11, 2);		/* + 0x2e          */
	u16 canidar5_4;				/* + 0x30     0x18 */
	_MSCAN_RESERVED_(12, 2);		/* + 0x32          */
	u16 canidar7_6;				/* + 0x34     0x1a */
	_MSCAN_RESERVED_(13, 2);		/* + 0x36          */
	u16 canidmr5_4;				/* + 0x38     0x1c */
	_MSCAN_RESERVED_(14, 2);		/* + 0x3a          */
	u16 canidmr7_6;				/* + 0x3c     0x1e */
	_MSCAN_RESERVED_(15, 2);		/* + 0x3e          */
	struct {
		u16 idr1_0;			/* + 0x40     0x20 */
		_MSCAN_RESERVED_(16, 2);	/* + 0x42          */
		u16 idr3_2;			/* + 0x44     0x22 */
		_MSCAN_RESERVED_(17, 2);	/* + 0x46          */
		u16 dsr1_0;			/* + 0x48     0x24 */
		_MSCAN_RESERVED_(18, 2);	/* + 0x4a          */
		u16 dsr3_2;			/* + 0x4c     0x26 */
		_MSCAN_RESERVED_(19, 2);	/* + 0x4e          */
		u16 dsr5_4;			/* + 0x50     0x28 */
		_MSCAN_RESERVED_(20, 2);	/* + 0x52          */
		u16 dsr7_6;			/* + 0x54     0x2a */
		_MSCAN_RESERVED_(21, 2);	/* + 0x56          */
		u8 dlr;				/* + 0x58     0x2c */
		u8:8;				/* + 0x59     0x2d */
		_MSCAN_RESERVED_(22, 2);	/* + 0x5a          */
		u16 time;			/* + 0x5c     0x2e */
	} rx;	/* receive buffer window */
	_MSCAN_RESERVED_(23, 2);		/* + 0x5e          */
	struct {
		u16 idr1_0;			/* + 0x60     0x30 */
		_MSCAN_RESERVED_(24, 2);	/* + 0x62          */
		u16 idr3_2;			/* + 0x64     0x32 */
		_MSCAN_RESERVED_(25, 2);	/* + 0x66          */
		u16 dsr1_0;			/* + 0x68     0x34 */
		_MSCAN_RESERVED_(26, 2);	/* + 0x6a          */
		u16 dsr3_2;			/* + 0x6c     0x36 */
		_MSCAN_RESERVED_(27, 2);	/* + 0x6e          */
		u16 dsr5_4;			/* + 0x70     0x38 */
		_MSCAN_RESERVED_(28, 2);	/* + 0x72          */
		u16 dsr7_6;			/* + 0x74     0x3a */
		_MSCAN_RESERVED_(29, 2);	/* + 0x76          */
		u8 dlr;				/* + 0x78     0x3c */
		u8 tbpr;			/* + 0x79     0x3d */
		_MSCAN_RESERVED_(30, 2);	/* + 0x7a          */
		u16 time;			/* + 0x7c     0x3e */
	} tx;	/* transmit buffer window */
	_MSCAN_RESERVED_(31, 2);		/* + 0x7e          */
} __attribute__ ((packed));
230
#undef _MSCAN_RESERVED_
/*
 * Size of the register window.  Fix: the original said
 * sizeof(struct mscan), but no such type exists in this header --
 * the register layout type is struct mscan_regs, so any use of the
 * macro failed to compile.
 */
#define MSCAN_REGION 	sizeof(struct mscan_regs)
233
234#define MSCAN_NORMAL_MODE 0
235#define MSCAN_SLEEP_MODE MSCAN_SLPRQ
236#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ)
237#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
238#define MSCAN_SET_MODE_RETRIES 255
239#define MSCAN_ECHO_SKB_MAX 3
240
241#define BTR0_BRP_MASK 0x3f
242#define BTR0_SJW_SHIFT 6
243#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT)
244
245#define BTR1_TSEG1_MASK 0xf
246#define BTR1_TSEG2_SHIFT 4
247#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT)
248#define BTR1_SAM_SHIFT 7
249
250#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK)
251#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \
252 BTR0_SJW_MASK)
253
254#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
255#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
256 BTR1_TSEG2_MASK)
257#define BTR1_SET_SAM(sam) ((sam) ? 1 << BTR1_SAM_SHIFT : 0)
258
259#define F_RX_PROGRESS 0
260#define F_TX_PROGRESS 1
261#define F_TX_WAIT_ALL 2
262
263#define TX_QUEUE_SIZE 3
264
/* Bookkeeping for one in-flight hardware TX buffer. */
struct tx_queue_entry {
	struct list_head list;	/* link in mscan_priv.tx_head */
	u8 mask;		/* CANTFLG/CANTIER bit of this buffer (1 << id) */
	u8 id;			/* hardware buffer number / echo skb index */
};
270
/* Driver-private state, embedded in the netdevice. */
struct mscan_priv {
	struct can_priv can;	/* must be the first member */
	long open_time;		/* jiffies at open; 0 while the device is closed */
	unsigned long flags;	/* F_RX_PROGRESS / F_TX_PROGRESS / F_TX_WAIT_ALL */
	void __iomem *reg_base;	/* ioremap'ed address to registers */
	u8 shadow_statflg;	/* last seen CANRFLG status bits (warning detection) */
	u8 shadow_canrier;	/* CANRIER saved by the ISR, restored by NAPI poll */
	u8 cur_pri;		/* priority written to TBPR for the next TX buffer */
	u8 prev_buf_id;		/* presumably the last used TX buffer - confirm in xmit path */
	u8 tx_active;		/* bit mask of TX buffers with interrupt enabled */

	struct list_head tx_head;	/* FIFO of in-flight tx_queue entries */
	struct tx_queue_entry tx_queue[TX_QUEUE_SIZE];
	struct napi_struct napi;
};
286
287extern struct net_device *alloc_mscandev(void);
288/*
289 * clock_src:
290 * 1 = The MSCAN clock source is the onchip Bus Clock.
291 * 0 = The MSCAN clock source is the chip Oscillator Clock.
292 */
293extern int register_mscandev(struct net_device *dev, int clock_src);
294extern void unregister_mscandev(struct net_device *dev);
295
296#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 16d2ecd2a3b7..b4ba88a31075 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -296,11 +296,9 @@ static void sja1000_rx(struct net_device *dev)
296 uint8_t dlc; 296 uint8_t dlc;
297 int i; 297 int i;
298 298
299 skb = dev_alloc_skb(sizeof(struct can_frame)); 299 skb = alloc_can_skb(dev, &cf);
300 if (skb == NULL) 300 if (skb == NULL)
301 return; 301 return;
302 skb->dev = dev;
303 skb->protocol = htons(ETH_P_CAN);
304 302
305 fi = priv->read_reg(priv, REG_FI); 303 fi = priv->read_reg(priv, REG_FI);
306 dlc = fi & 0x0F; 304 dlc = fi & 0x0F;
@@ -323,8 +321,6 @@ static void sja1000_rx(struct net_device *dev)
323 if (fi & FI_RTR) 321 if (fi & FI_RTR)
324 id |= CAN_RTR_FLAG; 322 id |= CAN_RTR_FLAG;
325 323
326 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
327 memset(cf, 0, sizeof(struct can_frame));
328 cf->can_id = id; 324 cf->can_id = id;
329 cf->can_dlc = dlc; 325 cf->can_dlc = dlc;
330 for (i = 0; i < dlc; i++) 326 for (i = 0; i < dlc; i++)
@@ -351,15 +347,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
351 enum can_state state = priv->can.state; 347 enum can_state state = priv->can.state;
352 uint8_t ecc, alc; 348 uint8_t ecc, alc;
353 349
354 skb = dev_alloc_skb(sizeof(struct can_frame)); 350 skb = alloc_can_err_skb(dev, &cf);
355 if (skb == NULL) 351 if (skb == NULL)
356 return -ENOMEM; 352 return -ENOMEM;
357 skb->dev = dev;
358 skb->protocol = htons(ETH_P_CAN);
359 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
360 memset(cf, 0, sizeof(struct can_frame));
361 cf->can_id = CAN_ERR_FLAG;
362 cf->can_dlc = CAN_ERR_DLC;
363 353
364 if (isrc & IRQ_DOI) { 354 if (isrc & IRQ_DOI) {
365 /* data overrun interrupt */ 355 /* data overrun interrupt */
@@ -526,7 +516,7 @@ static int sja1000_open(struct net_device *dev)
526 516
527 /* register interrupt handler, if not done by the device driver */ 517 /* register interrupt handler, if not done by the device driver */
528 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { 518 if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) {
529 err = request_irq(dev->irq, &sja1000_interrupt, priv->irq_flags, 519 err = request_irq(dev->irq, sja1000_interrupt, priv->irq_flags,
530 dev->name, (void *)dev); 520 dev->name, (void *)dev);
531 if (err) { 521 if (err) {
532 close_candev(dev); 522 close_candev(dev);
@@ -565,7 +555,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
565 struct net_device *dev; 555 struct net_device *dev;
566 struct sja1000_priv *priv; 556 struct sja1000_priv *priv;
567 557
568 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv); 558 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv,
559 SJA1000_ECHO_SKB_MAX);
569 if (!dev) 560 if (!dev)
570 return NULL; 561 return NULL;
571 562
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 302d2c763ad7..97a622b9302f 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -50,6 +50,8 @@
50#include <linux/can/dev.h> 50#include <linux/can/dev.h>
51#include <linux/can/platform/sja1000.h> 51#include <linux/can/platform/sja1000.h>
52 52
53#define SJA1000_ECHO_SKB_MAX 1 /* the SJA1000 has one TX buffer object */
54
53#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ 55#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
54 56
55/* SJA1000 registers - manual section 6.4 (Pelican Mode) */ 57/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
new file mode 100644
index 000000000000..07e8016b17ec
--- /dev/null
+++ b/drivers/net/can/ti_hecc.c
@@ -0,0 +1,993 @@
1/*
2 * TI HECC (CAN) device driver
3 *
4 * This driver supports TI's HECC (High End CAN Controller module) and the
5 * specs for the same are available at <http://www.ti.com>
6 *
7 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed as is WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20/*
21 * Your platform definitions should specify module ram offsets and interrupt
22 * number to use as follows:
23 *
24 * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
25 * .scc_hecc_offset = 0,
26 * .scc_ram_offset = 0x3000,
27 * .hecc_ram_offset = 0x3000,
28 * .mbx_offset = 0x2000,
29 * .int_line = 0,
30 * .revision = 1,
31 * };
32 *
33 * Please see include/can/platform/ti_hecc.h for description of above fields
34 *
35 */
36
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/kernel.h>
40#include <linux/types.h>
41#include <linux/interrupt.h>
42#include <linux/errno.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/platform_device.h>
46#include <linux/clk.h>
47
48#include <linux/can.h>
49#include <linux/can/dev.h>
50#include <linux/can/error.h>
51#include <linux/can/platform/ti_hecc.h>
52
53#define DRV_NAME "ti_hecc"
54#define HECC_MODULE_VERSION "0.7"
55MODULE_VERSION(HECC_MODULE_VERSION);
56#define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION
57
58/* TX / RX Mailbox Configuration */
59#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
60#define MAX_TX_PRIO 0x3F /* hardware value - do not change */
61
62/*
63 * Important Note: TX mailbox configuration
64 * TX mailboxes should be restricted to the number of SKB buffers to avoid
65 * maintaining SKB buffers separately. TX mailboxes should be a power of 2
66 * for the mailbox logic to work. Top mailbox numbers are reserved for RX
67 * and lower mailboxes for TX.
68 *
69 * HECC_MAX_TX_MBOX HECC_MB_TX_SHIFT
70 * 4 (default) 2
71 * 8 3
72 * 16 4
73 */
74#define HECC_MB_TX_SHIFT 2 /* as per table above */
75#define HECC_MAX_TX_MBOX BIT(HECC_MB_TX_SHIFT)
76
77#define HECC_TX_PRIO_SHIFT (HECC_MB_TX_SHIFT)
78#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
79#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
80#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
81#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1))
82#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX
83
84/*
85 * Important Note: RX mailbox configuration
86 * RX mailboxes are further logically split into two - main and buffer
87 * mailboxes. The goal is to get all packets into main mailboxes as
88 * driven by mailbox number and receive priority (higher to lower) and
89 * buffer mailboxes are used to receive pkts while main mailboxes are being
90 * processed. This ensures in-order packet reception.
91 *
92 * Here are the recommended values for buffer mailbox. Note that RX mailboxes
93 * start after TX mailboxes:
94 *
95 * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes
96 * 28 12 8
97 * 16 20 4
98 */
99
100#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
101#define HECC_RX_BUFFER_MBOX 12 /* as per table above */
102#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
103#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1))
104
105/* TI HECC module registers */
106#define HECC_CANME 0x0 /* Mailbox enable */
107#define HECC_CANMD 0x4 /* Mailbox direction */
108#define HECC_CANTRS 0x8 /* Transmit request set */
109#define HECC_CANTRR 0xC /* Transmit request */
110#define HECC_CANTA 0x10 /* Transmission acknowledge */
111#define HECC_CANAA 0x14 /* Abort acknowledge */
112#define HECC_CANRMP 0x18 /* Receive message pending */
113#define HECC_CANRML 0x1C /* Remote message lost */
114#define HECC_CANRFP 0x20 /* Remote frame pending */
115#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
116#define HECC_CANMC 0x28 /* Master control */
117#define HECC_CANBTC 0x2C /* Bit timing configuration */
118#define HECC_CANES 0x30 /* Error and status */
119#define HECC_CANTEC 0x34 /* Transmit error counter */
120#define HECC_CANREC 0x38 /* Receive error counter */
121#define HECC_CANGIF0 0x3C /* Global interrupt flag 0 */
122#define HECC_CANGIM 0x40 /* Global interrupt mask */
123#define HECC_CANGIF1 0x44 /* Global interrupt flag 1 */
124#define HECC_CANMIM 0x48 /* Mailbox interrupt mask */
125#define HECC_CANMIL 0x4C /* Mailbox interrupt level */
126#define HECC_CANOPC 0x50 /* Overwrite protection control */
127#define HECC_CANTIOC 0x54 /* Transmit I/O control */
128#define HECC_CANRIOC 0x58 /* Receive I/O control */
129#define HECC_CANLNT 0x5C /* HECC only: Local network time */
130#define HECC_CANTOC 0x60 /* HECC only: Time-out control */
131#define HECC_CANTOS 0x64 /* HECC only: Time-out status */
132#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
133#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
134
135/* Mailbox registers */
136#define HECC_CANMID 0x0
137#define HECC_CANMCF 0x4
138#define HECC_CANMDL 0x8
139#define HECC_CANMDH 0xC
140
141#define HECC_SET_REG 0xFFFFFFFF
142#define HECC_CANID_MASK 0x3FF /* 18 bits mask for extended id's */
143#define HECC_CCE_WAIT_COUNT 100 /* Wait for ~1 sec for CCE bit */
144
145#define HECC_CANMC_SCM BIT(13) /* SCC compat mode */
146#define HECC_CANMC_CCR BIT(12) /* Change config request */
147#define HECC_CANMC_PDR BIT(11) /* Local Power down - for sleep mode */
148#define HECC_CANMC_ABO BIT(7) /* Auto Bus On */
149#define HECC_CANMC_STM BIT(6) /* Self test mode - loopback */
150#define HECC_CANMC_SRES BIT(5) /* Software reset */
151
152#define HECC_CANTIOC_EN BIT(3) /* Enable CAN TX I/O pin */
153#define HECC_CANRIOC_EN BIT(3) /* Enable CAN RX I/O pin */
154
155#define HECC_CANMID_IDE BIT(31) /* Extended frame format */
156#define HECC_CANMID_AME BIT(30) /* Acceptance mask enable */
157#define HECC_CANMID_AAM BIT(29) /* Auto answer mode */
158
159#define HECC_CANES_FE BIT(24) /* form error */
160#define HECC_CANES_BE BIT(23) /* bit error */
161#define HECC_CANES_SA1 BIT(22) /* stuck at dominant error */
162#define HECC_CANES_CRCE BIT(21) /* CRC error */
163#define HECC_CANES_SE BIT(20) /* stuff bit error */
164#define HECC_CANES_ACKE BIT(19) /* ack error */
165#define HECC_CANES_BO BIT(18) /* Bus off status */
166#define HECC_CANES_EP BIT(17) /* Error passive status */
167#define HECC_CANES_EW BIT(16) /* Error warning status */
168#define HECC_CANES_SMA BIT(5) /* suspend mode ack */
169#define HECC_CANES_CCE BIT(4) /* Change config enabled */
170#define HECC_CANES_PDA BIT(3) /* Power down mode ack */
171
172#define HECC_CANBTC_SAM BIT(7) /* sample points */
173
174#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
175 HECC_CANES_CRCE | HECC_CANES_SE |\
176 HECC_CANES_ACKE)
177
178#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
179
180#define HECC_CANGIF_MAIF BIT(17) /* Message alarm interrupt */
181#define HECC_CANGIF_TCOIF BIT(16) /* Timer counter overflow int */
182#define HECC_CANGIF_GMIF BIT(15) /* Global mailbox interrupt */
183#define HECC_CANGIF_AAIF BIT(14) /* Abort ack interrupt */
184#define HECC_CANGIF_WDIF BIT(13) /* Write denied interrupt */
185#define HECC_CANGIF_WUIF BIT(12) /* Wake up interrupt */
186#define HECC_CANGIF_RMLIF BIT(11) /* Receive message lost interrupt */
187#define HECC_CANGIF_BOIF BIT(10) /* Bus off interrupt */
188#define HECC_CANGIF_EPIF BIT(9) /* Error passive interrupt */
189#define HECC_CANGIF_WLIF BIT(8) /* Warning level interrupt */
190#define HECC_CANGIF_MBOX_MASK 0x1F /* Mailbox number mask */
191#define HECC_CANGIM_I1EN BIT(1) /* Int line 1 enable */
192#define HECC_CANGIM_I0EN BIT(0) /* Int line 0 enable */
193#define HECC_CANGIM_DEF_MASK 0x700 /* only busoff/warning/passive */
194#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */
195
/*
 * CAN bit-timing limits of the HECC core, as per the HECC specs.
 * tseg1/tseg2/sjw values are in time quanta; brp is the bit-rate prescaler.
 */
static struct can_bittiming_const ti_hecc_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
208
/* Per-device private state, embedded in the net_device private area. */
struct ti_hecc_priv {
	struct can_priv can;	/* MUST be first member/field */
	struct napi_struct napi;	/* NAPI context for RX polling */
	struct net_device *ndev;	/* back-pointer to our net_device */
	struct clk *clk;	/* functional clock of the HECC module */
	void __iomem *base;	/* ioremapped register/RAM base */
	u32 scc_ram_offset;	/* SCC mailbox RAM offset (from pdata) */
	u32 hecc_ram_offset;	/* HECC mailbox RAM / LAM offset (from pdata) */
	u32 mbx_offset;		/* mailbox register bank offset (from pdata) */
	u32 int_line;		/* interrupt line used: 0 or 1 */
	spinlock_t mbx_lock; /* CANME register needs protection */
	u32 tx_head;		/* next TX mailbox+priority to use */
	u32 tx_tail;		/* oldest in-flight TX mailbox+priority */
	u32 rx_next;		/* next RX mailbox to process in NAPI */
};
224
225static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
226{
227 return priv->tx_head & HECC_TX_MB_MASK;
228}
229
230static inline int get_tx_tail_mb(struct ti_hecc_priv *priv)
231{
232 return priv->tx_tail & HECC_TX_MB_MASK;
233}
234
235static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
236{
237 return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO;
238}
239
/* Write the local acceptance mask (LAM) of mailbox @mbxno (one u32 each). */
static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
{
	__raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4);
}
244
/* Write register @reg of mailbox @mbxno (each mailbox spans 0x10 bytes). */
static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
	u32 reg, u32 val)
{
	__raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 +
			reg);
}
251
/* Read register @reg of mailbox @mbxno (each mailbox spans 0x10 bytes). */
static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
{
	return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 +
			reg);
}
257
/* Write a HECC controller register at offset @reg. */
static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
{
	__raw_writel(val, priv->base + reg);
}
262
/* Read a HECC controller register at offset @reg. */
static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
{
	return __raw_readl(priv->base + reg);
}
267
268static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
269 u32 bit_mask)
270{
271 hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
272}
273
274static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
275 u32 bit_mask)
276{
277 hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
278}
279
280static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
281{
282 return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
283}
284
285static int ti_hecc_get_state(const struct net_device *ndev,
286 enum can_state *state)
287{
288 struct ti_hecc_priv *priv = netdev_priv(ndev);
289
290 *state = priv->can.state;
291 return 0;
292}
293
294static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
295{
296 struct can_bittiming *bit_timing = &priv->can.bittiming;
297 u32 can_btc;
298
299 can_btc = (bit_timing->phase_seg2 - 1) & 0x7;
300 can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1)
301 & 0xF) << 3;
302 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) {
303 if (bit_timing->brp > 4)
304 can_btc |= HECC_CANBTC_SAM;
305 else
306 dev_warn(priv->ndev->dev.parent, "WARN: Triple" \
307 "sampling not set due to h/w limitations");
308 }
309 can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
310 can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
311
312 /* ERM being set to 0 by default meaning resync at falling edge */
313
314 hecc_write(priv, HECC_CANBTC, can_btc);
315 dev_info(priv->ndev->dev.parent, "setting CANBTC=%#x\n", can_btc);
316
317 return 0;
318}
319
/*
 * ti_hecc_reset - soft-reset the controller and bring it to a clean,
 * configured state: bit timing programmed, I/O pins enabled, all
 * status/interrupt flags and mailbox enables cleared.
 *
 * The sequence (soft reset -> CCR set -> program BTC -> CCR clear) follows
 * the HECC configuration-mode protocol and must not be reordered.
 */
static void ti_hecc_reset(struct net_device *ndev)
{
	u32 cnt;
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	dev_dbg(ndev->dev.parent, "resetting hecc ...\n");
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES);

	/* Set change control request and wait till enabled */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);

	/*
	 * INFO: It has been observed that at times CCE bit may not be
	 * set and hw seems to be ok even if this bit is not set so
	 * timing out with a timing of 1ms to respect the specs
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/*
	 * Note: On HECC, BTC can be programmed only in initialization mode, so
	 * it is expected that the can bittiming parameters are set via ip
	 * utility before the device is opened
	 */
	ti_hecc_set_btc(priv);

	/* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
	hecc_write(priv, HECC_CANMC, 0);

	/*
	 * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
	 * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
	 */

	/*
	 * INFO: It has been observed that at times CCE bit may not be
	 * cleared and hw seems to be ok even so; time out after
	 * HECC_CCE_WAIT_COUNT iterations rather than waiting forever
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/* Enable TX and RX I/O Control pins */
	hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN);
	hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN);

	/* Clear registers for clean operation (write-1-to-clear flags) */
	hecc_write(priv, HECC_CANTA, HECC_SET_REG);
	hecc_write(priv, HECC_CANRMP, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
	hecc_write(priv, HECC_CANME, 0);
	hecc_write(priv, HECC_CANMD, 0);

	/* SCC compat mode NOT supported (and not needed too) */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM);
}
382
/*
 * ti_hecc_start - (re)initialize the controller for operation: reset the
 * hardware, arm all RX mailboxes with an accept-all filter and enable
 * the configured interrupt line. Leaves the device ERROR_ACTIVE.
 */
static void ti_hecc_start(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	u32 cnt, mbxno, mbx_mask;

	/* put HECC in initialization mode and set btc */
	ti_hecc_reset(ndev);

	/* start with highest priority/mailbox; see ti_hecc_xmit comments */
	priv->tx_head = priv->tx_tail = HECC_TX_MASK;
	priv->rx_next = HECC_RX_FIRST_MBOX;

	/* Enable local and global acceptance mask registers */
	hecc_write(priv, HECC_CANGAM, HECC_SET_REG);

	/* Prepare configured mailboxes to receive messages */
	for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) {
		mbxno = HECC_MAX_MAILBOXES - 1 - cnt;
		mbx_mask = BIT(mbxno);
		/* disable before (re)configuring, then enable + unmask int */
		hecc_clear_bit(priv, HECC_CANME, mbx_mask);
		hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME);
		hecc_write_lam(priv, mbxno, HECC_SET_REG);
		hecc_set_bit(priv, HECC_CANMD, mbx_mask);
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
		hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	}

	/* Prevent message over-write & Enable interrupts */
	hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
	if (priv->int_line) {
		/* route all mailbox + system interrupts to int line 1 */
		hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
		hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
			HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
	} else {
		hecc_write(priv, HECC_CANMIL, 0);
		hecc_write(priv, HECC_CANGIM,
			HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
	}
	priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
422
/*
 * ti_hecc_stop - quiesce the controller: mask all interrupts first, then
 * disable every mailbox, and mark the device stopped.
 */
static void ti_hecc_stop(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	/* Disable interrupts and disable mailboxes */
	hecc_write(priv, HECC_CANGIM, 0);
	hecc_write(priv, HECC_CANMIM, 0);
	hecc_write(priv, HECC_CANME, 0);
	priv->can.state = CAN_STATE_STOPPED;
}
433
434static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
435{
436 int ret = 0;
437
438 switch (mode) {
439 case CAN_MODE_START:
440 ti_hecc_start(ndev);
441 netif_wake_queue(ndev);
442 break;
443 default:
444 ret = -EOPNOTSUPP;
445 break;
446 }
447
448 return ret;
449}
450
451/*
452 * ti_hecc_xmit: HECC Transmit
453 *
454 * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
455 * priority of the mailbox for tranmission is dependent upon priority setting
456 * field in mailbox registers. The mailbox with highest value in priority field
457 * is transmitted first. Only when two mailboxes have the same value in
458 * priority field the highest numbered mailbox is transmitted first.
459 *
460 * To utilize the HECC priority feature as described above we start with the
461 * highest numbered mailbox with highest priority level and move on to the next
462 * mailbox with the same priority level and so on. Once we loop through all the
463 * transmit mailboxes we choose the next priority level (lower) and so on
464 * until we reach the lowest priority level on the lowest numbered mailbox
465 * when we stop transmission until all mailboxes are transmitted and then
466 * restart at highest numbered mailbox with highest priority.
467 *
468 * Two counters (head and tail) are used to track the next mailbox to transmit
469 * and to track the echo buffer for already transmitted mailbox. The queue
470 * is stopped when all the mailboxes are busy or when there is a priority
471 * value roll-over happens.
472 */
static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 mbxno, mbx_mask, data;
	unsigned long flags;

	/* pick the mailbox designated by the TX head counter */
	mbxno = get_tx_head_mb(priv);
	mbx_mask = BIT(mbxno);
	spin_lock_irqsave(&priv->mbx_lock, flags);
	/* mailbox still enabled means previous TX not reaped - should not
	 * happen since the queue is stopped when the head mailbox is busy */
	if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
		spin_unlock_irqrestore(&priv->mbx_lock, flags);
		netif_stop_queue(ndev);
		dev_err(priv->ndev->dev.parent,
			"BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
			priv->tx_head, priv->tx_tail);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* Prepare mailbox for transmission: DLC, RTR and h/w priority */
	data = min_t(u8, cf->can_dlc, 8);
	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
		data |= HECC_CANMCF_RTR;
	data |= get_tx_head_prio(priv) << 8;
	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);

	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
		data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
	else /* Standard frame format */
		data = (cf->can_id & CAN_SFF_MASK) << 18;
	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
		be32_to_cpu(*(u32 *)(cf->data)));
	if (cf->can_dlc > 4)
		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
			be32_to_cpu(*(u32 *)(cf->data + 4)));
	else
		/* zero the unused upper payload half in the skb so the
		 * echoed frame does not carry stale data */
		*(u32 *)(cf->data + 4) = 0;
	/* park skb for TX-complete echo; index by mailbox number */
	can_put_echo_skb(skb, ndev, mbxno);

	spin_lock_irqsave(&priv->mbx_lock, flags);
	--priv->tx_head;
	/* stop the queue if the next head mailbox is busy or the
	 * priority value rolled over (see function header comment) */
	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
		(priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
		netif_stop_queue(ndev);
	}
	hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* direction = TX, unmask its interrupt, then request transmission */
	hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
	hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	hecc_write(priv, HECC_CANTRS, mbx_mask);

	return NETDEV_TX_OK;
}
529
/*
 * ti_hecc_rx_pkt - read one received frame out of mailbox @mbxno, push it
 * up the stack and re-arm the mailbox (buffer-bank mailboxes only).
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated.
 */
static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 data, mbx_mask;
	unsigned long flags;

	skb = alloc_can_skb(priv->ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
		return -ENOMEM;
	}

	/* decode identifier: IDE bit selects extended vs standard format */
	mbx_mask = BIT(mbxno);
	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
	if (data & HECC_CANMID_IDE)
		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (data >> 18) & CAN_SFF_MASK;
	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
	if (data & HECC_CANMCF_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->can_dlc = data & 0xF;
	/* copy up to 8 payload bytes; unused upper word is zeroed */
	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
	*(u32 *)(cf->data) = cpu_to_be32(data);
	if (cf->can_dlc > 4) {
		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
		*(u32 *)(cf->data + 4) = cpu_to_be32(data);
	} else {
		*(u32 *)(cf->data + 4) = 0;
	}
	spin_lock_irqsave(&priv->mbx_lock, flags);
	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
	/* ack reception (write-1-to-clear the pending flag) */
	hecc_write(priv, HECC_CANRMP, mbx_mask);
	/* enable mailbox only if it is part of rx buffer mailboxes */
	if (priv->rx_next < HECC_RX_BUFFER_MBOX)
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	stats->rx_packets++;

	return 0;
}
578
579/*
580 * ti_hecc_rx_poll - HECC receive pkts
581 *
582 * The receive mailboxes start from highest numbered mailbox till last xmit
583 * mailbox. On CAN frame reception the hardware places the data into highest
584 * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
585 * have same filtering (ALL CAN frames) packets will arrive in the highest
586 * available RX mailbox and we need to ensure in-order packet reception.
587 *
588 * To ensure the packets are received in the right order we logically divide
589 * the RX mailboxes into main and buffer mailboxes. Packets are received as per
590 * mailbox priotity (higher to lower) in the main bank and once it is full we
591 * disable further reception into main mailboxes. While the main mailboxes are
592 * processed in NAPI, further packets are received in buffer mailboxes.
593 *
594 * We maintain a RX next mailbox counter to process packets and once all main
595 * mailboxe packets are passed to the upper stack we enable all of them but
596 * continue to process packets received in buffer mailboxes. With each packet
597 * received from buffer mailbox we enable it immediately so as to handle the
598 * overflow from higher mailboxes.
599 */
static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	u32 num_pkts = 0;
	u32 mbx_mask;
	unsigned long pending_pkts, flags;

	if (!netif_running(ndev))
		return 0;

	/* drain pending mailboxes in descending order up to the quota */
	while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
		num_pkts < quota) {
		mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
		if (mbx_mask & pending_pkts) {
			if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
				return num_pkts;
			++num_pkts;
		} else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
			break; /* pkt not received yet */
		}
		--priv->rx_next;
		if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
			/* enable high bank mailboxes */
			spin_lock_irqsave(&priv->mbx_lock, flags);
			mbx_mask = hecc_read(priv, HECC_CANME);
			mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
			hecc_write(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
		} else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
			/* wrapped below the buffer bank: restart at the top */
			priv->rx_next = HECC_RX_FIRST_MBOX;
			break;
		}
	}

	/* Enable packet interrupt if all pkts are handled */
	if (hecc_read(priv, HECC_CANRMP) == 0) {
		napi_complete(napi);
		/* Re-enable RX mailbox interrupts */
		mbx_mask = hecc_read(priv, HECC_CANMIM);
		mbx_mask |= HECC_TX_MBOX_MASK;
		hecc_write(priv, HECC_CANMIM, mbx_mask);
	}

	return num_pkts;
}
646
/*
 * ti_hecc_error - translate controller warning/passive/bus-off and bus
 * error conditions into a CAN error frame and update the device state.
 *
 * @int_status: snapshot of CANGIF0/1; @err_status: snapshot of CANES.
 * Returns 0 on success, -ENOMEM if the error skb could not be allocated.
 */
static int ti_hecc_error(struct net_device *ndev, int int_status,
	int err_status)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* propagate the error condition to the can stack */
	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_error: alloc_can_err_skb() failed\n");
		return -ENOMEM;
	}

	if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
		/* skip state change if we are already heading to bus-off */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_WARNING;
			++priv->can.can_stats.error_warning;
			cf->can_id |= CAN_ERR_CRTL;
			if (hecc_read(priv, HECC_CANTEC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			if (hecc_read(priv, HECC_CANREC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
		}
		/* ack the status flag (write-1-to-clear) */
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
		dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
			++priv->can.can_stats.error_passive;
			cf->can_id |= CAN_ERR_CRTL;
			if (hecc_read(priv, HECC_CANTEC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
			if (hecc_read(priv, HECC_CANREC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		}
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
		dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	/*
	 * Need to check busoff condition in error status register too to
	 * ensure warning interrupts don't hog the system
	 */
	if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
		priv->can.state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
		/* Disable all interrupts in bus-off to avoid int hog */
		hecc_write(priv, HECC_CANGIM, 0);
		can_bus_off(ndev);
	}

	if (err_status & HECC_BUS_ERROR) {
		++priv->can.can_stats.bus_error;
		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
		if (err_status & HECC_CANES_FE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
			cf->data[2] |= CAN_ERR_PROT_FORM;
		}
		if (err_status & HECC_CANES_BE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
			cf->data[2] |= CAN_ERR_PROT_BIT;
		}
		if (err_status & HECC_CANES_SE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		}
		if (err_status & HECC_CANES_CRCE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
			cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
					CAN_ERR_PROT_LOC_CRC_DEL;
		}
		if (err_status & HECC_CANES_ACKE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
			cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
					CAN_ERR_PROT_LOC_ACK_DEL;
		}
	}

	netif_receive_skb(skb);
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	return 0;
}
741
/*
 * ti_hecc_interrupt - IRQ handler: dispatch error conditions, reap
 * completed TX mailboxes (echoing skbs back to the stack), and kick
 * NAPI for pending RX frames.
 */
static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 mbxno, mbx_mask, int_status, err_status;
	unsigned long ack, flags;

	/* read the global int flag register for our configured int line */
	int_status = hecc_read(priv,
		(priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);

	if (!int_status)
		return IRQ_NONE;

	err_status = hecc_read(priv, HECC_CANES);
	if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
		HECC_CANES_EP | HECC_CANES_EW))
			ti_hecc_error(ndev, int_status, err_status);

	if (int_status & HECC_CANGIF_GMIF) {
		/* reap TX completions in tail order while CANTA is acked */
		while (priv->tx_tail - priv->tx_head > 0) {
			mbxno = get_tx_tail_mb(priv);
			mbx_mask = BIT(mbxno);
			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
				break;
			hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
			hecc_write(priv, HECC_CANTA, mbx_mask);
			spin_lock_irqsave(&priv->mbx_lock, flags);
			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
			/* low nibble of CANMCF is the DLC of the sent frame */
			stats->tx_bytes += hecc_read_mbx(priv, mbxno,
						HECC_CANMCF) & 0xF;
			stats->tx_packets++;
			can_get_echo_skb(ndev, mbxno);
			--priv->tx_tail;
		}

		/* restart queue if wrap-up or if queue stalled on last pkt */
		if (((priv->tx_head == priv->tx_tail) &&
				((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
				(((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
				 ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
			netif_wake_queue(ndev);

		/* Disable RX mailbox interrupts and let NAPI reenable them */
		if (hecc_read(priv, HECC_CANRMP)) {
			ack = hecc_read(priv, HECC_CANMIM);
			ack &= BIT(HECC_MAX_TX_MBOX) - 1;
			hecc_write(priv, HECC_CANMIM, ack);
			napi_schedule(&priv->napi);
		}
	}

	/* clear all interrupt conditions - read back to avoid spurious ints */
	if (priv->int_line) {
		hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF1);
	} else {
		hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF0);
	}

	return IRQ_HANDLED;
}
806
807static int ti_hecc_open(struct net_device *ndev)
808{
809 struct ti_hecc_priv *priv = netdev_priv(ndev);
810 int err;
811
812 err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
813 ndev->name, ndev);
814 if (err) {
815 dev_err(ndev->dev.parent, "error requesting interrupt\n");
816 return err;
817 }
818
819 /* Open common can device */
820 err = open_candev(ndev);
821 if (err) {
822 dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
823 free_irq(ndev->irq, ndev);
824 return err;
825 }
826
827 clk_enable(priv->clk);
828 ti_hecc_start(ndev);
829 napi_enable(&priv->napi);
830 netif_start_queue(ndev);
831
832 return 0;
833}
834
/*
 * ti_hecc_close - ndo_stop callback. Teardown order matters: stop the TX
 * queue and NAPI before quiescing the hardware, release the IRQ before
 * gating the clock, then close the common CAN device.
 */
static int ti_hecc_close(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	ti_hecc_stop(ndev);
	free_irq(ndev->irq, ndev);
	clk_disable(priv->clk);
	close_candev(ndev);

	return 0;
}
848
/* net_device callbacks; open/stop/start_xmit are all this driver needs */
static const struct net_device_ops ti_hecc_netdev_ops = {
	.ndo_open		= ti_hecc_open,
	.ndo_stop		= ti_hecc_close,
	.ndo_start_xmit		= ti_hecc_xmit,
};
854
855static int ti_hecc_probe(struct platform_device *pdev)
856{
857 struct net_device *ndev = (struct net_device *)0;
858 struct ti_hecc_priv *priv;
859 struct ti_hecc_platform_data *pdata;
860 struct resource *mem, *irq;
861 void __iomem *addr;
862 int err = -ENODEV;
863
864 pdata = pdev->dev.platform_data;
865 if (!pdata) {
866 dev_err(&pdev->dev, "No platform data\n");
867 goto probe_exit;
868 }
869
870 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
871 if (!mem) {
872 dev_err(&pdev->dev, "No mem resources\n");
873 goto probe_exit;
874 }
875 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
876 if (!irq) {
877 dev_err(&pdev->dev, "No irq resource\n");
878 goto probe_exit;
879 }
880 if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
881 dev_err(&pdev->dev, "HECC region already claimed\n");
882 err = -EBUSY;
883 goto probe_exit;
884 }
885 addr = ioremap(mem->start, resource_size(mem));
886 if (!addr) {
887 dev_err(&pdev->dev, "ioremap failed\n");
888 err = -ENOMEM;
889 goto probe_exit_free_region;
890 }
891
892 ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
893 if (!ndev) {
894 dev_err(&pdev->dev, "alloc_candev failed\n");
895 err = -ENOMEM;
896 goto probe_exit_iounmap;
897 }
898
899 priv = netdev_priv(ndev);
900 priv->ndev = ndev;
901 priv->base = addr;
902 priv->scc_ram_offset = pdata->scc_ram_offset;
903 priv->hecc_ram_offset = pdata->hecc_ram_offset;
904 priv->mbx_offset = pdata->mbx_offset;
905 priv->int_line = pdata->int_line;
906
907 priv->can.bittiming_const = &ti_hecc_bittiming_const;
908 priv->can.do_set_mode = ti_hecc_do_set_mode;
909 priv->can.do_get_state = ti_hecc_get_state;
910
911 ndev->irq = irq->start;
912 ndev->flags |= IFF_ECHO;
913 platform_set_drvdata(pdev, ndev);
914 SET_NETDEV_DEV(ndev, &pdev->dev);
915 ndev->netdev_ops = &ti_hecc_netdev_ops;
916
917 priv->clk = clk_get(&pdev->dev, "hecc_ck");
918 if (IS_ERR(priv->clk)) {
919 dev_err(&pdev->dev, "No clock available\n");
920 err = PTR_ERR(priv->clk);
921 priv->clk = NULL;
922 goto probe_exit_candev;
923 }
924 priv->can.clock.freq = clk_get_rate(priv->clk);
925 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
926 HECC_DEF_NAPI_WEIGHT);
927
928 err = register_candev(ndev);
929 if (err) {
930 dev_err(&pdev->dev, "register_candev() failed\n");
931 goto probe_exit_clk;
932 }
933 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
934 priv->base, (u32) ndev->irq);
935
936 return 0;
937
938probe_exit_clk:
939 clk_put(priv->clk);
940probe_exit_candev:
941 free_candev(ndev);
942probe_exit_iounmap:
943 iounmap(addr);
944probe_exit_free_region:
945 release_mem_region(mem->start, resource_size(mem));
946probe_exit:
947 return err;
948}
949
950static int __devexit ti_hecc_remove(struct platform_device *pdev)
951{
952 struct resource *res;
953 struct net_device *ndev = platform_get_drvdata(pdev);
954 struct ti_hecc_priv *priv = netdev_priv(ndev);
955
956 clk_put(priv->clk);
957 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
958 iounmap(priv->base);
959 release_mem_region(res->start, resource_size(res));
960 unregister_candev(ndev);
961 free_candev(ndev);
962 platform_set_drvdata(pdev, NULL);
963
964 return 0;
965}
966
/* TI HECC netdevice driver: platform driver structure */
static struct platform_driver ti_hecc_driver = {
	.driver = {
		.name   = DRV_NAME,
		.owner  = THIS_MODULE,
	},
	.probe = ti_hecc_probe,
	.remove = __devexit_p(ti_hecc_remove),
};
976
/* Module init: announce the driver and register it with the platform bus. */
static int __init ti_hecc_init_driver(void)
{
	printk(KERN_INFO DRV_DESC "\n");
	return platform_driver_register(&ti_hecc_driver);
}
module_init(ti_hecc_init_driver);
983
/* Module exit: unregister the platform driver. */
static void __exit ti_hecc_exit_driver(void)
{
	printk(KERN_INFO DRV_DESC " unloaded\n");
	platform_driver_unregister(&ti_hecc_driver);
}
module_exit(ti_hecc_exit_driver);
990
991MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
992MODULE_LICENSE("GPL v2");
993MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index abdbd9c2b788..3e4419054c81 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -232,7 +232,7 @@ MODULE_DEVICE_TABLE(usb, ems_usb_table);
232#define INTR_IN_BUFFER_SIZE 4 232#define INTR_IN_BUFFER_SIZE 4
233 233
234#define MAX_RX_URBS 10 234#define MAX_RX_URBS 10
235#define MAX_TX_URBS CAN_ECHO_SKB_MAX 235#define MAX_TX_URBS 10
236 236
237struct ems_usb; 237struct ems_usb;
238 238
@@ -311,14 +311,10 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
311 int i; 311 int i;
312 struct net_device_stats *stats = &dev->netdev->stats; 312 struct net_device_stats *stats = &dev->netdev->stats;
313 313
314 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 314 skb = alloc_can_skb(dev->netdev, &cf);
315 if (skb == NULL) 315 if (skb == NULL)
316 return; 316 return;
317 317
318 skb->protocol = htons(ETH_P_CAN);
319
320 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
321
322 cf->can_id = le32_to_cpu(msg->msg.can_msg.id); 318 cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
323 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); 319 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
324 320
@@ -346,18 +342,10 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
346 struct sk_buff *skb; 342 struct sk_buff *skb;
347 struct net_device_stats *stats = &dev->netdev->stats; 343 struct net_device_stats *stats = &dev->netdev->stats;
348 344
349 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 345 skb = alloc_can_err_skb(dev->netdev, &cf);
350 if (skb == NULL) 346 if (skb == NULL)
351 return; 347 return;
352 348
353 skb->protocol = htons(ETH_P_CAN);
354
355 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
356 memset(cf, 0, sizeof(struct can_frame));
357
358 cf->can_id = CAN_ERR_FLAG;
359 cf->can_dlc = CAN_ERR_DLC;
360
361 if (msg->type == CPC_MSG_TYPE_CAN_STATE) { 349 if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
362 u8 state = msg->msg.can_state; 350 u8 state = msg->msg.can_state;
363 351
@@ -1015,7 +1003,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1015 struct ems_usb *dev; 1003 struct ems_usb *dev;
1016 int i, err = -ENOMEM; 1004 int i, err = -ENOMEM;
1017 1005
1018 netdev = alloc_candev(sizeof(struct ems_usb)); 1006 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
1019 if (!netdev) { 1007 if (!netdev) {
1020 dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); 1008 dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
1021 return -ENOMEM; 1009 return -ENOMEM;
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 3bf1b04f2cab..ee7eb9ee77e2 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -33,10 +33,16 @@
33#include <net/route.h> 33#include <net/route.h>
34#include <net/ipv6.h> 34#include <net/ipv6.h>
35#include <net/ip6_route.h> 35#include <net/ip6_route.h>
36#include <net/ip6_checksum.h>
36#include <scsi/iscsi_if.h> 37#include <scsi/iscsi_if.h>
37 38
38#include "cnic_if.h" 39#include "cnic_if.h"
39#include "bnx2.h" 40#include "bnx2.h"
41#include "bnx2x_reg.h"
42#include "bnx2x_fw_defs.h"
43#include "bnx2x_hsi.h"
44#include "../scsi/bnx2i/57xx_iscsi_constants.h"
45#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
40#include "cnic.h" 46#include "cnic.h"
41#include "cnic_defs.h" 47#include "cnic_defs.h"
42 48
@@ -59,6 +65,7 @@ static DEFINE_MUTEX(cnic_lock);
59static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; 65static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
60 66
61static int cnic_service_bnx2(void *, void *); 67static int cnic_service_bnx2(void *, void *);
68static int cnic_service_bnx2x(void *, void *);
62static int cnic_ctl(void *, struct cnic_ctl_info *); 69static int cnic_ctl(void *, struct cnic_ctl_info *);
63 70
64static struct cnic_ops cnic_bnx2_ops = { 71static struct cnic_ops cnic_bnx2_ops = {
@@ -67,9 +74,14 @@ static struct cnic_ops cnic_bnx2_ops = {
67 .cnic_ctl = cnic_ctl, 74 .cnic_ctl = cnic_ctl,
68}; 75};
69 76
70static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *); 77static struct cnic_ops cnic_bnx2x_ops = {
71static void cnic_init_bnx2_tx_ring(struct cnic_dev *); 78 .cnic_owner = THIS_MODULE,
72static void cnic_init_bnx2_rx_ring(struct cnic_dev *); 79 .cnic_handler = cnic_service_bnx2x,
80 .cnic_ctl = cnic_ctl,
81};
82
83static void cnic_shutdown_rings(struct cnic_dev *);
84static void cnic_init_rings(struct cnic_dev *);
73static int cnic_cm_set_pg(struct cnic_sock *); 85static int cnic_cm_set_pg(struct cnic_sock *);
74 86
75static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) 87static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
@@ -83,10 +95,16 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
83 if (cp->uio_dev != -1) 95 if (cp->uio_dev != -1)
84 return -EBUSY; 96 return -EBUSY;
85 97
98 rtnl_lock();
99 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
100 rtnl_unlock();
101 return -ENODEV;
102 }
103
86 cp->uio_dev = iminor(inode); 104 cp->uio_dev = iminor(inode);
87 105
88 cnic_init_bnx2_tx_ring(dev); 106 cnic_init_rings(dev);
89 cnic_init_bnx2_rx_ring(dev); 107 rtnl_unlock();
90 108
91 return 0; 109 return 0;
92} 110}
@@ -96,7 +114,7 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
96 struct cnic_dev *dev = uinfo->priv; 114 struct cnic_dev *dev = uinfo->priv;
97 struct cnic_local *cp = dev->cnic_priv; 115 struct cnic_local *cp = dev->cnic_priv;
98 116
99 cnic_shutdown_bnx2_rx_ring(dev); 117 cnic_shutdown_rings(dev);
100 118
101 cp->uio_dev = -1; 119 cp->uio_dev = -1;
102 return 0; 120 return 0;
@@ -162,6 +180,36 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
162 ethdev->drv_ctl(dev->netdev, &info); 180 ethdev->drv_ctl(dev->netdev, &info);
163} 181}
164 182
183static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
184{
185 struct cnic_local *cp = dev->cnic_priv;
186 struct cnic_eth_dev *ethdev = cp->ethdev;
187 struct drv_ctl_info info;
188 struct drv_ctl_io *io = &info.data.io;
189
190 info.cmd = DRV_CTL_CTXTBL_WR_CMD;
191 io->offset = off;
192 io->dma_addr = addr;
193 ethdev->drv_ctl(dev->netdev, &info);
194}
195
196static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
197{
198 struct cnic_local *cp = dev->cnic_priv;
199 struct cnic_eth_dev *ethdev = cp->ethdev;
200 struct drv_ctl_info info;
201 struct drv_ctl_l2_ring *ring = &info.data.ring;
202
203 if (start)
204 info.cmd = DRV_CTL_START_L2_CMD;
205 else
206 info.cmd = DRV_CTL_STOP_L2_CMD;
207
208 ring->cid = cid;
209 ring->client_id = cl_id;
210 ethdev->drv_ctl(dev->netdev, &info);
211}
212
165static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) 213static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
166{ 214{
167 struct cnic_local *cp = dev->cnic_priv; 215 struct cnic_local *cp = dev->cnic_priv;
@@ -204,6 +252,19 @@ static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
204 ethdev->drv_ctl(dev->netdev, &info); 252 ethdev->drv_ctl(dev->netdev, &info);
205} 253}
206 254
255static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
256{
257 u32 i;
258
259 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
260 if (cp->ctx_tbl[i].cid == cid) {
261 *l5_cid = i;
262 return 0;
263 }
264 }
265 return -EINVAL;
266}
267
207static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, 268static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
208 struct cnic_sock *csk) 269 struct cnic_sock *csk)
209{ 270{
@@ -347,7 +408,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
347{ 408{
348 struct cnic_dev *dev; 409 struct cnic_dev *dev;
349 410
350 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 411 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
351 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", 412 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
352 ulp_type); 413 ulp_type);
353 return -EINVAL; 414 return -EINVAL;
@@ -393,7 +454,7 @@ int cnic_unregister_driver(int ulp_type)
393 struct cnic_ulp_ops *ulp_ops; 454 struct cnic_ulp_ops *ulp_ops;
394 int i = 0; 455 int i = 0;
395 456
396 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 457 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
397 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", 458 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
398 ulp_type); 459 ulp_type);
399 return -EINVAL; 460 return -EINVAL;
@@ -449,7 +510,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
449 struct cnic_local *cp = dev->cnic_priv; 510 struct cnic_local *cp = dev->cnic_priv;
450 struct cnic_ulp_ops *ulp_ops; 511 struct cnic_ulp_ops *ulp_ops;
451 512
452 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 513 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
453 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", 514 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
454 ulp_type); 515 ulp_type);
455 return -EINVAL; 516 return -EINVAL;
@@ -490,7 +551,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
490 struct cnic_local *cp = dev->cnic_priv; 551 struct cnic_local *cp = dev->cnic_priv;
491 int i = 0; 552 int i = 0;
492 553
493 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 554 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
494 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", 555 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
495 ulp_type); 556 ulp_type);
496 return -EINVAL; 557 return -EINVAL;
@@ -635,6 +696,20 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
635 } 696 }
636} 697}
637 698
699static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
700{
701 int i;
702 u32 *page_table = dma->pgtbl;
703
704 for (i = 0; i < dma->num_pages; i++) {
705 /* Each entry needs to be in little endian format. */
706 *page_table = dma->pg_map_arr[i] & 0xffffffff;
707 page_table++;
708 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
709 page_table++;
710 }
711}
712
638static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, 713static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
639 int pages, int use_pg_tbl) 714 int pages, int use_pg_tbl)
640{ 715{
@@ -675,6 +750,21 @@ error:
675 return -ENOMEM; 750 return -ENOMEM;
676} 751}
677 752
753static void cnic_free_context(struct cnic_dev *dev)
754{
755 struct cnic_local *cp = dev->cnic_priv;
756 int i;
757
758 for (i = 0; i < cp->ctx_blks; i++) {
759 if (cp->ctx_arr[i].ctx) {
760 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
761 cp->ctx_arr[i].ctx,
762 cp->ctx_arr[i].mapping);
763 cp->ctx_arr[i].ctx = NULL;
764 }
765 }
766}
767
678static void cnic_free_resc(struct cnic_dev *dev) 768static void cnic_free_resc(struct cnic_dev *dev)
679{ 769{
680 struct cnic_local *cp = dev->cnic_priv; 770 struct cnic_local *cp = dev->cnic_priv;
@@ -702,14 +792,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
702 cp->l2_ring = NULL; 792 cp->l2_ring = NULL;
703 } 793 }
704 794
705 for (i = 0; i < cp->ctx_blks; i++) { 795 cnic_free_context(dev);
706 if (cp->ctx_arr[i].ctx) {
707 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
708 cp->ctx_arr[i].ctx,
709 cp->ctx_arr[i].mapping);
710 cp->ctx_arr[i].ctx = NULL;
711 }
712 }
713 kfree(cp->ctx_arr); 796 kfree(cp->ctx_arr);
714 cp->ctx_arr = NULL; 797 cp->ctx_arr = NULL;
715 cp->ctx_blks = 0; 798 cp->ctx_blks = 0;
@@ -717,6 +800,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
717 cnic_free_dma(dev, &cp->gbl_buf_info); 800 cnic_free_dma(dev, &cp->gbl_buf_info);
718 cnic_free_dma(dev, &cp->conn_buf_info); 801 cnic_free_dma(dev, &cp->conn_buf_info);
719 cnic_free_dma(dev, &cp->kwq_info); 802 cnic_free_dma(dev, &cp->kwq_info);
803 cnic_free_dma(dev, &cp->kwq_16_data_info);
720 cnic_free_dma(dev, &cp->kcq_info); 804 cnic_free_dma(dev, &cp->kcq_info);
721 kfree(cp->iscsi_tbl); 805 kfree(cp->iscsi_tbl);
722 cp->iscsi_tbl = NULL; 806 cp->iscsi_tbl = NULL;
@@ -808,14 +892,20 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
808 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; 892 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
809 uinfo->mem[0].memtype = UIO_MEM_PHYS; 893 uinfo->mem[0].memtype = UIO_MEM_PHYS;
810 894
811 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
812 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 895 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
896 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
813 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 897 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
814 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 898 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
815 else 899 else
816 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; 900 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
817 901
818 uinfo->name = "bnx2_cnic"; 902 uinfo->name = "bnx2_cnic";
903 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
904 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
905 PAGE_MASK;
906 uinfo->mem[1].size = sizeof(struct host_def_status_block);
907
908 uinfo->name = "bnx2x_cnic";
819 } 909 }
820 910
821 uinfo->mem[1].memtype = UIO_MEM_LOGICAL; 911 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
@@ -880,6 +970,151 @@ error:
880 return ret; 970 return ret;
881} 971}
882 972
973static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
974{
975 struct cnic_local *cp = dev->cnic_priv;
976 struct cnic_eth_dev *ethdev = cp->ethdev;
977 int ctx_blk_size = cp->ethdev->ctx_blk_size;
978 int total_mem, blks, i, cid_space;
979
980 if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
981 return -EINVAL;
982
983 cid_space = MAX_ISCSI_TBL_SZ +
984 (BNX2X_ISCSI_START_CID - ethdev->starting_cid);
985
986 total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
987 blks = total_mem / ctx_blk_size;
988 if (total_mem % ctx_blk_size)
989 blks++;
990
991 if (blks > cp->ethdev->ctx_tbl_len)
992 return -ENOMEM;
993
994 cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
995 if (cp->ctx_arr == NULL)
996 return -ENOMEM;
997
998 cp->ctx_blks = blks;
999 cp->ctx_blk_size = ctx_blk_size;
1000 if (BNX2X_CHIP_IS_E1H(cp->chip_id))
1001 cp->ctx_align = 0;
1002 else
1003 cp->ctx_align = ctx_blk_size;
1004
1005 cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1006
1007 for (i = 0; i < blks; i++) {
1008 cp->ctx_arr[i].ctx =
1009 pci_alloc_consistent(dev->pcidev, cp->ctx_blk_size,
1010 &cp->ctx_arr[i].mapping);
1011 if (cp->ctx_arr[i].ctx == NULL)
1012 return -ENOMEM;
1013
1014 if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1015 if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1016 cnic_free_context(dev);
1017 cp->ctx_blk_size += cp->ctx_align;
1018 i = -1;
1019 continue;
1020 }
1021 }
1022 }
1023 return 0;
1024}
1025
1026static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1027{
1028 struct cnic_local *cp = dev->cnic_priv;
1029 int i, j, n, ret, pages;
1030 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1031
1032 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
1033 GFP_KERNEL);
1034 if (!cp->iscsi_tbl)
1035 goto error;
1036
1037 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
1038 MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
1039 if (!cp->ctx_tbl)
1040 goto error;
1041
1042 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1043 cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1044 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1045 }
1046
1047 pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
1048 PAGE_SIZE;
1049
1050 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1051 if (ret)
1052 return -ENOMEM;
1053
1054 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1055 for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1056 long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1057
1058 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1059 cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1060 off;
1061
1062 if ((i % n) == (n - 1))
1063 j++;
1064 }
1065
1066 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
1067 if (ret)
1068 goto error;
1069 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
1070
1071 for (i = 0; i < KCQ_PAGE_CNT; i++) {
1072 struct bnx2x_bd_chain_next *next =
1073 (struct bnx2x_bd_chain_next *)
1074 &cp->kcq[i][MAX_KCQE_CNT];
1075 int j = i + 1;
1076
1077 if (j >= KCQ_PAGE_CNT)
1078 j = 0;
1079 next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
1080 next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
1081 }
1082
1083 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
1084 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
1085 ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
1086 if (ret)
1087 goto error;
1088
1089 pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
1090 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1091 if (ret)
1092 goto error;
1093
1094 ret = cnic_alloc_bnx2x_context(dev);
1095 if (ret)
1096 goto error;
1097
1098 cp->bnx2x_status_blk = cp->status_blk;
1099 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1100
1101 cp->l2_rx_ring_size = 15;
1102
1103 ret = cnic_alloc_l2_rings(dev, 4);
1104 if (ret)
1105 goto error;
1106
1107 ret = cnic_alloc_uio(dev);
1108 if (ret)
1109 goto error;
1110
1111 return 0;
1112
1113error:
1114 cnic_free_resc(dev);
1115 return -ENOMEM;
1116}
1117
883static inline u32 cnic_kwq_avail(struct cnic_local *cp) 1118static inline u32 cnic_kwq_avail(struct cnic_local *cp)
884{ 1119{
885 return cp->max_kwq_idx - 1120 return cp->max_kwq_idx -
@@ -921,6 +1156,880 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
921 return 0; 1156 return 0;
922} 1157}
923 1158
1159static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1160 union l5cm_specific_data *l5_data)
1161{
1162 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1163 dma_addr_t map;
1164
1165 map = ctx->kwqe_data_mapping;
1166 l5_data->phy_address.lo = (u64) map & 0xffffffff;
1167 l5_data->phy_address.hi = (u64) map >> 32;
1168 return ctx->kwqe_data;
1169}
1170
1171static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1172 u32 type, union l5cm_specific_data *l5_data)
1173{
1174 struct cnic_local *cp = dev->cnic_priv;
1175 struct l5cm_spe kwqe;
1176 struct kwqe_16 *kwq[1];
1177 int ret;
1178
1179 kwqe.hdr.conn_and_cmd_data =
1180 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1181 BNX2X_HW_CID(cid, cp->func)));
1182 kwqe.hdr.type = cpu_to_le16(type);
1183 kwqe.hdr.reserved = 0;
1184 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1185 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1186
1187 kwq[0] = (struct kwqe_16 *) &kwqe;
1188
1189 spin_lock_bh(&cp->cnic_ulp_lock);
1190 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1191 spin_unlock_bh(&cp->cnic_ulp_lock);
1192
1193 if (ret == 1)
1194 return 0;
1195
1196 return -EBUSY;
1197}
1198
1199static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1200 struct kcqe *cqes[], u32 num_cqes)
1201{
1202 struct cnic_local *cp = dev->cnic_priv;
1203 struct cnic_ulp_ops *ulp_ops;
1204
1205 rcu_read_lock();
1206 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1207 if (likely(ulp_ops)) {
1208 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1209 cqes, num_cqes);
1210 }
1211 rcu_read_unlock();
1212}
1213
1214static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1215{
1216 struct cnic_local *cp = dev->cnic_priv;
1217 struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1218 int func = cp->func, pages;
1219 int hq_bds;
1220
1221 cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1222 cp->num_ccells = req1->num_ccells_per_conn;
1223 cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1224 cp->num_iscsi_tasks;
1225 cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1226 BNX2X_ISCSI_R2TQE_SIZE;
1227 cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1228 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1229 hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1230 cp->num_cqs = req1->num_cqs;
1231
1232 if (!dev->max_iscsi_conn)
1233 return 0;
1234
1235 /* init Tstorm RAM */
1236 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
1237 req1->rq_num_wqes);
1238 CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
1239 PAGE_SIZE);
1240 CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1241 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
1242 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1243 TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
1244 req1->num_tasks_per_conn);
1245
1246 /* init Ustorm RAM */
1247 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1248 USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
1249 req1->rq_buffer_size);
1250 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
1251 PAGE_SIZE);
1252 CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1253 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
1254 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1255 USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
1256 req1->num_tasks_per_conn);
1257 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
1258 req1->rq_num_wqes);
1259 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
1260 req1->cq_num_wqes);
1261 CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
1262 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1263
1264 /* init Xstorm RAM */
1265 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
1266 PAGE_SIZE);
1267 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1268 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
1269 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1270 XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
1271 req1->num_tasks_per_conn);
1272 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
1273 hq_bds);
1274 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
1275 req1->num_tasks_per_conn);
1276 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
1277 cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1278
1279 /* init Cstorm RAM */
1280 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
1281 PAGE_SIZE);
1282 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1283 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
1284 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1285 CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
1286 req1->num_tasks_per_conn);
1287 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
1288 req1->cq_num_wqes);
1289 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
1290 hq_bds);
1291
1292 return 0;
1293}
1294
1295static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1296{
1297 struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1298 struct cnic_local *cp = dev->cnic_priv;
1299 int func = cp->func;
1300 struct iscsi_kcqe kcqe;
1301 struct kcqe *cqes[1];
1302
1303 memset(&kcqe, 0, sizeof(kcqe));
1304 if (!dev->max_iscsi_conn) {
1305 kcqe.completion_status =
1306 ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1307 goto done;
1308 }
1309
1310 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1311 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
1312 CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1313 TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
1314 req2->error_bit_map[1]);
1315
1316 CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1317 USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
1318 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1319 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
1320 CNIC_WR(dev, BAR_USTRORM_INTMEM +
1321 USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
1322 req2->error_bit_map[1]);
1323
1324 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1325 CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
1326
1327 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1328
1329done:
1330 kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1331 cqes[0] = (struct kcqe *) &kcqe;
1332 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1333
1334 return 0;
1335}
1336
1337static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1338{
1339 struct cnic_local *cp = dev->cnic_priv;
1340 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1341
1342 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1343 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1344
1345 cnic_free_dma(dev, &iscsi->hq_info);
1346 cnic_free_dma(dev, &iscsi->r2tq_info);
1347 cnic_free_dma(dev, &iscsi->task_array_info);
1348 }
1349 cnic_free_id(&cp->cid_tbl, ctx->cid);
1350 ctx->cid = 0;
1351}
1352
1353static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1354{
1355 u32 cid;
1356 int ret, pages;
1357 struct cnic_local *cp = dev->cnic_priv;
1358 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1359 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1360
1361 cid = cnic_alloc_new_id(&cp->cid_tbl);
1362 if (cid == -1) {
1363 ret = -ENOMEM;
1364 goto error;
1365 }
1366
1367 ctx->cid = cid;
1368 pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
1369
1370 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1371 if (ret)
1372 goto error;
1373
1374 pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
1375 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1376 if (ret)
1377 goto error;
1378
1379 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1380 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1381 if (ret)
1382 goto error;
1383
1384 return 0;
1385
1386error:
1387 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1388 return ret;
1389}
1390
1391static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1392 struct regpair *ctx_addr)
1393{
1394 struct cnic_local *cp = dev->cnic_priv;
1395 struct cnic_eth_dev *ethdev = cp->ethdev;
1396 int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1397 int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1398 unsigned long align_off = 0;
1399 dma_addr_t ctx_map;
1400 void *ctx;
1401
1402 if (cp->ctx_align) {
1403 unsigned long mask = cp->ctx_align - 1;
1404
1405 if (cp->ctx_arr[blk].mapping & mask)
1406 align_off = cp->ctx_align -
1407 (cp->ctx_arr[blk].mapping & mask);
1408 }
1409 ctx_map = cp->ctx_arr[blk].mapping + align_off +
1410 (off * BNX2X_CONTEXT_MEM_SIZE);
1411 ctx = cp->ctx_arr[blk].ctx + align_off +
1412 (off * BNX2X_CONTEXT_MEM_SIZE);
1413 if (init)
1414 memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1415
1416 ctx_addr->lo = ctx_map & 0xffffffff;
1417 ctx_addr->hi = (u64) ctx_map >> 32;
1418 return ctx;
1419}
1420
1421static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1422 u32 num)
1423{
1424 struct cnic_local *cp = dev->cnic_priv;
1425 struct iscsi_kwqe_conn_offload1 *req1 =
1426 (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1427 struct iscsi_kwqe_conn_offload2 *req2 =
1428 (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1429 struct iscsi_kwqe_conn_offload3 *req3;
1430 struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1431 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1432 u32 cid = ctx->cid;
1433 u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
1434 struct iscsi_context *ictx;
1435 struct regpair context_addr;
1436 int i, j, n = 2, n_max;
1437
1438 ctx->ctx_flags = 0;
1439 if (!req2->num_additional_wqes)
1440 return -EINVAL;
1441
1442 n_max = req2->num_additional_wqes + 2;
1443
1444 ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1445 if (ictx == NULL)
1446 return -ENOMEM;
1447
1448 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1449
1450 ictx->xstorm_ag_context.hq_prod = 1;
1451
1452 ictx->xstorm_st_context.iscsi.first_burst_length =
1453 ISCSI_DEF_FIRST_BURST_LEN;
1454 ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1455 ISCSI_DEF_MAX_RECV_SEG_LEN;
1456 ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1457 req1->sq_page_table_addr_lo;
1458 ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1459 req1->sq_page_table_addr_hi;
1460 ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1461 ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1462 ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1463 iscsi->hq_info.pgtbl_map & 0xffffffff;
1464 ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1465 (u64) iscsi->hq_info.pgtbl_map >> 32;
1466 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1467 iscsi->hq_info.pgtbl[0];
1468 ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1469 iscsi->hq_info.pgtbl[1];
1470 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1471 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1472 ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1473 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1474 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1475 iscsi->r2tq_info.pgtbl[0];
1476 ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1477 iscsi->r2tq_info.pgtbl[1];
1478 ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1479 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1480 ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1481 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1482 ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1483 BNX2X_ISCSI_PBL_NOT_CACHED;
1484 ictx->xstorm_st_context.iscsi.flags.flags |=
1485 XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1486 ictx->xstorm_st_context.iscsi.flags.flags |=
1487 XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1488
1489 ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1490 /* TSTORM requires the base address of RQ DB & not PTE */
1491 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1492 req2->rq_page_table_addr_lo & PAGE_MASK;
1493 ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1494 req2->rq_page_table_addr_hi;
1495 ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1496 ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1497 ictx->tstorm_st_context.tcp.flags2 |=
1498 TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1499
1500 ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1501
1502 ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1503 req2->rq_page_table_addr_lo & 0xffffffff;
1504 ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1505 (u64) req2->rq_page_table_addr_hi >> 32;
1506 ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1507 ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1508 ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1509 iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1510 ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1511 (u64) iscsi->r2tq_info.pgtbl_map >> 32;
1512 ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1513 iscsi->r2tq_info.pgtbl[0];
1514 ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1515 iscsi->r2tq_info.pgtbl[1];
1516 ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1517 req1->cq_page_table_addr_lo;
1518 ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1519 req1->cq_page_table_addr_hi;
1520 ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1521 ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1522 ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1523 ictx->ustorm_st_context.task_pbe_cache_index =
1524 BNX2X_ISCSI_PBL_NOT_CACHED;
1525 ictx->ustorm_st_context.task_pdu_cache_index =
1526 BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1527
1528 for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1529 if (j == 3) {
1530 if (n >= n_max)
1531 break;
1532 req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1533 j = 0;
1534 }
1535 ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1536 ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1537 req3->qp_first_pte[j].hi;
1538 ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1539 req3->qp_first_pte[j].lo;
1540 }
1541
1542 ictx->ustorm_st_context.task_pbl_base.lo =
1543 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1544 ictx->ustorm_st_context.task_pbl_base.hi =
1545 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1546 ictx->ustorm_st_context.tce_phy_addr.lo =
1547 iscsi->task_array_info.pgtbl[0];
1548 ictx->ustorm_st_context.tce_phy_addr.hi =
1549 iscsi->task_array_info.pgtbl[1];
1550 ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1551 ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1552 ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1553 ictx->ustorm_st_context.negotiated_rx_and_flags |=
1554 ISCSI_DEF_MAX_BURST_LEN;
1555 ictx->ustorm_st_context.negotiated_rx |=
1556 ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1557 USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1558
1559 ictx->cstorm_st_context.hq_pbl_base.lo =
1560 iscsi->hq_info.pgtbl_map & 0xffffffff;
1561 ictx->cstorm_st_context.hq_pbl_base.hi =
1562 (u64) iscsi->hq_info.pgtbl_map >> 32;
1563 ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1564 ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1565 ictx->cstorm_st_context.task_pbl_base.lo =
1566 iscsi->task_array_info.pgtbl_map & 0xffffffff;
1567 ictx->cstorm_st_context.task_pbl_base.hi =
1568 (u64) iscsi->task_array_info.pgtbl_map >> 32;
1569 /* CSTORM and USTORM initialization is different, CSTORM requires
1570 * CQ DB base & not PTE addr */
1571 ictx->cstorm_st_context.cq_db_base.lo =
1572 req1->cq_page_table_addr_lo & PAGE_MASK;
1573 ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1574 ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1575 ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1576 for (i = 0; i < cp->num_cqs; i++) {
1577 ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1578 ISCSI_INITIAL_SN;
1579 ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1580 ISCSI_INITIAL_SN;
1581 }
1582
1583 ictx->xstorm_ag_context.cdu_reserved =
1584 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1585 ISCSI_CONNECTION_TYPE);
1586 ictx->ustorm_ag_context.cdu_usage =
1587 CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1588 ISCSI_CONNECTION_TYPE);
1589 return 0;
1590
1591}
1592
1593static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1594 u32 num, int *work)
1595{
1596 struct iscsi_kwqe_conn_offload1 *req1;
1597 struct iscsi_kwqe_conn_offload2 *req2;
1598 struct cnic_local *cp = dev->cnic_priv;
1599 struct iscsi_kcqe kcqe;
1600 struct kcqe *cqes[1];
1601 u32 l5_cid;
1602 int ret;
1603
1604 if (num < 2) {
1605 *work = num;
1606 return -EINVAL;
1607 }
1608
1609 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1610 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1611 if ((num - 2) < req2->num_additional_wqes) {
1612 *work = num;
1613 return -EINVAL;
1614 }
1615 *work = 2 + req2->num_additional_wqes;;
1616
1617 l5_cid = req1->iscsi_conn_id;
1618 if (l5_cid >= MAX_ISCSI_TBL_SZ)
1619 return -EINVAL;
1620
1621 memset(&kcqe, 0, sizeof(kcqe));
1622 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1623 kcqe.iscsi_conn_id = l5_cid;
1624 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1625
1626 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1627 atomic_dec(&cp->iscsi_conn);
1628 ret = 0;
1629 goto done;
1630 }
1631 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1632 if (ret) {
1633 atomic_dec(&cp->iscsi_conn);
1634 ret = 0;
1635 goto done;
1636 }
1637 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1638 if (ret < 0) {
1639 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1640 atomic_dec(&cp->iscsi_conn);
1641 goto done;
1642 }
1643
1644 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1645 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
1646 cp->func);
1647
1648done:
1649 cqes[0] = (struct kcqe *) &kcqe;
1650 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1651 return ret;
1652}
1653
1654
1655static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1656{
1657 struct cnic_local *cp = dev->cnic_priv;
1658 struct iscsi_kwqe_conn_update *req =
1659 (struct iscsi_kwqe_conn_update *) kwqe;
1660 void *data;
1661 union l5cm_specific_data l5_data;
1662 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1663 int ret;
1664
1665 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1666 return -EINVAL;
1667
1668 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1669 if (!data)
1670 return -ENOMEM;
1671
1672 memcpy(data, kwqe, sizeof(struct kwqe));
1673
1674 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1675 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1676 return ret;
1677}
1678
/* Handle an iSCSI DESTROY_CONN KWQE: issue a CFC delete ramrod for the
 * connection's chip context (if it was offloaded), free the per-connection
 * resources, and post a DESTROY_CONN completion KCQE back to the iSCSI ULP.
 * Returns the ramrod submit status (0 if no ramrod was needed).
 */
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	union l5cm_specific_data l5_data;
	u32 l5_cid = req->reserved0;	/* ULP passes the L5 CID here */
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	/* Only connections that actually started offload own a chip context. */
	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
		goto skip_cfc_delete;

	/* Let at least 2 seconds elapse since the connection's last state
	 * change (ctx->timestamp) before deleting its context.
	 */
	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(250);

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
				  req->context_id,
				  ETH_CONNECTION_TYPE |
				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
				  &l5_data);
	/* wait_cond is set (and waitq woken) by the ramrod completion path. */
	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	/* Always report SUCCESS to the ULP; ret only reflects the ramrod. */
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
1724
/* Fill in the xstorm/tstorm portions of a per-connection buffer from the
 * CONNECT1/CONNECT3 work requests, including the precomputed TCP
 * pseudo-header checksum over the connection's IP addresses.
 */
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	/* Copy the 128-bit addresses (also covers IPv4, zero-padded by the
	 * caller) into in6_addr form, byte-swapping to network order for
	 * the checksum helper.
	 */
	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;	/* no MSS clamp; chip derives from pmtu */
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	/* Checksum over addresses only (len 0, proto TCP); inverted and
	 * byte-swapped into the format the xstorm expects.
	 */
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	/* NO_DELAY_ACK clear means delayed ACKs are enabled. */
	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;	/* effectively unlimited RTO */
}
1773
/* Program the device's iSCSI MAC address into per-function storm memory.
 * The xstorm takes the bytes in natural order (ADDR0..ADDR5 = mac[0..5]);
 * the tstorm takes them reversed, split into a 2-byte LSB and 4-byte MSB
 * region (LSB+0 = mac[5], ... MSB+3 = mac[0]).
 */
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);

	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
		 mac[0]);
}
1810
/* Enable/disable the TCP timestamp option in the per-function xstorm and
 * tstorm TCP flags.  Window scaling is always left enabled on the xstorm
 * side.
 */
static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (tcp_ts) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
}
1828
/* Handle an L4 CONNECT1 work request.  A connect spans 2 WQEs for IPv4
 * (CONNECT1 + CONNECT3) or 3 for IPv6 (CONNECT1 + CONNECT2 + CONNECT3);
 * *work is set to the number of WQEs consumed so the dispatch loop can
 * advance past them.  Builds the active-connection buffer in the 16-byte
 * KWQE data area and submits a TCP_CONNECT ramrod.
 */
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	/* IPv6 connects carry an extra CONNECT2 WQE with the high address
	 * words.
	 */
	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	/* Sanity check: the whole conn buffer must fit in the 16-byte-KWQE
	 * data area provided by cnic_get_kwqe_16_data().
	 */
	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		printk(KERN_ERR PFX "%s: conn_buf size too big\n",
		       dev->netdev->name);
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		/* Upper 96 bits of the IPv6 addresses; the low 32 bits come
		 * from kwqe1 below (shared with the IPv4 path).
		 */
		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	/* Mark offload-started so destroy knows a CFC delete is needed. */
	if (!ret)
		ctx->ctx_flags |= CTX_FL_OFFLD_START;

	return ret;
}
1914
1915static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
1916{
1917 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
1918 union l5cm_specific_data l5_data;
1919 int ret;
1920
1921 memset(&l5_data, 0, sizeof(l5_data));
1922 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
1923 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1924 return ret;
1925}
1926
1927static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
1928{
1929 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
1930 union l5cm_specific_data l5_data;
1931 int ret;
1932
1933 memset(&l5_data, 0, sizeof(l5_data));
1934 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
1935 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1936 return ret;
1937}
/* Handle an L4 OFFLOAD_PG work request.  bnx2x needs no hardware PG
 * context, so this just echoes a synthesized OFFLOAD_PG completion back
 * to the ULP.  Note pg_cid is deliberately set to host_opaque — the
 * host-supplied token stands in for a real PG CID on this chip.
 */
static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
	struct l4_kcq kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.pg_host_opaque = req->host_opaque;
	kcqe.pg_cid = req->host_opaque;
	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
	return 0;
}
1952
1953static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
1954{
1955 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
1956 struct l4_kcq kcqe;
1957 struct kcqe *cqes[1];
1958
1959 memset(&kcqe, 0, sizeof(kcqe));
1960 kcqe.pg_host_opaque = req->pg_host_opaque;
1961 kcqe.pg_cid = req->pg_cid;
1962 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
1963 cqes[0] = (struct kcqe *) &kcqe;
1964 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
1965 return 0;
1966}
1967
/* Dispatch a batch of work queue entries to the per-opcode bnx2x handlers.
 * Multi-WQE opcodes (OFFLOAD_CONN1, CONNECT1) report how many entries
 * they consumed via 'work'; the loop advances by that amount.  Handler
 * failures are logged but do not abort the batch.
 */
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;	/* default: one WQE consumed */

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			ret = 0;	/* nothing to upload on bnx2x */
			break;
		default:
			ret = 0;	/* unknown opcodes are skipped, not fatal */
			printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n",
			       dev->netdev->name, opcode);
			break;
		}
		if (ret < 0)
			printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n",
			       dev->netdev->name, opcode);
		i += work;
	}
	return 0;
}
2032
924static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2033static void service_kcqes(struct cnic_dev *dev, int num_cqes)
925{ 2034{
926 struct cnic_local *cp = dev->cnic_priv; 2035 struct cnic_local *cp = dev->cnic_priv;
@@ -987,6 +2096,22 @@ static u16 cnic_bnx2_hw_idx(u16 idx)
987 return idx; 2096 return idx;
988} 2097}
989 2098
2099static u16 cnic_bnx2x_next_idx(u16 idx)
2100{
2101 idx++;
2102 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2103 idx++;
2104
2105 return idx;
2106}
2107
2108static u16 cnic_bnx2x_hw_idx(u16 idx)
2109{
2110 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2111 idx++;
2112 return idx;
2113}
2114
990static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) 2115static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
991{ 2116{
992 struct cnic_local *cp = dev->cnic_priv; 2117 struct cnic_local *cp = dev->cnic_priv;
@@ -1012,7 +2137,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
1012 return last_cnt; 2137 return last_cnt;
1013} 2138}
1014 2139
1015static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) 2140static void cnic_chk_pkt_rings(struct cnic_local *cp)
1016{ 2141{
1017 u16 rx_cons = *cp->rx_cons_ptr; 2142 u16 rx_cons = *cp->rx_cons_ptr;
1018 u16 tx_cons = *cp->tx_cons_ptr; 2143 u16 tx_cons = *cp->tx_cons_ptr;
@@ -1020,6 +2145,7 @@ static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
1020 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2145 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
1021 cp->tx_cons = tx_cons; 2146 cp->tx_cons = tx_cons;
1022 cp->rx_cons = rx_cons; 2147 cp->rx_cons = rx_cons;
2148
1023 uio_event_notify(cp->cnic_uinfo); 2149 uio_event_notify(cp->cnic_uinfo);
1024 } 2150 }
1025} 2151}
@@ -1062,7 +2188,7 @@ done:
1062 2188
1063 cp->kcq_prod_idx = sw_prod; 2189 cp->kcq_prod_idx = sw_prod;
1064 2190
1065 cnic_chk_bnx2_pkt_rings(cp); 2191 cnic_chk_pkt_rings(cp);
1066 return status_idx; 2192 return status_idx;
1067} 2193}
1068 2194
@@ -1100,7 +2226,7 @@ done:
1100 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); 2226 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
1101 cp->kcq_prod_idx = sw_prod; 2227 cp->kcq_prod_idx = sw_prod;
1102 2228
1103 cnic_chk_bnx2_pkt_rings(cp); 2229 cnic_chk_pkt_rings(cp);
1104 2230
1105 cp->last_status_idx = status_idx; 2231 cp->last_status_idx = status_idx;
1106 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2232 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
@@ -1125,6 +2251,91 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
1125 return IRQ_HANDLED; 2251 return IRQ_HANDLED;
1126} 2252}
1127 2253
/* Write an IGU acknowledgment for the given status block/storm to the
 * per-port host-coalescing INT_ACK command register.  'op' selects the
 * interrupt mode (enable/disable), 'update' whether the consumer index
 * is updated.
 */
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	/* The whole ack register is pushed as one 32-bit write (type-pun
	 * of the packed struct, matching the bnx2x driver's convention).
	 */
	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}
2271
/* MSI-X ack: disable further interrupts from our status block without
 * updating the index; the bottom half re-enables them when done.
 */
static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
			   IGU_INT_DISABLE, 0);
}
2279
/* Tasklet bottom half for bnx2x: drain and service KCQEs from the iSCSI
 * event queue until the software producer catches up with the hardware
 * producer, then update the EQ producer and re-enable the interrupt.
 */
static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u16 hw_prod, sw_prod;
	struct cstorm_status_block_c *sblk =
		&cp->bnx2x_status_blk->c_status_block;
	u32 status_idx = sblk->status_block_index;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
	hw_prod = cp->hw_idx(hw_prod);
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		/* Stop if the status block did not advance while we worked;
		 * otherwise pick up the new producer and keep draining.
		 */
		if (status_idx == sblk->status_block_index)
			break;

		status_idx = sblk->status_block_index;
		hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
		hw_prod = cp->hw_idx(hw_prod);
	}

done:
	/* Publish the new EQ producer (offset by MAX_KCQ_IDX per the chip's
	 * EQ indexing) and re-enable the interrupt with index update.
	 */
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
			   status_idx, IGU_INT_ENABLE, 1);

	cp->kcq_prod_idx = sw_prod;
	return;
}
2321
/* IRQ-context service entry for bnx2x (called by the ethernet driver):
 * prefetch the hot KCQ cache lines, defer the real work to the tasklet,
 * and poke the L2 ring consumers for the userspace (uio) client.
 */
static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	prefetch(cp->status_blk);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	cnic_chk_pkt_rings(cp);

	return 0;
}
2338
1128static void cnic_ulp_stop(struct cnic_dev *dev) 2339static void cnic_ulp_stop(struct cnic_dev *dev)
1129{ 2340{
1130 struct cnic_local *cp = dev->cnic_priv; 2341 struct cnic_local *cp = dev->cnic_priv;
@@ -1197,6 +2408,19 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1197 2408
1198 cnic_put(dev); 2409 cnic_put(dev);
1199 break; 2410 break;
2411 case CNIC_CTL_COMPLETION_CMD: {
2412 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
2413 u32 l5_cid;
2414 struct cnic_local *cp = dev->cnic_priv;
2415
2416 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
2417 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2418
2419 ctx->wait_cond = 1;
2420 wake_up(&ctx->waitq);
2421 }
2422 break;
2423 }
1200 default: 2424 default:
1201 return -EINVAL; 2425 return -EINVAL;
1202 } 2426 }
@@ -1872,6 +3096,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1872 /* fall through */ 3096 /* fall through */
1873 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3097 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
1874 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3098 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3099 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3100 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
1875 cp->close_conn(csk, opcode); 3101 cp->close_conn(csk, opcode);
1876 break; 3102 break;
1877 3103
@@ -1957,6 +3183,76 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1957 return 0; 3183 return 0;
1958} 3184}
1959 3185
/* Drive the bnx2x connection teardown state machine.  Closing takes three
 * steps, each triggered by the previous step's completion opcode:
 *   close/reset complete -> SEARCHER_DELETE ramrod
 *   SEARCHER_DELETE done -> TERMINATE_OFFLOAD ramrod
 *   TERMINATE_OFFLOAD done -> local cleanup + upcall to the ULP
 */
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode))
			cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		/* timestamp feeds the 2-second settle delay in iscsi_destroy */
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}
3220
/* No per-CM hardware teardown is needed on bnx2x; this stub keeps the
 * stop_cm hook interface uniform with the bnx2 path.
 */
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
}
3224
/* One-time connection-manager hardware init for bnx2x: program the iSCSI
 * MAC, enable TCP timestamps by default, and load the per-function TCP
 * tuning defaults (delayed ACK, TTL/TOS, window scale, SWS timer, cwnd
 * clamp) into storm memory.  Always returns 0.
 */
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	/* No VLAN tag by default; cnic_bnx2x_connect() sets the real one. */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
	/* Advertised window scale of 2 — TODO confirm against firmware spec */
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
		DEF_MAX_CWND);
	return 0;
}
3255
1960static int cnic_cm_open(struct cnic_dev *dev) 3256static int cnic_cm_open(struct cnic_dev *dev)
1961{ 3257{
1962 struct cnic_local *cp = dev->cnic_priv; 3258 struct cnic_local *cp = dev->cnic_priv;
@@ -2091,7 +3387,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
2091 3387
2092 cp->bnx2_status_blk = cp->status_blk; 3388 cp->bnx2_status_blk = cp->status_blk;
2093 cp->last_status_idx = cp->bnx2_status_blk->status_idx; 3389 cp->last_status_idx = cp->bnx2_status_blk->status_idx;
2094 tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix, 3390 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
2095 (unsigned long) dev); 3391 (unsigned long) dev);
2096 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, 3392 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
2097 "cnic", dev); 3393 "cnic", dev);
@@ -2464,6 +3760,417 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2464 return 0; 3760 return 0;
2465} 3761}
2466 3762
3763static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
3764{
3765 struct cnic_local *cp = dev->cnic_priv;
3766 struct cnic_eth_dev *ethdev = cp->ethdev;
3767 u32 start_offset = ethdev->ctx_tbl_offset;
3768 int i;
3769
3770 for (i = 0; i < cp->ctx_blks; i++) {
3771 struct cnic_ctx *ctx = &cp->ctx_arr[i];
3772 dma_addr_t map = ctx->mapping;
3773
3774 if (cp->ctx_align) {
3775 unsigned long mask = cp->ctx_align - 1;
3776
3777 map = (map + mask) & ~mask;
3778 }
3779
3780 cnic_ctx_tbl_wr(dev, start_offset + i, map);
3781 }
3782}
3783
/* Set up the bnx2x service tasklet and, when the ethernet driver runs in
 * MSI-X mode, claim our dedicated vector.  On request_irq failure the
 * tasklet is disabled so it can never run without an IRQ source.
 */
static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int err = 0;

	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
		     (unsigned long) dev);
	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
		err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
				  "cnic", dev);
		if (err)
			tasklet_disable(&cp->cnic_irq_task);
	}
	return err;
}
3800
/* Enable host coalescing for the iSCSI EQ consumer index on our status
 * block: set its HC timeout and clear the disable flag.
 */
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;
	int port = CNIC_PORT(cp);

	/* 64/12 — HC timeout value; presumably 64 usec in 12-usec chip
	 * ticks, mirroring bnx2x — TODO confirm against bnx2x HC setup.
	 */
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
					       HC_INDEX_C_ISCSI_EQ_CONS),
		 64 / 12);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						HC_INDEX_C_ISCSI_EQ_CONS), 0);
}
3815
/* No synchronous interrupt disable is needed on bnx2x; stub keeps the
 * disable_int_sync hook interface uniform with the bnx2 path.
 */
static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
3819
/* Build the iSCSI L2 TX ring for bnx2x: pre-fill the BD page with fixed
 * 3-BD descriptors pointing at the shared L2 buffer, wire the ring into
 * the connection's ETH context, and reset the per-client xstorm stats.
 */
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
	struct eth_context *context;
	struct regpair context_addr;
	dma_addr_t buf_map;
	int func = CNIC_FUNC(cp);
	int port = CNIC_PORT(cp);
	int i;
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	/* Every packet uses 3 BDs: start BD + parse BD (left zero) + a
	 * regular BD for a second 16-byte chunk of the same buffer.
	 */
	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);

	/* Last BD of the page links back to the start of the ring; the
	 * same base address goes into the xstorm context.
	 */
	val = (u64) cp->l2_ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_lo = val;

	context->cstorm_st_context.sb_index_number =
		HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
	context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;

	context->xstorm_st_context.statistics_data = (cli |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

	context->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
					CDU_REGION_NUMBER_XCM_AG,
					ETH_CONNECTION_TYPE);

	/* reset xstorm per client statistics */
	val = BAR_XSTRORM_INTMEM +
	      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	/* TX completions are tracked via the default status block. */
	cp->tx_cons_ptr =
		&cp->bnx2x_def_status_blk->c_def_status_block.index_values[
			HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
}
3886
/* Build the iSCSI L2 RX ring for bnx2x: populate the RX BD page from the
 * shared buffer area, link the RCQ next-page entry, program the ustorm/
 * tstorm client configuration, and reset per-client statistics.
 */
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	/* page 0 of l2_ring is TX, page 1 is RX BDs, page 2 is the RCQ */
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(cp->l2_ring + (2 * BCM_PAGE_SIZE));
	struct eth_context *context;
	struct regpair context_addr;
	int i;
	int port = CNIC_PORT(cp);
	int func = CNIC_FUNC(cp);
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;
	struct tstorm_eth_client_config tstorm_client = {0};

	/* Cycle the l2_rx_ring_size buffers over the whole BD page; buffer
	 * 0 is reserved (TX uses it), so indices start at 1.
	 */
	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);

	/* Last RX BD slot links back to the start of the RX BD page. */
	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_lo = val;

	context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_ISCSI_RX_SB_INDEX_NUM;
	context->ustorm_st_context.common.clientId = cli;
	context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
	context->ustorm_st_context.common.flags =
		USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
	context->ustorm_st_context.common.statistics_counter_id = cli;
	context->ustorm_st_context.common.mc_alignment_log_size = 0;
	context->ustorm_st_context.common.bd_buff_size =
						cp->l2_single_buf_size;

	context->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
					CDU_REGION_NUMBER_UCM_AG,
					ETH_CONNECTION_TYPE);

	/* Next-page entry in the RCQ's last slot points back to its start;
	 * the same address is mirrored into ustorm's CQE page registers.
	 */
	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);

	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);

	/* client tstorm info */
	tstorm_client.mtu = cp->l2_single_buf_size - 14; /* minus eth header */
	tstorm_client.config_flags =
			(TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
			 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE);
	tstorm_client.statistics_counter_id = cli;

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
		((u32 *)&tstorm_client)[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
		((u32 *)&tstorm_client)[1]);

	/* reset tstorm per client statistics */
	val = BAR_TSTRORM_INTMEM +
	      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	/* reset ustorm per client statistics */
	val = BAR_USTRORM_INTMEM +
	      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	/* RX completions are tracked via the default status block. */
	cp->rx_cons_ptr =
		&cp->bnx2x_def_status_blk->u_def_status_block.index_values[
			HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
}
3988
/* Read iSCSI-related configuration from the device's shared memory: the
 * per-port iSCSI MAC address and the licensed maximum connection count.
 * On E1H multi-function setups, iSCSI is disabled (max_iscsi_conn = 0)
 * unless the function's MF protocol is configured as iSCSI.
 */
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, addr, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	/* Shared memory must live in the expected window; otherwise leave
	 * iSCSI disabled.
	 */
	if (base < 0xa0000 || base >= 0xc0000)
		return;

	/* MAC is stored big-endian across two 32-bit words: upper word
	 * holds bytes 0-1, lower word bytes 2-5.
	 */
	val = BNX2X_SHMEM_ADDR(base,
			       dev_info.port_hw_config[port].iscsi_mac_upper);

	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	val = BNX2X_SHMEM_ADDR(base,
			       dev_info.port_hw_config[port].iscsi_mac_lower);

	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);

		/* presumably a simple obfuscation of the license value —
		 * confirm against the shmem/license layout
		 */
		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
		int func = CNIC_FUNC(cp);

		addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].e1hov_tag);
		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			/* Multi-function mode: only iSCSI-protocol functions
			 * may keep their connection allowance.
			 */
			addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
}
4045
/* Bring up the bnx2x-specific hardware state: allocate the CID table,
 * program the single iSCSI event queue in cstorm memory, publish the
 * connection/global buffer page tables, load the chip context table,
 * verify the EQ consumer index is clean, hook up the IRQ, and build the
 * L2 rings.  Returns 0 on success or a negative errno.
 */
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp), ret, i;
	int port = CNIC_PORT(cp);
	u16 eq_idx;
	u8 sb_id = cp->status_blk_num;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       BNX2X_ISCSI_START_CID);

	if (ret)
		return -ENOMEM;

	cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
			  CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
	cp->kcq_prod_idx = 0;

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
	/* EQ page chain: page 1 is the "next page", page 0 holds the EQEs. */
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
		 HC_INDEX_C_ISCSI_EQ_CONS);

	/* Connection-buffer page table: 64-bit entries written as lo/hi. */
	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	cnic_setup_bnx2x_context(dev);

	/* A non-zero EQ consumer index means a previous instance did not
	 * shut down cleanly (cnic_stop_bnx2x_hw() zeroes it) — refuse to
	 * start on top of stale state.
	 */
	eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
			   CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
			   offsetof(struct cstorm_status_block_c,
				    index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
	if (eq_idx != 0) {
		printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n",
		       dev->netdev->name, eq_idx);
		return -EBUSY;
	}
	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	cnic_init_bnx2x_tx_ring(dev);
	cnic_init_bnx2x_rx_ring(dev);

	return 0;
}
4126
/* Initialize the L2 rings for the device class.  bnx2 only needs its
 * TX/RX rings built; bnx2x additionally zeroes the ustorm RX producers,
 * submits a CLIENT_SETUP ramrod for the iSCSI L2 CID, and enables the
 * ring via the ethernet driver.
 */
static void cnic_init_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		struct cnic_eth_dev *ethdev = cp->ethdev;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		void __iomem *doorbell;
		int i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		/* Producers must be visible before the doorbell writes. */
		barrier();

		doorbell = ethdev->io_base2 + BAR_USTRORM_INTMEM +
			USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			writel(((u32 *) &rx_prods)[i], doorbell + i * 4);

		cnic_init_bnx2x_tx_ring(dev);
		cnic_init_bnx2x_rx_ring(dev);

		/* CLIENT_SETUP takes the client id in the data's low word. */
		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
	}
}
4161
/* Shut down the L2 rings: drain the bnx2 RX ring, or disable the bnx2x
 * iSCSI L2 ring via the ethernet driver's ring control.
 */
static void cnic_shutdown_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_shutdown_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));

		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
	}
}
4173
2467static int cnic_register_netdev(struct cnic_dev *dev) 4174static int cnic_register_netdev(struct cnic_dev *dev)
2468{ 4175{
2469 struct cnic_local *cp = dev->cnic_priv; 4176 struct cnic_local *cp = dev->cnic_priv;
@@ -2554,6 +4261,22 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2554 cnic_free_resc(dev); 4261 cnic_free_resc(dev);
2555} 4262}
2556 4263
4264
4265static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
4266{
4267 struct cnic_local *cp = dev->cnic_priv;
4268 u8 sb_id = cp->status_blk_num;
4269 int port = CNIC_PORT(cp);
4270
4271 cnic_free_irq(dev);
4272 CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4273 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
4274 offsetof(struct cstorm_status_block_c,
4275 index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
4276 0);
4277 cnic_free_resc(dev);
4278}
4279
2557static void cnic_stop_hw(struct cnic_dev *dev) 4280static void cnic_stop_hw(struct cnic_dev *dev)
2558{ 4281{
2559 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4282 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
@@ -2685,6 +4408,57 @@ cnic_err:
2685 return NULL; 4408 return NULL;
2686} 4409}
2687 4410
4411static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
4412{
4413 struct pci_dev *pdev;
4414 struct cnic_dev *cdev;
4415 struct cnic_local *cp;
4416 struct cnic_eth_dev *ethdev = NULL;
4417 struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
4418
4419 probe = symbol_get(bnx2x_cnic_probe);
4420 if (probe) {
4421 ethdev = (*probe)(dev);
4422 symbol_put(bnx2x_cnic_probe);
4423 }
4424 if (!ethdev)
4425 return NULL;
4426
4427 pdev = ethdev->pdev;
4428 if (!pdev)
4429 return NULL;
4430
4431 dev_hold(dev);
4432 cdev = cnic_alloc_dev(dev, pdev);
4433 if (cdev == NULL) {
4434 dev_put(dev);
4435 return NULL;
4436 }
4437
4438 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
4439 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
4440
4441 cp = cdev->cnic_priv;
4442 cp->ethdev = ethdev;
4443 cdev->pcidev = pdev;
4444
4445 cp->cnic_ops = &cnic_bnx2x_ops;
4446 cp->start_hw = cnic_start_bnx2x_hw;
4447 cp->stop_hw = cnic_stop_bnx2x_hw;
4448 cp->setup_pgtbl = cnic_setup_page_tbl_le;
4449 cp->alloc_resc = cnic_alloc_bnx2x_resc;
4450 cp->free_resc = cnic_free_resc;
4451 cp->start_cm = cnic_cm_init_bnx2x_hw;
4452 cp->stop_cm = cnic_cm_stop_bnx2x_hw;
4453 cp->enable_int = cnic_enable_bnx2x_int;
4454 cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
4455 cp->ack_int = cnic_ack_bnx2x_msix;
4456 cp->close_conn = cnic_close_bnx2x_conn;
4457 cp->next_idx = cnic_bnx2x_next_idx;
4458 cp->hw_idx = cnic_bnx2x_hw_idx;
4459 return cdev;
4460}
4461
2688static struct cnic_dev *is_cnic_dev(struct net_device *dev) 4462static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2689{ 4463{
2690 struct ethtool_drvinfo drvinfo; 4464 struct ethtool_drvinfo drvinfo;
@@ -2696,6 +4470,8 @@ static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2696 4470
2697 if (!strcmp(drvinfo.driver, "bnx2")) 4471 if (!strcmp(drvinfo.driver, "bnx2"))
2698 cdev = init_bnx2_cnic(dev); 4472 cdev = init_bnx2_cnic(dev);
4473 if (!strcmp(drvinfo.driver, "bnx2x"))
4474 cdev = init_bnx2x_cnic(dev);
2699 if (cdev) { 4475 if (cdev) {
2700 write_lock(&cnic_dev_lock); 4476 write_lock(&cnic_dev_lock);
2701 list_add(&cdev->list, &cnic_dev_list); 4477 list_add(&cdev->list, &cnic_dev_list);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index a94b302bb464..241d09acc0d4 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -227,6 +227,7 @@ struct cnic_local {
227 void *status_blk; 227 void *status_blk;
228 struct status_block_msix *bnx2_status_blk; 228 struct status_block_msix *bnx2_status_blk;
229 struct host_status_block *bnx2x_status_blk; 229 struct host_status_block *bnx2x_status_blk;
230 struct host_def_status_block *bnx2x_def_status_blk;
230 231
231 u32 status_blk_num; 232 u32 status_blk_num;
232 u32 int_num; 233 u32 int_num;
@@ -258,6 +259,7 @@ struct cnic_local {
258 struct cnic_ctx *ctx_arr; 259 struct cnic_ctx *ctx_arr;
259 int ctx_blks; 260 int ctx_blks;
260 int ctx_blk_size; 261 int ctx_blk_size;
262 unsigned long ctx_align;
261 int cids_per_blk; 263 int cids_per_blk;
262 264
263 u32 chip_id; 265 u32 chip_id;
@@ -290,11 +292,73 @@ struct bnx2x_bd_chain_next {
290 u8 reserved[8]; 292 u8 reserved[8];
291}; 293};
292 294
295#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
296
293#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN) 297#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
294#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT) 298#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
295 299
296#define CDU_REGION_NUMBER_XCM_AG 2 300#define CDU_REGION_NUMBER_XCM_AG 2
297#define CDU_REGION_NUMBER_UCM_AG 4 301#define CDU_REGION_NUMBER_UCM_AG 4
298 302
303#define CDU_VALID_DATA(_cid, _region, _type) \
304 (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
305
306#define CDU_CRC8(_cid, _region, _type) \
307 (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
308
309#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
310 (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
311
312#define BNX2X_CONTEXT_MEM_SIZE 1024
313#define BNX2X_FCOE_CID 16
314
315/* iSCSI client IDs are 17, 19, 21, 23 */
316#define BNX2X_ISCSI_BASE_CL_ID 17
317#define BNX2X_ISCSI_CL_ID(vn) (BNX2X_ISCSI_BASE_CL_ID + ((vn) << 1))
318
319#define BNX2X_ISCSI_L2_CID 17
320#define BNX2X_ISCSI_START_CID 18
321#define BNX2X_ISCSI_NUM_CONNECTIONS 128
322#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
323#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
324#define BNX2X_ISCSI_R2TQE_SIZE 8
325#define BNX2X_ISCSI_HQ_BD_SIZE 64
326#define BNX2X_ISCSI_CONN_BUF_SIZE 64
327#define BNX2X_ISCSI_GLB_BUF_SIZE 64
328#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
329#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
330#define BNX2X_HW_CID(x, func) ((x) | (((func) % PORT_MAX) << 23) | \
331 (((func) >> 1) << 17))
332#define BNX2X_SW_CID(x) (x & 0x1ffff)
333#define BNX2X_CHIP_NUM_57711 0x164f
334#define BNX2X_CHIP_NUM_57711E 0x1650
335#define BNX2X_CHIP_NUM(x) (x >> 16)
336#define BNX2X_CHIP_IS_57711(x) \
337 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
338#define BNX2X_CHIP_IS_57711E(x) \
339 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
340#define BNX2X_CHIP_IS_E1H(x) \
341 (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
342#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
343
344#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
345#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
346#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
347#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
348
349#define BNX2X_DEF_SB_ID 16
350
351#define BNX2X_ISCSI_RX_SB_INDEX_NUM \
352 ((HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS << \
353 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
354 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER)
355
356#define BNX2X_SHMEM_ADDR(base, field) (base + \
357 offsetof(struct shmem_region, field))
358
359#define CNIC_PORT(cp) ((cp)->func % PORT_MAX)
360#define CNIC_FUNC(cp) ((cp)->func)
361#define CNIC_E1HVN(cp) ((cp)->func >> 1)
362
299#endif 363#endif
300 364
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index cee80f694457..9827b278dc7c 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -51,6 +51,9 @@
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) 51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) 52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53 53
54#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
55#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
56
54#define L4_LAYER_CODE (4) 57#define L4_LAYER_CODE (4)
55#define L2_LAYER_CODE (2) 58#define L2_LAYER_CODE (2)
56 59
@@ -577,4 +580,1918 @@ struct l4_kwq_upload {
577 u32 reserved2[6]; 580 u32 reserved2[6];
578}; 581};
579 582
583/*
584 * bnx2x structures
585 */
586
587/*
588 * iSCSI context region, used only in iSCSI
589 */
590struct ustorm_iscsi_rq_db {
591 struct regpair pbl_base;
592 struct regpair curr_pbe;
593};
594
595/*
596 * iSCSI context region, used only in iSCSI
597 */
598struct ustorm_iscsi_r2tq_db {
599 struct regpair pbl_base;
600 struct regpair curr_pbe;
601};
602
603/*
604 * iSCSI context region, used only in iSCSI
605 */
606struct ustorm_iscsi_cq_db {
607#if defined(__BIG_ENDIAN)
608 u16 cq_sn;
609 u16 prod;
610#elif defined(__LITTLE_ENDIAN)
611 u16 prod;
612 u16 cq_sn;
613#endif
614 struct regpair curr_pbe;
615};
616
617/*
618 * iSCSI context region, used only in iSCSI
619 */
620struct rings_db {
621 struct ustorm_iscsi_rq_db rq;
622 struct ustorm_iscsi_r2tq_db r2tq;
623 struct ustorm_iscsi_cq_db cq[8];
624#if defined(__BIG_ENDIAN)
625 u16 rq_prod;
626 u16 r2tq_prod;
627#elif defined(__LITTLE_ENDIAN)
628 u16 r2tq_prod;
629 u16 rq_prod;
630#endif
631 struct regpair cq_pbl_base;
632};
633
634/*
635 * iSCSI context region, used only in iSCSI
636 */
637struct ustorm_iscsi_placement_db {
638 u32 sgl_base_lo;
639 u32 sgl_base_hi;
640 u32 local_sge_0_address_hi;
641 u32 local_sge_0_address_lo;
642#if defined(__BIG_ENDIAN)
643 u16 curr_sge_offset;
644 u16 local_sge_0_size;
645#elif defined(__LITTLE_ENDIAN)
646 u16 local_sge_0_size;
647 u16 curr_sge_offset;
648#endif
649 u32 local_sge_1_address_hi;
650 u32 local_sge_1_address_lo;
651#if defined(__BIG_ENDIAN)
652 u16 reserved6;
653 u16 local_sge_1_size;
654#elif defined(__LITTLE_ENDIAN)
655 u16 local_sge_1_size;
656 u16 reserved6;
657#endif
658#if defined(__BIG_ENDIAN)
659 u8 sgl_size;
660 u8 local_sge_index_2b;
661 u16 reserved7;
662#elif defined(__LITTLE_ENDIAN)
663 u16 reserved7;
664 u8 local_sge_index_2b;
665 u8 sgl_size;
666#endif
667 u32 rem_pdu;
668 u32 place_db_bitfield_1;
669#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
670#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
671#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
672#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
673 u32 place_db_bitfield_2;
674#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
675#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
676#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
677#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
678 u32 nal;
679#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
680#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
681#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B (0x3<<24)
682#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B_SHIFT 24
683#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0x7<<26)
684#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 26
685#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B (0x7<<29)
686#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B_SHIFT 29
687};
688
689/*
690 * Ustorm iSCSI Storm Context
691 */
692struct ustorm_iscsi_st_context {
693 u32 exp_stat_sn;
694 u32 exp_data_sn;
695 struct rings_db ring;
696 struct regpair task_pbl_base;
697 struct regpair tce_phy_addr;
698 struct ustorm_iscsi_placement_db place_db;
699 u32 data_rcv_seq;
700 u32 rem_rcv_len;
701#if defined(__BIG_ENDIAN)
702 u16 hdr_itt;
703 u16 iscsi_conn_id;
704#elif defined(__LITTLE_ENDIAN)
705 u16 iscsi_conn_id;
706 u16 hdr_itt;
707#endif
708 u32 nal_bytes;
709#if defined(__BIG_ENDIAN)
710 u8 hdr_second_byte_union;
711 u8 bitfield_0;
712#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
713#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
714#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
715#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
716#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
717#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
718 u8 task_pdu_cache_index;
719 u8 task_pbe_cache_index;
720#elif defined(__LITTLE_ENDIAN)
721 u8 task_pbe_cache_index;
722 u8 task_pdu_cache_index;
723 u8 bitfield_0;
724#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
725#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
726#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
727#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
728#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
729#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
730 u8 hdr_second_byte_union;
731#endif
732#if defined(__BIG_ENDIAN)
733 u16 reserved3;
734 u8 reserved2;
735 u8 acDecrement;
736#elif defined(__LITTLE_ENDIAN)
737 u8 acDecrement;
738 u8 reserved2;
739 u16 reserved3;
740#endif
741 u32 task_stat;
742#if defined(__BIG_ENDIAN)
743 u8 hdr_opcode;
744 u8 num_cqs;
745 u16 reserved5;
746#elif defined(__LITTLE_ENDIAN)
747 u16 reserved5;
748 u8 num_cqs;
749 u8 hdr_opcode;
750#endif
751 u32 negotiated_rx;
752#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
753#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
754#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
755#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
756 u32 negotiated_rx_and_flags;
757#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
758#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
759#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
760#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
761#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
762#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
763#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
764#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
765#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
766#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
767#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
768#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
769#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
770#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
771#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
772#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
773};
774
775/*
776 * TCP context region, shared in TOE, RDMA and ISCSI
777 */
778struct tstorm_tcp_st_context_section {
779 u32 flags1;
780#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B (0xFFFFFF<<0)
781#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B_SHIFT 0
782#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
783#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
784#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
785#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
786#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS (0x1<<26)
787#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS_SHIFT 26
788#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
789#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
790#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
791#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
792#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
793#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
794#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
795#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
796#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3 (0x1<<31)
797#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3_SHIFT 31
798 u32 flags2;
799#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B (0xFFFFFF<<0)
800#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B_SHIFT 0
801#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
802#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
803#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
804#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
805#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
806#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
807#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
808#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
809#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
810#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
811#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
812#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
813#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED (0x1<<30)
814#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED_SHIFT 30
815#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO (0x1<<31)
816#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO_SHIFT 31
817#if defined(__BIG_ENDIAN)
818 u16 reserved_slowpath;
819 u8 tcp_sm_state_3b;
820 u8 rto_exp_3b;
821#elif defined(__LITTLE_ENDIAN)
822 u8 rto_exp_3b;
823 u8 tcp_sm_state_3b;
824 u16 reserved_slowpath;
825#endif
826 u32 rcv_nxt;
827 u32 timestamp_recent;
828 u32 timestamp_recent_time;
829 u32 cwnd;
830 u32 ss_thresh;
831 u32 cwnd_accum;
832 u32 prev_seg_seq;
833 u32 expected_rel_seq;
834 u32 recover;
835#if defined(__BIG_ENDIAN)
836 u8 retransmit_count;
837 u8 ka_max_probe_count;
838 u8 persist_probe_count;
839 u8 ka_probe_count;
840#elif defined(__LITTLE_ENDIAN)
841 u8 ka_probe_count;
842 u8 persist_probe_count;
843 u8 ka_max_probe_count;
844 u8 retransmit_count;
845#endif
846#if defined(__BIG_ENDIAN)
847 u8 statistics_counter_id;
848 u8 ooo_support_mode;
849 u8 snd_wnd_scale_4b;
850 u8 dup_ack_count;
851#elif defined(__LITTLE_ENDIAN)
852 u8 dup_ack_count;
853 u8 snd_wnd_scale_4b;
854 u8 ooo_support_mode;
855 u8 statistics_counter_id;
856#endif
857 u32 retransmit_start_time;
858 u32 ka_timeout;
859 u32 ka_interval;
860 u32 isle_start_seq;
861 u32 isle_end_seq;
862#if defined(__BIG_ENDIAN)
863 u16 mss;
864 u16 recent_seg_wnd;
865#elif defined(__LITTLE_ENDIAN)
866 u16 recent_seg_wnd;
867 u16 mss;
868#endif
869 u32 reserved4;
870 u32 max_rt_time;
871#if defined(__BIG_ENDIAN)
872 u16 lsb_mac_address;
873 u16 vlan_id;
874#elif defined(__LITTLE_ENDIAN)
875 u16 vlan_id;
876 u16 lsb_mac_address;
877#endif
878 u32 msb_mac_address;
879 u32 reserved2;
880};
881
882/*
883 * Termination variables
884 */
885struct iscsi_term_vars {
886 u8 BitMap;
887#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
888#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
889#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
890#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
891#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
892#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
893#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
894#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
895#define ISCSI_TERM_VARS_RSRV (0x1<<7)
896#define ISCSI_TERM_VARS_RSRV_SHIFT 7
897};
898
899/*
900 * iSCSI context region, used only in iSCSI
901 */
902struct tstorm_iscsi_st_context_section {
903#if defined(__BIG_ENDIAN)
904 u16 rem_tcp_data_len;
905 u16 brb_offset;
906#elif defined(__LITTLE_ENDIAN)
907 u16 brb_offset;
908 u16 rem_tcp_data_len;
909#endif
910 u32 b2nh;
911#if defined(__BIG_ENDIAN)
912 u16 rq_cons;
913 u8 flags;
914#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
915#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
916#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
917#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
918#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
919#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
920#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
921#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
922#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
923#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
924#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
925#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
926 u8 hdr_bytes_2_fetch;
927#elif defined(__LITTLE_ENDIAN)
928 u8 hdr_bytes_2_fetch;
929 u8 flags;
930#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
931#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
932#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
933#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
934#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
935#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
936#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
937#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
938#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
939#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
940#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
941#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
942 u16 rq_cons;
943#endif
944 struct regpair rq_db_phy_addr;
945#if defined(__BIG_ENDIAN)
946 struct iscsi_term_vars term_vars;
947 u8 scratchpad_idx;
948 u16 iscsi_conn_id;
949#elif defined(__LITTLE_ENDIAN)
950 u16 iscsi_conn_id;
951 u8 scratchpad_idx;
952 struct iscsi_term_vars term_vars;
953#endif
954 u32 reserved2;
955};
956
957/*
958 * The iSCSI non-aggregative context of Tstorm
959 */
960struct tstorm_iscsi_st_context {
961 struct tstorm_tcp_st_context_section tcp;
962 struct tstorm_iscsi_st_context_section iscsi;
963};
964
965/*
966 * The tcp aggregative context section of Xstorm
967 */
968struct xstorm_tcp_tcp_ag_context_section {
969#if defined(__BIG_ENDIAN)
970 u8 __tcp_agg_vars1;
971 u8 __da_cnt;
972 u16 mss;
973#elif defined(__LITTLE_ENDIAN)
974 u16 mss;
975 u8 __da_cnt;
976 u8 __tcp_agg_vars1;
977#endif
978 u32 snd_nxt;
979 u32 tx_wnd;
980 u32 snd_una;
981 u32 local_adv_wnd;
982#if defined(__BIG_ENDIAN)
983 u8 __agg_val8_th;
984 u8 __agg_val8;
985 u16 tcp_agg_vars2;
986#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
987#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
988#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
989#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
990#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
991#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
992#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
993#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
994#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
995#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
996#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
997#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
998#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
999#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
1000#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
1001#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
1002#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
1003#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
1004#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
1005#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
1006#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
1007#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
1008#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
1009#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
1010#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
1011#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
1012#elif defined(__LITTLE_ENDIAN)
1013 u16 tcp_agg_vars2;
1014#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
1015#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
1016#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
1017#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
1018#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
1019#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
1020#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
1021#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
1022#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
1023#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
1024#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
1025#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
1026#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
1027#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
1028#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
1029#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
1030#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
1031#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
1032#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
1033#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
1034#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
1035#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
1036#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
1037#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
1038#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
1039#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
1040 u8 __agg_val8;
1041 u8 __agg_val8_th;
1042#endif
1043 u32 ack_to_far_end;
1044 u32 rto_timer;
1045 u32 ka_timer;
1046 u32 ts_to_echo;
1047#if defined(__BIG_ENDIAN)
1048 u16 __agg_val7_th;
1049 u16 __agg_val7;
1050#elif defined(__LITTLE_ENDIAN)
1051 u16 __agg_val7;
1052 u16 __agg_val7_th;
1053#endif
1054#if defined(__BIG_ENDIAN)
1055 u8 __tcp_agg_vars5;
1056 u8 __tcp_agg_vars4;
1057 u8 __tcp_agg_vars3;
1058 u8 __force_pure_ack_cnt;
1059#elif defined(__LITTLE_ENDIAN)
1060 u8 __force_pure_ack_cnt;
1061 u8 __tcp_agg_vars3;
1062 u8 __tcp_agg_vars4;
1063 u8 __tcp_agg_vars5;
1064#endif
1065 u32 tcp_agg_vars6;
1066#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
1067#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
1068#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN (0x1<<1)
1069#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN_SHIFT 1
1070#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
1071#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
1072#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
1073#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
1074#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
1075#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
1076#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
1077#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
1078#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
1079#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
1080#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
1081#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
1082#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
1083#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
1084#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
1085#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
1086#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
1087#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
1088#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
1089#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
1090#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
1091#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
1092#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
1093#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
1094#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
1095#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
1096#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
1097#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
1098#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
1099#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
1100#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
1101#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
1102#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
1103#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
1104#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
1105#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
1106#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
1107#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
1108#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
1109#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
1110#if defined(__BIG_ENDIAN)
1111 u16 __agg_misc6;
1112 u16 __tcp_agg_vars7;
1113#elif defined(__LITTLE_ENDIAN)
1114 u16 __tcp_agg_vars7;
1115 u16 __agg_misc6;
1116#endif
1117 u32 __agg_val10;
1118 u32 __agg_val10_th;
1119#if defined(__BIG_ENDIAN)
1120 u16 __reserved3;
1121 u8 __reserved2;
1122 u8 __da_only_cnt;
1123#elif defined(__LITTLE_ENDIAN)
1124 u8 __da_only_cnt;
1125 u8 __reserved2;
1126 u16 __reserved3;
1127#endif
1128};
1129
1130/*
1131 * The iscsi aggregative context of Xstorm
1132 */
1133struct xstorm_iscsi_ag_context {
1134#if defined(__BIG_ENDIAN)
1135 u16 agg_val1;
1136 u8 agg_vars1;
1137#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1138#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1139#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1140#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1141#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1142#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1143#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1144#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1145#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
1146#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
1147#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
1148#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
1149#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
1150#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
1151#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
1152#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
1153 u8 state;
1154#elif defined(__LITTLE_ENDIAN)
1155 u8 state;
1156 u8 agg_vars1;
1157#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1158#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1159#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1160#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1161#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1162#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1163#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1164#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1165#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
1166#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
1167#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
1168#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
1169#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
1170#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
1171#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
1172#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
1173 u16 agg_val1;
1174#endif
1175#if defined(__BIG_ENDIAN)
1176 u8 cdu_reserved;
1177 u8 agg_vars4;
1178#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1179#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1180#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1181#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1182#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1183#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1184#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1185#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1186#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1187#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1188#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1189#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1190 u8 agg_vars3;
1191#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1192#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1193#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
1194#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
1195 u8 agg_vars2;
1196#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
1197#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
1198#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
1199#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
1200#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
1201#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
1202#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
1203#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
1204#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
1205#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
1206#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1207#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1208#elif defined(__LITTLE_ENDIAN)
1209 u8 agg_vars2;
1210#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
1211#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
1212#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
1213#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
1214#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
1215#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
1216#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
1217#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
1218#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
1219#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
1220#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1221#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1222 u8 agg_vars3;
1223#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1224#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1225#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
1226#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
1227 u8 agg_vars4;
1228#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1229#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1230#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1231#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1232#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1233#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1234#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1235#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1236#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1237#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1238#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1239#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1240 u8 cdu_reserved;
1241#endif
1242 u32 more_to_send;
1243#if defined(__BIG_ENDIAN)
1244 u16 agg_vars5;
1245#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
1246#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
1247#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
1248#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
1249#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
1250#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
1251#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
1252#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
1253 u16 sq_cons;
1254#elif defined(__LITTLE_ENDIAN)
1255 u16 sq_cons;
1256 u16 agg_vars5;
1257#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
1258#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
1259#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
1260#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
1261#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
1262#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
1263#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
1264#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
1265#endif
1266 struct xstorm_tcp_tcp_ag_context_section tcp;
1267#if defined(__BIG_ENDIAN)
1268 u16 agg_vars7;
1269#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
1270#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1271#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1272#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1273#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
1274#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
1275#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1276#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1277#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
1278#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
1279#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
1280#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
1281#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
1282#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
1283#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
1284#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
1285#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
1286#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1287#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1288#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1289#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
1290#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
1291 u8 agg_val3_th;
1292 u8 agg_vars6;
1293#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
1294#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
1295#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
1296#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
1297#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
1298#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
1299#elif defined(__LITTLE_ENDIAN)
1300 u8 agg_vars6;
1301#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
1302#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
1303#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
1304#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
1305#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
1306#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
1307 u8 agg_val3_th;
1308 u16 agg_vars7;
1309#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
1310#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1311#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1312#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1313#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
1314#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
1315#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1316#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1317#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
1318#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
1319#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
1320#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
1321#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
1322#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
1323#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
1324#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
1325#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
1326#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1327#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1328#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1329#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
1330#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
1331#endif
1332#if defined(__BIG_ENDIAN)
1333 u16 __agg_val11_th;
1334 u16 __agg_val11;
1335#elif defined(__LITTLE_ENDIAN)
1336 u16 __agg_val11;
1337 u16 __agg_val11_th;
1338#endif
1339#if defined(__BIG_ENDIAN)
1340 u8 __reserved1;
1341 u8 __agg_val6_th;
1342 u16 __agg_val9;
1343#elif defined(__LITTLE_ENDIAN)
1344 u16 __agg_val9;
1345 u8 __agg_val6_th;
1346 u8 __reserved1;
1347#endif
1348#if defined(__BIG_ENDIAN)
1349 u16 hq_prod;
1350 u16 hq_cons;
1351#elif defined(__LITTLE_ENDIAN)
1352 u16 hq_cons;
1353 u16 hq_prod;
1354#endif
1355 u32 agg_vars8;
1356#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
1357#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
1358#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
1359#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
1360#if defined(__BIG_ENDIAN)
1361 u16 r2tq_prod;
1362 u16 sq_prod;
1363#elif defined(__LITTLE_ENDIAN)
1364 u16 sq_prod;
1365 u16 r2tq_prod;
1366#endif
1367#if defined(__BIG_ENDIAN)
1368 u8 agg_val3;
1369 u8 agg_val6;
1370 u8 agg_val5_th;
1371 u8 agg_val5;
1372#elif defined(__LITTLE_ENDIAN)
1373 u8 agg_val5;
1374 u8 agg_val5_th;
1375 u8 agg_val6;
1376 u8 agg_val3;
1377#endif
1378#if defined(__BIG_ENDIAN)
1379 u16 __agg_misc1;
1380 u16 agg_limit1;
1381#elif defined(__LITTLE_ENDIAN)
1382 u16 agg_limit1;
1383 u16 __agg_misc1;
1384#endif
1385 u32 hq_cons_tcp_seq;
1386 u32 exp_stat_sn;
1387 u32 agg_misc5;
1388};
1389
/*
 * The tcp aggregative context section of Tstorm
 *
 * Shared hardware/firmware interface (HSI) structure describing the TCP
 * part of the per-connection aggregative context kept by the on-chip
 * Tstorm processor.  Each group of fields is mirrored between the
 * __BIG_ENDIAN and __LITTLE_ENDIAN branches so that the in-memory byte
 * image is identical on both host endiannesses.
 * NOTE(review): fields with a leading "__" appear to be firmware-internal
 * (never named by driver code) — confirm against the firmware HSI spec.
 */
struct tstorm_tcp_tcp_ag_context_section {
	u32 __agg_val1;
#if defined(__BIG_ENDIAN)
	u8 __tcp_agg_vars2;
	u8 __agg_val3;
	u16 __agg_val2;
#elif defined(__LITTLE_ENDIAN)
	u16 __agg_val2;
	u8 __agg_val3;
	u8 __tcp_agg_vars2;
#endif
#if defined(__BIG_ENDIAN)
	u16 __agg_val5;
	u8 __agg_val6;
	u8 __tcp_agg_vars3;
#elif defined(__LITTLE_ENDIAN)
	u8 __tcp_agg_vars3;
	u8 __agg_val6;
	u16 __agg_val5;
#endif
	u32 snd_nxt;		/* TCP send-next sequence number */
	u32 rtt_seq;		/* sequence number used for RTT sampling */
	u32 rtt_time;		/* timestamp of the RTT sample */
	u32 __reserved66;
	u32 wnd_right_edge;	/* right edge of the TCP send window */
	/*
	 * tcp_agg_vars1 is a packed 32-bit flag/counter word; each logical
	 * field is accessed with the (mask<<shift) / _SHIFT macro pairs below.
	 */
	u32 tcp_agg_vars1;
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
	u32 snd_max;		/* highest sequence number ever sent */
	u32 snd_una;		/* oldest unacknowledged sequence number */
	u32 __reserved2;
};
1465
/*
 * The iscsi aggregative context of Tstorm
 *
 * HSI layout shared with the on-chip Tstorm processor.  The scalar
 * fields inside each #if/#elif pair are mirrored so the byte image of
 * every 32-bit word is the same on big- and little-endian hosts; the
 * mask/_SHIFT macro pairs describe sub-fields of the packed flag words.
 * Embeds the generic TCP aggregative section at the end.
 */
struct tstorm_iscsi_ag_context {
#if defined(__BIG_ENDIAN)
	u16 ulp_credit;
	u8 agg_vars1;
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
	u8 state;
#elif defined(__LITTLE_ENDIAN)
	u8 state;
	/* same bit assignments as the big-endian branch above */
	u8 agg_vars1;
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
	u16 ulp_credit;
#endif
#if defined(__BIG_ENDIAN)
	u16 __agg_val4;
	u16 agg_vars2;
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
#elif defined(__LITTLE_ENDIAN)
	/* same bit assignments as the big-endian branch above */
	u16 agg_vars2;
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
	u16 __agg_val4;
#endif
	struct tstorm_tcp_tcp_ag_context_section tcp;	/* shared TCP section */
};
1564
/*
 * The iscsi aggregative context of Cstorm
 *
 * HSI layout shared with the on-chip Cstorm processor.  agg_vars1 is a
 * packed 32-bit word whose sub-fields are accessed via the mask/_SHIFT
 * macro pairs; the remaining fields are endian-mirrored per 32-bit word.
 * NOTE(review): "__"-prefixed fields/macros appear to be firmware-internal
 * (not referenced by driver code) — confirm against the firmware HSI spec.
 */
struct cstorm_iscsi_ag_context {
	u32 agg_vars1;
#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
#if defined(__BIG_ENDIAN)
	u8 __aux1_th;
	u8 __aux1_val;
	u16 __agg_vars2;
#elif defined(__LITTLE_ENDIAN)
	u16 __agg_vars2;
	u8 __aux1_val;
	u8 __aux1_th;
#endif
	u32 rel_seq;
	u32 rel_seq_th;
#if defined(__BIG_ENDIAN)
	u16 hq_cons;	/* host queue consumer index */
	u16 hq_prod;	/* host queue producer index */
#elif defined(__LITTLE_ENDIAN)
	u16 hq_prod;
	u16 hq_cons;
#endif
#if defined(__BIG_ENDIAN)
	u8 __reserved62;
	u8 __reserved61;
	u8 __reserved60;
	u8 __reserved59;
#elif defined(__LITTLE_ENDIAN)
	u8 __reserved59;
	u8 __reserved60;
	u8 __reserved61;
	u8 __reserved62;
#endif
#if defined(__BIG_ENDIAN)
	u16 __reserved64;
	u16 __cq_u_prod0;
#elif defined(__LITTLE_ENDIAN)
	u16 __cq_u_prod0;
	u16 __reserved64;
#endif
	u32 __cq_u_prod1;
#if defined(__BIG_ENDIAN)
	u16 __agg_vars3;
	u16 __cq_u_prod2;
#elif defined(__LITTLE_ENDIAN)
	u16 __cq_u_prod2;
	u16 __agg_vars3;
#endif
#if defined(__BIG_ENDIAN)
	u16 __aux2_th;
	u16 __cq_u_prod3;
#elif defined(__LITTLE_ENDIAN)
	u16 __cq_u_prod3;
	u16 __aux2_th;
#endif
};
1658
/*
 * The iscsi aggregative context of Ustorm
 *
 * HSI layout shared with the on-chip Ustorm processor.  Scalars inside
 * each #if/#elif pair are mirrored so every 32-bit word has the same
 * byte image on big- and little-endian hosts; packed words carry their
 * sub-field definitions as mask/_SHIFT macro pairs immediately below
 * the field.
 */
struct ustorm_iscsi_ag_context {
#if defined(__BIG_ENDIAN)
	u8 __aux_counter_flags;
	u8 agg_vars2;
#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
	u8 agg_vars1;
#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
	u8 state;
#elif defined(__LITTLE_ENDIAN)
	u8 state;
	/* same bit assignments as the big-endian branch above */
	u8 agg_vars1;
#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
	u8 agg_vars2;
#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
	u8 __aux_counter_flags;
#endif
#if defined(__BIG_ENDIAN)
	u8 cdu_usage;
	u8 agg_misc2;
	u16 __cq_local_comp_itt_val;
#elif defined(__LITTLE_ENDIAN)
	u16 __cq_local_comp_itt_val;
	u8 agg_misc2;
	u8 cdu_usage;
#endif
	u32 agg_misc4;
#if defined(__BIG_ENDIAN)
	u8 agg_val3_th;
	u8 agg_val3;
	u16 agg_misc3;
#elif defined(__LITTLE_ENDIAN)
	u16 agg_misc3;
	u8 agg_val3;
	u8 agg_val3_th;
#endif
	u32 agg_val1;
	u32 agg_misc4_th;
#if defined(__BIG_ENDIAN)
	u16 agg_val2_th;
	u16 agg_val2;
#elif defined(__LITTLE_ENDIAN)
	u16 agg_val2;
	u16 agg_val2_th;
#endif
#if defined(__BIG_ENDIAN)
	u16 __reserved2;
	u8 decision_rules;
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
	u8 decision_rule_enable_bits;
#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
#elif defined(__LITTLE_ENDIAN)
	/* same bit assignments as the big-endian branch above */
	u8 decision_rule_enable_bits;
#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
	u8 decision_rules;
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
	u16 __reserved2;
#endif
};
1800
/*
 * Timers connection context
 *
 * HSI block for the per-connection hardware timers.  Only the packed
 * 'flags' word carries driver-visible information; the three leading
 * words are reserved.
 */
struct iscsi_timers_block_context {
	u32 __reserved_0;
	u32 __reserved_1;
	u32 __reserved_2;
	u32 flags;
/* number of currently active timers (2-bit counter) */
#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
/* set while the connection is valid for timer processing */
#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
};
1816
/*
 * Ethernet context section, shared in TOE, RDMA and ISCSI
 *
 * Holds the local and remote MAC addresses (as individual bytes,
 * endian-mirrored per 32-bit word) plus the VLAN tag parameters used
 * when the Xstorm processor builds outgoing Ethernet headers.
 * NOTE(review): byte index 0 is presumably the first byte on the wire —
 * confirm against the firmware HSI spec.
 */
struct xstorm_eth_context_section {
#if defined(__BIG_ENDIAN)
	u8 remote_addr_4;
	u8 remote_addr_5;
	u8 local_addr_0;
	u8 local_addr_1;
#elif defined(__LITTLE_ENDIAN)
	u8 local_addr_1;
	u8 local_addr_0;
	u8 remote_addr_5;
	u8 remote_addr_4;
#endif
#if defined(__BIG_ENDIAN)
	u8 remote_addr_0;
	u8 remote_addr_1;
	u8 remote_addr_2;
	u8 remote_addr_3;
#elif defined(__LITTLE_ENDIAN)
	u8 remote_addr_3;
	u8 remote_addr_2;
	u8 remote_addr_1;
	u8 remote_addr_0;
#endif
#if defined(__BIG_ENDIAN)
	u16 reserved_vlan_type;
	/* 802.1Q tag: 12-bit VLAN ID, CFI bit, 3-bit priority */
	u16 params;
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
#elif defined(__LITTLE_ENDIAN)
	u16 params;
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
	u16 reserved_vlan_type;
#endif
#if defined(__BIG_ENDIAN)
	u8 local_addr_2;
	u8 local_addr_3;
	u8 local_addr_4;
	u8 local_addr_5;
#elif defined(__LITTLE_ENDIAN)
	u8 local_addr_5;
	u8 local_addr_4;
	u8 local_addr_3;
	u8 local_addr_2;
#endif
};
1874
/*
 * IpV4 context section, shared in TOE, RDMA and ISCSI
 *
 * IPv4 header template used by the Xstorm processor when building
 * outgoing packets.  The driver-visible fields are tos, ttl and the
 * two addresses; the "__pbf_hdr_cmd_rsvd_*" fields mirror IPv4 header
 * positions (version/IHL, total length, checksum, protocol) that are
 * presumably filled in by firmware/PBF — confirm against the HSI spec.
 */
struct xstorm_ip_v4_context_section {
#if defined(__BIG_ENDIAN)
	u16 __pbf_hdr_cmd_rsvd_id;
	u16 __pbf_hdr_cmd_rsvd_flags_offset;
#elif defined(__LITTLE_ENDIAN)
	u16 __pbf_hdr_cmd_rsvd_flags_offset;
	u16 __pbf_hdr_cmd_rsvd_id;
#endif
#if defined(__BIG_ENDIAN)
	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
	u8 tos;			/* IPv4 type-of-service byte */
	u16 __pbf_hdr_cmd_rsvd_length;
#elif defined(__LITTLE_ENDIAN)
	u16 __pbf_hdr_cmd_rsvd_length;
	u8 tos;
	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
#endif
	u32 ip_local_addr;	/* local (source) IPv4 address */
#if defined(__BIG_ENDIAN)
	u8 ttl;			/* IPv4 time-to-live */
	u8 __pbf_hdr_cmd_rsvd_protocol;
	u16 __pbf_hdr_cmd_rsvd_csum;
#elif defined(__LITTLE_ENDIAN)
	u16 __pbf_hdr_cmd_rsvd_csum;
	u8 __pbf_hdr_cmd_rsvd_protocol;
	u8 ttl;
#endif
	u32 __pbf_hdr_cmd_rsvd_1;
	u32 ip_remote_addr;	/* remote (destination) IPv4 address */
};
1908
/*
 * context section, shared in TOE, RDMA and ISCSI
 *
 * IPv4 section padded with four reserved words so it occupies the same
 * space as the (larger) IPv6 section inside the IP-context union.
 */
struct xstorm_padded_ip_v4_context_section {
	struct xstorm_ip_v4_context_section ip_v4;
	u32 reserved1[4];	/* pad to the size of the IPv6 variant */
};
1916
/*
 * IpV6 context section, shared in TOE, RDMA and ISCSI
 *
 * IPv6 header template used by the Xstorm processor.  Addresses are
 * stored as four 32-bit words each, split into hi/lo 64-bit halves
 * with _hi/_lo word suffixes; priority_flow_label packs the flow
 * label, traffic class and version nibble.
 */
struct xstorm_ip_v6_context_section {
#if defined(__BIG_ENDIAN)
	u16 pbf_hdr_cmd_rsvd_payload_len;
	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
	u8 hop_limit;		/* IPv6 hop limit (TTL equivalent) */
#elif defined(__LITTLE_ENDIAN)
	u8 hop_limit;
	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
	u16 pbf_hdr_cmd_rsvd_payload_len;
#endif
	/* packed first word of the IPv6 header: flow label / class / version */
	u32 priority_flow_label;
#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0)
#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0
#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20)
#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20
#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28)
#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28
	u32 ip_local_addr_lo_hi;
	u32 ip_local_addr_lo_lo;
	u32 ip_local_addr_hi_hi;
	u32 ip_local_addr_hi_lo;
	u32 ip_remote_addr_lo_hi;
	u32 ip_remote_addr_lo_lo;
	u32 ip_remote_addr_hi_hi;
	u32 ip_remote_addr_hi_lo;
};
1946
/*
 * Overlay of the IPv4 (padded) and IPv6 context sections; which member
 * is active depends on the connection's IP version.
 */
union xstorm_ip_context_section_types {
	struct xstorm_padded_ip_v4_context_section padded_ip_v4;
	struct xstorm_ip_v6_context_section ip_v6;
};
1951
1952/*
1953 * TCP context section, shared in TOE, RDMA and ISCSI
1954 */
/*
 * TCP connection state as seen by the Xstorm processor.  Halfword and
 * byte fields are mirrored under the two endian branches so the packed
 * 32-bit words have a fixed device layout.  The bitfield #defines are
 * repeated in both branches so the names exist whichever branch the
 * preprocessor keeps.  Device-shared layout: do not reorder fields.
 */
1955struct xstorm_tcp_context_section {
1956 u32 snd_max;
1957#if defined(__BIG_ENDIAN)
1958 u16 remote_port;
1959 u16 local_port;
1960#elif defined(__LITTLE_ENDIAN)
1961 u16 local_port;
1962 u16 remote_port;
1963#endif
1964#if defined(__BIG_ENDIAN)
1965 u8 original_nagle_1b;
1966 u8 ts_enabled_1b;
	/* leading "__" names appear to be firmware-internal -- do not touch */
1967 u16 tcp_params;
1968#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
1969#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
1970#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
1971#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
1972#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
1973#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1974#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1975#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1976#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
1977#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
1978#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1979#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1980#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
1981#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
1982#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
1983#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
1984#elif defined(__LITTLE_ENDIAN)
1985 u16 tcp_params;
1986#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
1987#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
1988#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
1989#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
1990#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
1991#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1992#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1993#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1994#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
1995#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
1996#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1997#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1998#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
1999#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
2000#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
2001#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
2002 u8 ts_enabled_1b;
2003 u8 original_nagle_1b;
2004#endif
2005#if defined(__BIG_ENDIAN)
2006 u16 pseudo_csum;
2007 u16 window_scaling_factor;
2008#elif defined(__LITTLE_ENDIAN)
2009 u16 window_scaling_factor;
2010 u16 pseudo_csum;
2011#endif
2012 u32 reserved2;
2013 u32 ts_time_diff;
2014 u32 __next_timer_expir;
2015};
2016
2017/*
2018 * Common context section, shared in TOE, RDMA and ISCSI
2019 */
/*
 * Per-connection Xstorm context shared by the TOE, RDMA and iSCSI
 * offload paths: Ethernet, IP (v4-or-v6 union) and TCP sub-sections,
 * followed by a statistics-control byte and the IP-version selector.
 * Device-shared layout: do not reorder fields.
 */
2020struct xstorm_common_context_section {
2021 struct xstorm_eth_context_section ethernet;
2022 union xstorm_ip_context_section_types ip_union;
2023 struct xstorm_tcp_context_section tcp;
2024#if defined(__BIG_ENDIAN)
2025 u16 reserved;
	/* enable bits for L2/L4 stats plus a 5-bit counter id */
2026 u8 statistics_params;
2027#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
2028#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
2029#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
2030#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2031#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2032#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2033#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
2034#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
	/* presumably selects padded_ip_v4 vs ip_v6 in ip_union -- TODO confirm */
2035 u8 ip_version_1b;
2036#elif defined(__LITTLE_ENDIAN)
2037 u8 ip_version_1b;
2038 u8 statistics_params;
2039#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
2040#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
2041#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
2042#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2043#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2044#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2045#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
2046#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
2047 u16 reserved;
2048#endif
2049};
2050
2051/*
2052 * Flags used in ISCSI context section
2053 */
/*
 * Single flags byte of per-connection iSCSI options (immediate data,
 * initial R2T, header/data digest, HQ-BD bookkeeping), accessed through
 * the mask/shift pairs below.  Device-shared layout.
 */
2054struct xstorm_iscsi_context_flags {
2055 u8 flags;
2056#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0)
2057#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0
2058#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1)
2059#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1
2060#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2)
2061#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2
2062#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3)
2063#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3
2064#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4)
2065#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4
2066#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5)
2067#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5
2068#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6)
2069#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6
2070#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7)
2071#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7
2072};
2073
/*
 * Xstorm-visible slice of a task context entry: data-out offset, the
 * initiator task tag and the data sequence number.  Firmware-defined
 * layout; used as scratch state in xstorm_iscsi_context_section.
 */
2074struct iscsi_task_context_entry_x {
2075 u32 data_out_buffer_offset;
2076 u32 itt;
2077 u32 data_sn;
2078};
2079
/*
 * Task-context word written only by the Xstorm side (per the type
 * name): transmit R2T sequence number.  Firmware-defined layout.
 */
2080struct iscsi_task_context_entry_xuc_x_write_only {
2081 u32 tx_r2t_sn;
2082};
2083
/*
 * SGL cursor shared between processors (name suggests written by both
 * X and U storms -- TODO confirm): 64-bit SGL base split lo/hi, plus
 * current entry index, count and intra-entry offset.  Byte/halfword
 * order is mirrored per host endianness; device-shared layout.
 */
2084struct iscsi_task_context_entry_xuc_xu_write_both {
2085 u32 sgl_base_lo;
2086 u32 sgl_base_hi;
2087#if defined(__BIG_ENDIAN)
2088 u8 sgl_size;
2089 u8 sge_index;
2090 u16 sge_offset;
2091#elif defined(__LITTLE_ENDIAN)
2092 u16 sge_offset;
2093 u8 sge_index;
2094 u8 sgl_size;
2095#endif
};
2097
2098/*
2099 * iSCSI context section
2100 */
/*
 * iSCSI transmit-side state for the Xstorm processor: negotiated
 * lengths, page-table (PBL) bases and current entries for the SQ, HQ,
 * R2T queue and task table, per-task scratch entries, and the
 * retransmit bookkeeping controlled by rxmit_flags.  Endian-mirrored,
 * device-shared layout: do not reorder fields.
 */
2101struct xstorm_iscsi_context_section {
2102 u32 first_burst_length;
2103 u32 max_send_pdu_length;
2104 struct regpair sq_pbl_base;
2105 struct regpair sq_curr_pbe;
2106 struct regpair hq_pbl_base;
2107 struct regpair hq_curr_pbe_base;
2108 struct regpair r2tq_pbl_base;
2109 struct regpair r2tq_curr_pbe_base;
2110 struct regpair task_pbl_base;
2111#if defined(__BIG_ENDIAN)
2112 u16 data_out_count;
2113 struct xstorm_iscsi_context_flags flags;
2114 u8 task_pbl_cache_idx;
2115#elif defined(__LITTLE_ENDIAN)
2116 u8 task_pbl_cache_idx;
2117 struct xstorm_iscsi_context_flags flags;
2118 u16 data_out_count;
2119#endif
2120 u32 seq_more_2_send;
2121 u32 pdu_more_2_send;
	/* working copies of the task-context slices defined above */
2122 struct iscsi_task_context_entry_x temp_tce_x;
2123 struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr;
2124 struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr;
2125 struct regpair lun;
2126 u32 exp_data_transfer_len_ttt;
2127 u32 pdu_data_2_rxmit;
2128 u32 rxmit_bytes_2_dr;
2129#if defined(__BIG_ENDIAN)
2130 u16 rxmit_sge_offset;
2131 u16 hq_rxmit_cons;
2132#elif defined(__LITTLE_ENDIAN)
2133 u16 hq_rxmit_cons;
2134 u16 rxmit_sge_offset;
2135#endif
2136#if defined(__BIG_ENDIAN)
2137 u16 r2tq_cons;
2138 u8 rxmit_flags;
2139#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
2140#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
2141#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
2142#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
2143#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
2144#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
2145#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
2146#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
2147#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
2148#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
2149#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
2150#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
2151#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
2152#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
2153 u8 rxmit_sge_idx;
2154#elif defined(__LITTLE_ENDIAN)
2155 u8 rxmit_sge_idx;
2156 u8 rxmit_flags;
2157#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
2158#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
2159#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
2160#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
2161#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
2162#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
2163#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
2164#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
2165#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
2166#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
2167#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
2168#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
2169#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
2170#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
2171 u16 r2tq_cons;
2172#endif
2173 u32 hq_rxmit_tcp_seq;
2174};
2175
2176/*
2177 * Xstorm iSCSI Storm Context
2178 */
/*
 * Full Xstorm storm context for one iSCSI connection: the common
 * (eth/IP/TCP) section followed by the iSCSI-specific section.
 */
2179struct xstorm_iscsi_st_context {
2180 struct xstorm_common_context_section common;
2181 struct xstorm_iscsi_context_section iscsi;
2182};
2183
2184/*
2185 * CQ DB CQ producer and pending completion counter
2186 */
/*
 * One CQ doorbell slot: producer index paired with a pending-completion
 * counter, halfword order mirrored per host endianness.
 */
2187struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
2188#if defined(__BIG_ENDIAN)
2189 u16 cntr;
2190 u16 prod;
2191#elif defined(__LITTLE_ENDIAN)
2192 u16 prod;
2193 u16 cntr;
2194#endif
};
2196
2197/*
2198 * CQ DB pending completion ITT array
2199 */
/*
 * Producer/pending-counter doorbell slots for all 8 CQs of a
 * connection.
 */
2200struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
2201 struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
};
2203
2204/*
2205 * Cstorm CQ sequence to notify array, updated by driver
2206 */
/*
 * Per-CQ sequence numbers (8 CQs); per the header comment above, the
 * driver updates these to tell Cstorm which sequence to notify at.
 */
2207struct iscsi_cq_db_sqn_2_notify_arr {
2208 u16 sqn[8];
};
2210
2211/*
2212 * Cstorm iSCSI Storm Context
2213 */
/*
 * Cstorm storm context for one iSCSI connection: CQ doorbell state,
 * HQ/task page tables, the currently cached HQ buffer descriptor
 * (itt/opcode/length/offset) and the digest/HQ control flags.
 * Endian-mirrored, device-shared layout: do not reorder fields.
 */
2214struct cstorm_iscsi_st_context {
2215 struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
2216 struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
2217 struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
2218 struct regpair hq_pbl_base;
2219 struct regpair hq_curr_pbe;
2220 struct regpair task_pbl_base;
2221 struct regpair cq_db_base;
2222#if defined(__BIG_ENDIAN)
2223 u16 hq_bd_itt;
2224 u16 iscsi_conn_id;
2225#elif defined(__LITTLE_ENDIAN)
2226 u16 iscsi_conn_id;
2227 u16 hq_bd_itt;
2228#endif
2229 u32 hq_bd_data_segment_len;
2230 u32 hq_bd_buffer_offset;
2231#if defined(__BIG_ENDIAN)
2232 u8 timer_entry_idx;
2233 u8 cq_proc_en_bit_map;
2234 u8 cq_pend_comp_itt_valid_bit_map;
2235 u8 hq_bd_opcode;
2236#elif defined(__LITTLE_ENDIAN)
2237 u8 hq_bd_opcode;
2238 u8 cq_pend_comp_itt_valid_bit_map;
2239 u8 cq_proc_en_bit_map;
2240 u8 timer_entry_idx;
2241#endif
2242 u32 hq_tcp_seq;
2243#if defined(__BIG_ENDIAN)
2244 u16 flags;
2245#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
2246#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
2247#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
2248#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
2249#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
2250#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
2251#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
2252#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
2253#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
2254#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
2255#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
2256#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
2257 u16 hq_cons;
2258#elif defined(__LITTLE_ENDIAN)
2259 u16 hq_cons;
2260 u16 flags;
2261#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
2262#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
2263#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
2264#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
2265#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
2266#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
2267#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
2268#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
2269#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
2270#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
2271#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
2272#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
2273#endif
2274 struct regpair rsrv1;
2275};
2276
2277/*
2278 * Iscsi connection context
2279 */
/*
 * Complete per-connection iSCSI context as laid out in device memory:
 * the storm (st) and aggregation (ag) contexts of each on-chip
 * processor (U/T/X/C storms) plus timer and pass-through blocks.
 * The member ORDER here defines the device memory map -- do not
 * reorder.
 */
2280struct iscsi_context {
2281 struct ustorm_iscsi_st_context ustorm_st_context;
2282 struct tstorm_iscsi_st_context tstorm_st_context;
2283 struct xstorm_iscsi_ag_context xstorm_ag_context;
2284 struct tstorm_iscsi_ag_context tstorm_ag_context;
2285 struct cstorm_iscsi_ag_context cstorm_ag_context;
2286 struct ustorm_iscsi_ag_context ustorm_ag_context;
2287 struct iscsi_timers_block_context timers_context;
2288 struct regpair upb_context;
2289 struct xstorm_iscsi_st_context xstorm_st_context;
2290 struct regpair xpb_context;
2291 struct cstorm_iscsi_st_context cstorm_st_context;
};
2293
2294/*
2295 * Buffer per connection, used in Tstorm
2296 */
/*
 * Opaque 128-byte (8 x 64-bit regpair) per-connection buffer used by
 * Tstorm; contents are firmware-defined, the driver only sizes it.
 */
2297struct iscsi_conn_buf {
2298 struct regpair reserved[8];
};
2300
2301/*
2302 * ipv6 structure
2303 */
/*
 * 128-bit IPv6 address stored as four 32-bit words, named by which
 * half (hi/lo) of which 64-bit part each word holds.
 */
2304struct ip_v6_addr {
2305 u32 ip_addr_lo_lo;
2306 u32 ip_addr_lo_hi;
2307 u32 ip_addr_hi_lo;
2308 u32 ip_addr_hi_hi;
};
2310
2311/*
2312 * l5cm- connection identification params
2313 */
/*
 * L5 connection-manager addressing tuple handed to firmware: path MTU,
 * a 6-byte remote (link-layer) address split into endian-mirrored
 * bytes, local/remote IPs (ip_v6_addr also carries IPv4 per the
 * IP_VERSION bit -- TODO confirm which words), flow label and TCP
 * ports.  Device-shared layout: do not reorder fields.
 */
2314struct l5cm_conn_addr_params {
2315 u32 pmtu;
2316#if defined(__BIG_ENDIAN)
2317 u8 remote_addr_3;
2318 u8 remote_addr_2;
2319 u8 remote_addr_1;
2320 u8 remote_addr_0;
2321#elif defined(__LITTLE_ENDIAN)
2322 u8 remote_addr_0;
2323 u8 remote_addr_1;
2324 u8 remote_addr_2;
2325 u8 remote_addr_3;
2326#endif
2327#if defined(__BIG_ENDIAN)
2328 u16 params;
2329#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
2330#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
2331#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
2332#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
2333 u8 remote_addr_5;
2334 u8 remote_addr_4;
2335#elif defined(__LITTLE_ENDIAN)
2336 u8 remote_addr_4;
2337 u8 remote_addr_5;
2338 u16 params;
2339#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
2340#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
2341#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
2342#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
2343#endif
2344 struct ip_v6_addr local_ip_addr;
2345 struct ip_v6_addr remote_ip_addr;
2346 u32 ipv6_flow_label_20b;
2347 u32 reserved1;
2348#if defined(__BIG_ENDIAN)
2349 u16 remote_tcp_port;
2350 u16 local_tcp_port;
2351#elif defined(__LITTLE_ENDIAN)
2352 u16 local_tcp_port;
2353 u16 remote_tcp_port;
2354#endif
};
2356
2357/*
2358 * l5cm-xstorm connection buffer
2359 */
/*
 * Per-connection setup parameters consumed by Xstorm: Nagle enable,
 * MSS, precomputed TCP pseudo-header checksum, receive buffer size and
 * the 64-bit address of the connection context.  Endian-mirrored,
 * device-shared layout.
 */
2360struct l5cm_xstorm_conn_buffer {
2361#if defined(__BIG_ENDIAN)
2362 u16 rsrv1;
2363 u16 params;
2364#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
2365#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
2366#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2367#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
2368#elif defined(__LITTLE_ENDIAN)
2369 u16 params;
2370#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
2371#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
2372#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2373#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
2374 u16 rsrv1;
2375#endif
2376#if defined(__BIG_ENDIAN)
2377 u16 mss;
2378 u16 pseudo_header_checksum;
2379#elif defined(__LITTLE_ENDIAN)
2380 u16 pseudo_header_checksum;
2381 u16 mss;
2382#endif
2383 u32 rcv_buf;
2384 u32 rsrv2;
2385 struct regpair context_addr;
2386};
2387
2388/*
2389 * l5cm-tstorm connection buffer
2390 */
/*
 * Per-connection setup parameters consumed by Tstorm: send/receive
 * buffer sizes, delayed-ACK enable, and TCP keep-alive settings
 * (enable, probe count, timeout, interval) plus the maximum
 * retransmission time.  Endian-mirrored, device-shared layout.
 */
2391struct l5cm_tstorm_conn_buffer {
2392 u32 snd_buf;
2393 u32 rcv_buf;
2394#if defined(__BIG_ENDIAN)
2395 u16 params;
2396#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
2397#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
2398#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2399#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
2400 u8 ka_max_probe_count;
2401 u8 ka_enable;
2402#elif defined(__LITTLE_ENDIAN)
2403 u8 ka_enable;
2404 u8 ka_max_probe_count;
2405 u16 params;
2406#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
2407#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
2408#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2409#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
2410#endif
2411 u32 ka_timeout;
2412 u32 ka_interval;
2413 u32 max_rt_time;
2414};
2415
2416/*
2417 * l5cm connection buffer for active side
2418 */
/*
 * Everything firmware needs to open an active (initiator-side)
 * connection: addressing tuple plus the Xstorm and Tstorm parameter
 * buffers, in this fixed order.
 */
2419struct l5cm_active_conn_buffer {
2420 struct l5cm_conn_addr_params conn_addr_buf;
2421 struct l5cm_xstorm_conn_buffer xstorm_conn_buffer;
2422 struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
};
2424
2425/*
2426 * l5cm slow path element
2427 */
/*
 * Packet-size payload of a slow-path element, padded to 64 bits.
 */
2428struct l5cm_packet_size {
2429 u32 size;
2430 u32 rsrv;
};
2432
2433/*
2434 * l5cm connection parameters
2435 */
/*
 * Alternative interpretations of the first word of l5cm_reduce_conn:
 * either a passive-side scramble key or a PCS id -- which applies is
 * decided elsewhere (NOTE(review): selector not visible here).
 */
2436union l5cm_reduce_param_union {
2437 u32 passive_side_scramble_key;
2438 u32 pcs_id;
};
2440
2441/*
2442 * l5cm connection parameters
2443 */
/*
 * Reduced connection parameters: the union word above plus the TCP
 * initial sequence number.
 */
2444struct l5cm_reduce_conn {
2445 union l5cm_reduce_param_union param;
2446 u32 isn;
};
2448
2449/*
2450 * l5cm slow path element
2451 */
/*
 * 8-byte payload of a slow-path element, interpreted per command type:
 * raw bytes, a 64-bit physical address, a packet size, or reduced
 * connection parameters.
 */
2452union l5cm_specific_data {
2453 u8 protocol_data[8];
2454 struct regpair phy_address;
2455 struct l5cm_packet_size packet_size;
2456 struct l5cm_reduce_conn reduced_conn;
};
2458
2459/*
2460 * l5 slow path element
2461 */
/*
 * L5 slow-path element: common slow-path header followed by the
 * command-specific 8-byte payload.
 */
2462struct l5cm_spe {
2463 struct spe_hdr hdr;
2464 union l5cm_specific_data data;
};
2466
2467/*
2468 * Tstorm Tcp flags
2469 */
/*
 * Tstorm per-connection TCP flag word: 12-bit VLAN id, a timestamp-
 * enabled bit, and reserved bits, accessed via the mask/shift pairs.
 */
2470struct tstorm_l5cm_tcp_flags {
2471 u16 flags;
2472#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
2473#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
2474#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
2475#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
2476#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
2477#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
2478#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
2479#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
};
2481
2482/*
2483 * Xstorm Tcp flags
2484 */
/*
 * Xstorm per-connection TCP flag byte: ECN ("ENC" in the generated
 * name -- NOTE(review): presumably ECN, verify), timestamp and window
 * scaling enables, plus reserved bits.
 */
2485struct xstorm_l5cm_tcp_flags {
2486 u8 flags;
2487#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0)
2488#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0
2489#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1)
2490#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1
2491#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2)
2492#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2
2493#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3)
2494#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
};
2496
580#endif /* CNIC_DEFS_H */ 2497#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d8b09efdcb52..8aaf98bdd4f7 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.0.1" 15#define CNIC_MODULE_VERSION "2.1.0"
16#define CNIC_MODULE_RELDATE "Oct 01, 2009" 16#define CNIC_MODULE_RELDATE "Oct 10, 2009"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -81,6 +81,8 @@ struct kcqe {
81#define DRV_CTL_CTX_WR_CMD 0x103 81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104 82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105 83#define DRV_CTL_COMPLETION_CMD 0x105
84#define DRV_CTL_START_L2_CMD 0x106
85#define DRV_CTL_STOP_L2_CMD 0x107
84 86
85struct cnic_ctl_completion { 87struct cnic_ctl_completion {
86 u32 cid; 88 u32 cid;
@@ -105,11 +107,17 @@ struct drv_ctl_io {
105 dma_addr_t dma_addr; 107 dma_addr_t dma_addr;
106}; 108};
107 109
110struct drv_ctl_l2_ring {
111 u32 client_id;
112 u32 cid;
113};
114
108struct drv_ctl_info { 115struct drv_ctl_info {
109 int cmd; 116 int cmd;
110 union { 117 union {
111 struct drv_ctl_completion comp; 118 struct drv_ctl_completion comp;
112 struct drv_ctl_io io; 119 struct drv_ctl_io io;
120 struct drv_ctl_l2_ring ring;
113 char bytes[MAX_DRV_CTL_DATA]; 121 char bytes[MAX_DRV_CTL_DATA];
114 } data; 122 } data;
115}; 123};
@@ -143,6 +151,7 @@ struct cnic_eth_dev {
143 u32 max_kwqe_pending; 151 u32 max_kwqe_pending;
144 struct pci_dev *pdev; 152 struct pci_dev *pdev;
145 void __iomem *io_base; 153 void __iomem *io_base;
154 void __iomem *io_base2;
146 155
147 u32 ctx_tbl_offset; 156 u32 ctx_tbl_offset;
148 u32 ctx_tbl_len; 157 u32 ctx_tbl_len;
@@ -298,5 +307,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
298extern int cnic_unregister_driver(int ulp_type); 307extern int cnic_unregister_driver(int ulp_type);
299 308
300extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev); 309extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
310extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
301 311
302#endif 312#endif
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 61f9da2b4943..678222389407 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -380,9 +380,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
380 return NULL; 380 return NULL;
381 } 381 }
382 382
383 skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE); 383 skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
384 if (likely(skb)) { 384 if (likely(skb)) {
385 skb_reserve(skb, 2);
386 skb_put(desc->skb, desc->datalen); 385 skb_put(desc->skb, desc->datalen);
387 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); 386 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
388 desc->skb->ip_summed = CHECKSUM_NONE; 387 desc->skb->ip_summed = CHECKSUM_NONE;
@@ -991,12 +990,11 @@ static int cpmac_open(struct net_device *dev)
991 990
992 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; 991 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
993 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { 992 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
994 skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); 993 skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
995 if (unlikely(!skb)) { 994 if (unlikely(!skb)) {
996 res = -ENOMEM; 995 res = -ENOMEM;
997 goto fail_desc; 996 goto fail_desc;
998 } 997 }
999 skb_reserve(skb, 2);
1000 desc->skb = skb; 998 desc->skb = skb;
1001 desc->data_mapping = dma_map_single(&dev->dev, skb->data, 999 desc->data_mapping = dma_map_single(&dev->dev, skb->data,
1002 CPMAC_SKB_SIZE, 1000 CPMAC_SKB_SIZE,
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 0c54219960e2..af9321617ce4 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1323,7 +1323,7 @@ net_open(struct net_device *dev)
1323 writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON); 1323 writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
1324#endif 1324#endif
1325 write_irq(dev, lp->chip_type, dev->irq); 1325 write_irq(dev, lp->chip_type, dev->irq);
1326 ret = request_irq(dev->irq, &net_interrupt, 0, dev->name, dev); 1326 ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
1327 if (ret) { 1327 if (ret) {
1328 if (net_debug) 1328 if (net_debug)
1329 printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq); 1329 printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 2b1aea6aa558..3e8618b4efbc 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -48,12 +48,27 @@
48struct vlan_group; 48struct vlan_group;
49struct adapter; 49struct adapter;
50struct sge_qset; 50struct sge_qset;
51struct port_info;
51 52
52enum { /* rx_offload flags */ 53enum { /* rx_offload flags */
53 T3_RX_CSUM = 1 << 0, 54 T3_RX_CSUM = 1 << 0,
54 T3_LRO = 1 << 1, 55 T3_LRO = 1 << 1,
55}; 56};
56 57
58enum mac_idx_types {
59 LAN_MAC_IDX = 0,
60 SAN_MAC_IDX,
61
62 MAX_MAC_IDX
63};
64
65struct iscsi_config {
66 __u8 mac_addr[ETH_ALEN];
67 __u32 flags;
68 int (*send)(struct port_info *pi, struct sk_buff **skb);
69 int (*recv)(struct port_info *pi, struct sk_buff *skb);
70};
71
57struct port_info { 72struct port_info {
58 struct adapter *adapter; 73 struct adapter *adapter;
59 struct vlan_group *vlan_grp; 74 struct vlan_group *vlan_grp;
@@ -68,6 +83,7 @@ struct port_info {
68 struct net_device_stats netstats; 83 struct net_device_stats netstats;
69 int activity; 84 int activity;
70 __be32 iscsi_ipv4addr; 85 __be32 iscsi_ipv4addr;
86 struct iscsi_config iscsic;
71 87
72 int link_fault; /* link fault was detected */ 88 int link_fault; /* link fault was detected */
73}; 89};
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 1b2c305fb82b..6ff356d4c7ab 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -125,11 +125,9 @@ enum { /* adapter interrupt-maintained statistics */
125 IRQ_NUM_STATS /* keep last */ 125 IRQ_NUM_STATS /* keep last */
126}; 126};
127 127
128enum { 128#define TP_VERSION_MAJOR 1
129 TP_VERSION_MAJOR = 1, 129#define TP_VERSION_MINOR 1
130 TP_VERSION_MINOR = 1, 130#define TP_VERSION_MICRO 0
131 TP_VERSION_MICRO = 0
132};
133 131
134#define S_TP_VERSION_MAJOR 16 132#define S_TP_VERSION_MAJOR 16
135#define M_TP_VERSION_MAJOR 0xFF 133#define M_TP_VERSION_MAJOR 0xFF
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 34e776c5f06b..b1a5a00a78cd 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -44,6 +44,7 @@
44#include <linux/rtnetlink.h> 44#include <linux/rtnetlink.h>
45#include <linux/firmware.h> 45#include <linux/firmware.h>
46#include <linux/log2.h> 46#include <linux/log2.h>
47#include <linux/stringify.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48 49
49#include "common.h" 50#include "common.h"
@@ -344,8 +345,10 @@ static void link_start(struct net_device *dev)
344 345
345 init_rx_mode(&rm, dev, dev->mc_list); 346 init_rx_mode(&rm, dev, dev->mc_list);
346 t3_mac_reset(mac); 347 t3_mac_reset(mac);
348 t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
347 t3_mac_set_mtu(mac, dev->mtu); 349 t3_mac_set_mtu(mac, dev->mtu);
348 t3_mac_set_address(mac, 0, dev->dev_addr); 350 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
351 t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
349 t3_mac_set_rx_mode(mac, &rm); 352 t3_mac_set_rx_mode(mac, &rm);
350 t3_link_start(&pi->phy, mac, &pi->link_config); 353 t3_link_start(&pi->phy, mac, &pi->link_config);
351 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 354 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
@@ -903,6 +906,7 @@ static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
903static int write_smt_entry(struct adapter *adapter, int idx) 906static int write_smt_entry(struct adapter *adapter, int idx)
904{ 907{
905 struct cpl_smt_write_req *req; 908 struct cpl_smt_write_req *req;
909 struct port_info *pi = netdev_priv(adapter->port[idx]);
906 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL); 910 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
907 911
908 if (!skb) 912 if (!skb)
@@ -913,8 +917,8 @@ static int write_smt_entry(struct adapter *adapter, int idx)
913 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); 917 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
914 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ 918 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
915 req->iff = idx; 919 req->iff = idx;
916 memset(req->src_mac1, 0, sizeof(req->src_mac1));
917 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN); 920 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
921 memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
918 skb->priority = 1; 922 skb->priority = 1;
919 offload_tx(&adapter->tdev, skb); 923 offload_tx(&adapter->tdev, skb);
920 return 0; 924 return 0;
@@ -989,11 +993,21 @@ static int bind_qsets(struct adapter *adap)
989 return err; 993 return err;
990} 994}
991 995
992#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin" 996#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
993#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin" 997 __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
998#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
999#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
1000 __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
1001#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
994#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin" 1002#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
995#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin" 1003#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
996#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin" 1004#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
1005MODULE_FIRMWARE(FW_FNAME);
1006MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
1007MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
1008MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
1009MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
1010MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
997 1011
998static inline const char *get_edc_fw_name(int edc_idx) 1012static inline const char *get_edc_fw_name(int edc_idx)
999{ 1013{
@@ -1064,16 +1078,13 @@ int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1064static int upgrade_fw(struct adapter *adap) 1078static int upgrade_fw(struct adapter *adap)
1065{ 1079{
1066 int ret; 1080 int ret;
1067 char buf[64];
1068 const struct firmware *fw; 1081 const struct firmware *fw;
1069 struct device *dev = &adap->pdev->dev; 1082 struct device *dev = &adap->pdev->dev;
1070 1083
1071 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR, 1084 ret = request_firmware(&fw, FW_FNAME, dev);
1072 FW_VERSION_MINOR, FW_VERSION_MICRO);
1073 ret = request_firmware(&fw, buf, dev);
1074 if (ret < 0) { 1085 if (ret < 0) {
1075 dev_err(dev, "could not upgrade firmware: unable to load %s\n", 1086 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1076 buf); 1087 FW_FNAME);
1077 return ret; 1088 return ret;
1078 } 1089 }
1079 ret = t3_load_fw(adap, fw->data, fw->size); 1090 ret = t3_load_fw(adap, fw->data, fw->size);
@@ -1117,8 +1128,7 @@ static int update_tpsram(struct adapter *adap)
1117 if (!rev) 1128 if (!rev)
1118 return 0; 1129 return 0;
1119 1130
1120 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev, 1131 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1121 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1122 1132
1123 ret = request_firmware(&tpsram, buf, dev); 1133 ret = request_firmware(&tpsram, buf, dev);
1124 if (ret < 0) { 1134 if (ret < 0) {
@@ -2516,7 +2526,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2516 return -EINVAL; 2526 return -EINVAL;
2517 2527
2518 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2528 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2519 t3_mac_set_address(&pi->mac, 0, dev->dev_addr); 2529 t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2520 if (offload_running(adapter)) 2530 if (offload_running(adapter))
2521 write_smt_entry(adapter, pi->port_id); 2531 write_smt_entry(adapter, pi->port_id);
2522 return 0; 2532 return 0;
@@ -2654,7 +2664,7 @@ static void check_t3b2_mac(struct adapter *adapter)
2654 struct cmac *mac = &p->mac; 2664 struct cmac *mac = &p->mac;
2655 2665
2656 t3_mac_set_mtu(mac, dev->mtu); 2666 t3_mac_set_mtu(mac, dev->mtu);
2657 t3_mac_set_address(mac, 0, dev->dev_addr); 2667 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2658 cxgb_set_rxmode(dev); 2668 cxgb_set_rxmode(dev);
2659 t3_link_start(&p->phy, mac, &p->link_config); 2669 t3_link_start(&p->phy, mac, &p->link_config);
2660 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 2670 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
@@ -3112,6 +3122,14 @@ static const struct net_device_ops cxgb_netdev_ops = {
3112#endif 3122#endif
3113}; 3123};
3114 3124
3125static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3126{
3127 struct port_info *pi = netdev_priv(dev);
3128
3129 memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3130 pi->iscsic.mac_addr[3] |= 0x80;
3131}
3132
3115static int __devinit init_one(struct pci_dev *pdev, 3133static int __devinit init_one(struct pci_dev *pdev,
3116 const struct pci_device_id *ent) 3134 const struct pci_device_id *ent)
3117{ 3135{
@@ -3270,6 +3288,9 @@ static int __devinit init_one(struct pci_dev *pdev,
3270 goto out_free_dev; 3288 goto out_free_dev;
3271 } 3289 }
3272 3290
3291 for_each_port(adapter, i)
3292 cxgb3_init_iscsi_mac(adapter->port[i]);
3293
3273 /* Driver's ready. Reflect it on LEDs */ 3294 /* Driver's ready. Reflect it on LEDs */
3274 t3_led_ready(adapter); 3295 t3_led_ready(adapter);
3275 3296
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 6366061712f4..49f3de79118c 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1260,7 +1260,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1260 if (should_restart_tx(q) && 1260 if (should_restart_tx(q) &&
1261 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { 1261 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1262 q->restarts++; 1262 q->restarts++;
1263 netif_tx_wake_queue(txq); 1263 netif_tx_start_queue(txq);
1264 } 1264 }
1265 } 1265 }
1266 1266
@@ -1946,10 +1946,9 @@ static void restart_tx(struct sge_qset *qs)
1946 * Check if the ARP request is probing the private IP address 1946 * Check if the ARP request is probing the private IP address
1947 * dedicated to iSCSI, generate an ARP reply if so. 1947 * dedicated to iSCSI, generate an ARP reply if so.
1948 */ 1948 */
1949static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb) 1949static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
1950{ 1950{
1951 struct net_device *dev = skb->dev; 1951 struct net_device *dev = skb->dev;
1952 struct port_info *pi;
1953 struct arphdr *arp; 1952 struct arphdr *arp;
1954 unsigned char *arp_ptr; 1953 unsigned char *arp_ptr;
1955 unsigned char *sha; 1954 unsigned char *sha;
@@ -1972,12 +1971,11 @@ static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1972 arp_ptr += dev->addr_len; 1971 arp_ptr += dev->addr_len;
1973 memcpy(&tip, arp_ptr, sizeof(tip)); 1972 memcpy(&tip, arp_ptr, sizeof(tip));
1974 1973
1975 pi = netdev_priv(dev);
1976 if (tip != pi->iscsi_ipv4addr) 1974 if (tip != pi->iscsi_ipv4addr)
1977 return; 1975 return;
1978 1976
1979 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 1977 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1980 dev->dev_addr, sha); 1978 pi->iscsic.mac_addr, sha);
1981 1979
1982} 1980}
1983 1981
@@ -1986,6 +1984,19 @@ static inline int is_arp(struct sk_buff *skb)
1986 return skb->protocol == htons(ETH_P_ARP); 1984 return skb->protocol == htons(ETH_P_ARP);
1987} 1985}
1988 1986
1987static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
1988 struct sk_buff *skb)
1989{
1990 if (is_arp(skb)) {
1991 cxgb3_arp_process(pi, skb);
1992 return;
1993 }
1994
1995 if (pi->iscsic.recv)
1996 pi->iscsic.recv(pi, skb);
1997
1998}
1999
1989/** 2000/**
1990 * rx_eth - process an ingress ethernet packet 2001 * rx_eth - process an ingress ethernet packet
1991 * @adap: the adapter 2002 * @adap: the adapter
@@ -2024,13 +2035,12 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2024 vlan_gro_receive(&qs->napi, grp, 2035 vlan_gro_receive(&qs->napi, grp,
2025 ntohs(p->vlan), skb); 2036 ntohs(p->vlan), skb);
2026 else { 2037 else {
2027 if (unlikely(pi->iscsi_ipv4addr && 2038 if (unlikely(pi->iscsic.flags)) {
2028 is_arp(skb))) {
2029 unsigned short vtag = ntohs(p->vlan) & 2039 unsigned short vtag = ntohs(p->vlan) &
2030 VLAN_VID_MASK; 2040 VLAN_VID_MASK;
2031 skb->dev = vlan_group_get_device(grp, 2041 skb->dev = vlan_group_get_device(grp,
2032 vtag); 2042 vtag);
2033 cxgb3_arp_process(adap, skb); 2043 cxgb3_process_iscsi_prov_pack(pi, skb);
2034 } 2044 }
2035 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan), 2045 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
2036 rq->polling); 2046 rq->polling);
@@ -2041,8 +2051,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2041 if (lro) 2051 if (lro)
2042 napi_gro_receive(&qs->napi, skb); 2052 napi_gro_receive(&qs->napi, skb);
2043 else { 2053 else {
2044 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) 2054 if (unlikely(pi->iscsic.flags))
2045 cxgb3_arp_process(adap, skb); 2055 cxgb3_process_iscsi_prov_pack(pi, skb);
2046 netif_receive_skb(skb); 2056 netif_receive_skb(skb);
2047 } 2057 }
2048 } else 2058 } else
@@ -2125,6 +2135,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2125 if (!complete) 2135 if (!complete)
2126 return; 2136 return;
2127 2137
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2128 skb->ip_summed = CHECKSUM_UNNECESSARY; 2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2129 cpl = qs->lro_va; 2140 cpl = qs->lro_va;
2130 2141
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index e3478314c002..8edac8915ea8 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -2803,11 +2803,33 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
2803 return 0; 2803 return 0;
2804} 2804}
2805 2805
2806static
2807int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
2808{
2809 struct net_device *dev = platform_get_drvdata(pdev);
2810
2811 if (netif_running(dev))
2812 emac_dev_stop(dev);
2813
2814 clk_disable(emac_clk);
2815
2816 return 0;
2817}
2818
2819static int davinci_emac_resume(struct platform_device *pdev)
2820{
2821 struct net_device *dev = platform_get_drvdata(pdev);
2822
2823 clk_enable(emac_clk);
2824
2825 if (netif_running(dev))
2826 emac_dev_open(dev);
2827
2828 return 0;
2829}
2830
2806/** 2831/**
2807 * davinci_emac_driver: EMAC platform driver structure 2832 * davinci_emac_driver: EMAC platform driver structure
2808 *
2809 * We implement only probe and remove functions - suspend/resume and
2810 * others not supported by this module
2811 */ 2833 */
2812static struct platform_driver davinci_emac_driver = { 2834static struct platform_driver davinci_emac_driver = {
2813 .driver = { 2835 .driver = {
@@ -2816,6 +2838,8 @@ static struct platform_driver davinci_emac_driver = {
2816 }, 2838 },
2817 .probe = davinci_emac_probe, 2839 .probe = davinci_emac_probe,
2818 .remove = __devexit_p(davinci_emac_remove), 2840 .remove = __devexit_p(davinci_emac_remove),
2841 .suspend = davinci_emac_suspend,
2842 .resume = davinci_emac_resume,
2819}; 2843};
2820 2844
2821/** 2845/**
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index a31696a3928e..be9590253aa1 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -801,14 +801,14 @@ static int lance_open(struct net_device *dev)
801 netif_start_queue(dev); 801 netif_start_queue(dev);
802 802
803 /* Associate IRQ with lance_interrupt */ 803 /* Associate IRQ with lance_interrupt */
804 if (request_irq(dev->irq, &lance_interrupt, 0, "lance", dev)) { 804 if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
805 printk("%s: Can't get IRQ %d\n", dev->name, dev->irq); 805 printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
806 return -EAGAIN; 806 return -EAGAIN;
807 } 807 }
808 if (lp->dma_irq >= 0) { 808 if (lp->dma_irq >= 0) {
809 unsigned long flags; 809 unsigned long flags;
810 810
811 if (request_irq(lp->dma_irq, &lance_dma_merr_int, 0, 811 if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
812 "lance error", dev)) { 812 "lance error", dev)) {
813 free_irq(dev->irq, dev); 813 free_irq(dev->irq, dev);
814 printk("%s: Can't get DMA IRQ %d\n", dev->name, 814 printk("%s: Can't get DMA IRQ %d\n", dev->name,
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 7a3bdac84abe..0c1f491d20bf 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -849,7 +849,7 @@ static int depca_open(struct net_device *dev)
849 849
850 depca_dbg_open(dev); 850 depca_dbg_open(dev);
851 851
852 if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name, dev)) { 852 if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
853 printk("depca_open(): Requested IRQ%d is busy\n", dev->irq); 853 printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
854 status = -EAGAIN; 854 status = -EAGAIN;
855 } else { 855 } else {
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 7fa7a907f134..a2f1860fdd16 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -411,7 +411,7 @@ rio_open (struct net_device *dev)
411 int i; 411 int i;
412 u16 macctrl; 412 u16 macctrl;
413 413
414 i = request_irq (dev->irq, &rio_interrupt, IRQF_SHARED, dev->name, dev); 414 i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
415 if (i) 415 if (i)
416 return i; 416 return i;
417 417
@@ -505,7 +505,8 @@ rio_timer (unsigned long data)
505 entry = np->old_rx % RX_RING_SIZE; 505 entry = np->old_rx % RX_RING_SIZE;
506 /* Dropped packets don't need to re-allocate */ 506 /* Dropped packets don't need to re-allocate */
507 if (np->rx_skbuff[entry] == NULL) { 507 if (np->rx_skbuff[entry] == NULL) {
508 skb = netdev_alloc_skb (dev, np->rx_buf_sz); 508 skb = netdev_alloc_skb_ip_align(dev,
509 np->rx_buf_sz);
509 if (skb == NULL) { 510 if (skb == NULL) {
510 np->rx_ring[entry].fraginfo = 0; 511 np->rx_ring[entry].fraginfo = 0;
511 printk (KERN_INFO 512 printk (KERN_INFO
@@ -514,8 +515,6 @@ rio_timer (unsigned long data)
514 break; 515 break;
515 } 516 }
516 np->rx_skbuff[entry] = skb; 517 np->rx_skbuff[entry] = skb;
517 /* 16 byte align the IP header */
518 skb_reserve (skb, 2);
519 np->rx_ring[entry].fraginfo = 518 np->rx_ring[entry].fraginfo =
520 cpu_to_le64 (pci_map_single 519 cpu_to_le64 (pci_map_single
521 (np->pdev, skb->data, np->rx_buf_sz, 520 (np->pdev, skb->data, np->rx_buf_sz,
@@ -576,7 +575,9 @@ alloc_list (struct net_device *dev)
576 /* Allocate the rx buffers */ 575 /* Allocate the rx buffers */
577 for (i = 0; i < RX_RING_SIZE; i++) { 576 for (i = 0; i < RX_RING_SIZE; i++) {
578 /* Allocated fixed size of skbuff */ 577 /* Allocated fixed size of skbuff */
579 struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz); 578 struct sk_buff *skb;
579
580 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
580 np->rx_skbuff[i] = skb; 581 np->rx_skbuff[i] = skb;
581 if (skb == NULL) { 582 if (skb == NULL) {
582 printk (KERN_ERR 583 printk (KERN_ERR
@@ -584,7 +585,6 @@ alloc_list (struct net_device *dev)
584 dev->name); 585 dev->name);
585 break; 586 break;
586 } 587 }
587 skb_reserve (skb, 2); /* 16 byte align the IP header. */
588 /* Rubicon now supports 40 bits of addressing space. */ 588 /* Rubicon now supports 40 bits of addressing space. */
589 np->rx_ring[i].fraginfo = 589 np->rx_ring[i].fraginfo =
590 cpu_to_le64 ( pci_map_single ( 590 cpu_to_le64 ( pci_map_single (
@@ -871,13 +871,11 @@ receive_packet (struct net_device *dev)
871 PCI_DMA_FROMDEVICE); 871 PCI_DMA_FROMDEVICE);
872 skb_put (skb = np->rx_skbuff[entry], pkt_len); 872 skb_put (skb = np->rx_skbuff[entry], pkt_len);
873 np->rx_skbuff[entry] = NULL; 873 np->rx_skbuff[entry] = NULL;
874 } else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) { 874 } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
875 pci_dma_sync_single_for_cpu(np->pdev, 875 pci_dma_sync_single_for_cpu(np->pdev,
876 desc_to_dma(desc), 876 desc_to_dma(desc),
877 np->rx_buf_sz, 877 np->rx_buf_sz,
878 PCI_DMA_FROMDEVICE); 878 PCI_DMA_FROMDEVICE);
879 /* 16 byte align the IP header */
880 skb_reserve (skb, 2);
881 skb_copy_to_linear_data (skb, 879 skb_copy_to_linear_data (skb,
882 np->rx_skbuff[entry]->data, 880 np->rx_skbuff[entry]->data,
883 pkt_len); 881 pkt_len);
@@ -907,7 +905,7 @@ receive_packet (struct net_device *dev)
907 struct sk_buff *skb; 905 struct sk_buff *skb;
908 /* Dropped packets don't need to re-allocate */ 906 /* Dropped packets don't need to re-allocate */
909 if (np->rx_skbuff[entry] == NULL) { 907 if (np->rx_skbuff[entry] == NULL) {
910 skb = netdev_alloc_skb(dev, np->rx_buf_sz); 908 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
911 if (skb == NULL) { 909 if (skb == NULL) {
912 np->rx_ring[entry].fraginfo = 0; 910 np->rx_ring[entry].fraginfo = 0;
913 printk (KERN_INFO 911 printk (KERN_INFO
@@ -917,8 +915,6 @@ receive_packet (struct net_device *dev)
917 break; 915 break;
918 } 916 }
919 np->rx_skbuff[entry] = skb; 917 np->rx_skbuff[entry] = skb;
920 /* 16 byte align the IP header */
921 skb_reserve (skb, 2);
922 np->rx_ring[entry].fraginfo = 918 np->rx_ring[entry].fraginfo =
923 cpu_to_le64 (pci_map_single 919 cpu_to_le64 (pci_map_single
924 (np->pdev, skb->data, np->rx_buf_sz, 920 (np->pdev, skb->data, np->rx_buf_sz,
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 31b8bef49d2e..81590fbb9943 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -100,6 +100,7 @@ typedef struct board_info {
100 100
101 unsigned int flags; 101 unsigned int flags;
102 unsigned int in_suspend :1; 102 unsigned int in_suspend :1;
103 unsigned int wake_supported :1;
103 int debug_level; 104 int debug_level;
104 105
105 enum dm9000_type type; 106 enum dm9000_type type;
@@ -116,6 +117,8 @@ typedef struct board_info {
116 struct resource *data_req; 117 struct resource *data_req;
117 struct resource *irq_res; 118 struct resource *irq_res;
118 119
120 int irq_wake;
121
119 struct mutex addr_lock; /* phy and eeprom access lock */ 122 struct mutex addr_lock; /* phy and eeprom access lock */
120 123
121 struct delayed_work phy_poll; 124 struct delayed_work phy_poll;
@@ -125,6 +128,7 @@ typedef struct board_info {
125 128
126 struct mii_if_info mii; 129 struct mii_if_info mii;
127 u32 msg_enable; 130 u32 msg_enable;
131 u32 wake_state;
128 132
129 int rx_csum; 133 int rx_csum;
130 int can_csum; 134 int can_csum;
@@ -568,6 +572,54 @@ static int dm9000_set_eeprom(struct net_device *dev,
568 return 0; 572 return 0;
569} 573}
570 574
575static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
576{
577 board_info_t *dm = to_dm9000_board(dev);
578
579 memset(w, 0, sizeof(struct ethtool_wolinfo));
580
581 /* note, we could probably support wake-phy too */
582 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
583 w->wolopts = dm->wake_state;
584}
585
586static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
587{
588 board_info_t *dm = to_dm9000_board(dev);
589 unsigned long flags;
590 u32 opts = w->wolopts;
591 u32 wcr = 0;
592
593 if (!dm->wake_supported)
594 return -EOPNOTSUPP;
595
596 if (opts & ~WAKE_MAGIC)
597 return -EINVAL;
598
599 if (opts & WAKE_MAGIC)
600 wcr |= WCR_MAGICEN;
601
602 mutex_lock(&dm->addr_lock);
603
604 spin_lock_irqsave(&dm->lock, flags);
605 iow(dm, DM9000_WCR, wcr);
606 spin_unlock_irqrestore(&dm->lock, flags);
607
608 mutex_unlock(&dm->addr_lock);
609
610 if (dm->wake_state != opts) {
611 /* change in wol state, update IRQ state */
612
613 if (!dm->wake_state)
614 set_irq_wake(dm->irq_wake, 1);
615 else if (dm->wake_state & !opts)
616 set_irq_wake(dm->irq_wake, 0);
617 }
618
619 dm->wake_state = opts;
620 return 0;
621}
622
571static const struct ethtool_ops dm9000_ethtool_ops = { 623static const struct ethtool_ops dm9000_ethtool_ops = {
572 .get_drvinfo = dm9000_get_drvinfo, 624 .get_drvinfo = dm9000_get_drvinfo,
573 .get_settings = dm9000_get_settings, 625 .get_settings = dm9000_get_settings,
@@ -576,6 +628,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
576 .set_msglevel = dm9000_set_msglevel, 628 .set_msglevel = dm9000_set_msglevel,
577 .nway_reset = dm9000_nway_reset, 629 .nway_reset = dm9000_nway_reset,
578 .get_link = dm9000_get_link, 630 .get_link = dm9000_get_link,
631 .get_wol = dm9000_get_wol,
632 .set_wol = dm9000_set_wol,
579 .get_eeprom_len = dm9000_get_eeprom_len, 633 .get_eeprom_len = dm9000_get_eeprom_len,
580 .get_eeprom = dm9000_get_eeprom, 634 .get_eeprom = dm9000_get_eeprom,
581 .set_eeprom = dm9000_set_eeprom, 635 .set_eeprom = dm9000_set_eeprom,
@@ -722,6 +776,7 @@ dm9000_init_dm9000(struct net_device *dev)
722{ 776{
723 board_info_t *db = netdev_priv(dev); 777 board_info_t *db = netdev_priv(dev);
724 unsigned int imr; 778 unsigned int imr;
779 unsigned int ncr;
725 780
726 dm9000_dbg(db, 1, "entering %s\n", __func__); 781 dm9000_dbg(db, 1, "entering %s\n", __func__);
727 782
@@ -736,8 +791,15 @@ dm9000_init_dm9000(struct net_device *dev)
736 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 791 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
737 iow(db, DM9000_GPR, 0); /* Enable PHY */ 792 iow(db, DM9000_GPR, 0); /* Enable PHY */
738 793
739 if (db->flags & DM9000_PLATF_EXT_PHY) 794 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
740 iow(db, DM9000_NCR, NCR_EXT_PHY); 795
796 /* if wol is needed, then always set NCR_WAKEEN otherwise we end
797 * up dumping the wake events if we disable this. There is already
798 * a wake-mask in DM9000_WCR */
799 if (db->wake_supported)
800 ncr |= NCR_WAKEEN;
801
802 iow(db, DM9000_NCR, ncr);
741 803
742 /* Program operating register */ 804 /* Program operating register */
743 iow(db, DM9000_TCR, 0); /* TX Polling clear */ 805 iow(db, DM9000_TCR, 0); /* TX Polling clear */
@@ -1045,6 +1107,41 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1045 return IRQ_HANDLED; 1107 return IRQ_HANDLED;
1046} 1108}
1047 1109
1110static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1111{
1112 struct net_device *dev = dev_id;
1113 board_info_t *db = netdev_priv(dev);
1114 unsigned long flags;
1115 unsigned nsr, wcr;
1116
1117 spin_lock_irqsave(&db->lock, flags);
1118
1119 nsr = ior(db, DM9000_NSR);
1120 wcr = ior(db, DM9000_WCR);
1121
1122 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1123
1124 if (nsr & NSR_WAKEST) {
1125 /* clear, so we can avoid */
1126 iow(db, DM9000_NSR, NSR_WAKEST);
1127
1128 if (wcr & WCR_LINKST)
1129 dev_info(db->dev, "wake by link status change\n");
1130 if (wcr & WCR_SAMPLEST)
1131 dev_info(db->dev, "wake by sample packet\n");
1132 if (wcr & WCR_MAGICST )
1133 dev_info(db->dev, "wake by magic packet\n");
1134 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1135 dev_err(db->dev, "wake signalled with no reason? "
1136 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1137
1138 }
1139
1140 spin_unlock_irqrestore(&db->lock, flags);
1141
1142 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1143}
1144
1048#ifdef CONFIG_NET_POLL_CONTROLLER 1145#ifdef CONFIG_NET_POLL_CONTROLLER
1049/* 1146/*
1050 *Used by netconsole 1147 *Used by netconsole
@@ -1078,7 +1175,7 @@ dm9000_open(struct net_device *dev)
1078 1175
1079 irqflags |= IRQF_SHARED; 1176 irqflags |= IRQF_SHARED;
1080 1177
1081 if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev)) 1178 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1082 return -EAGAIN; 1179 return -EAGAIN;
1083 1180
1084 /* Initialize DM9000 board */ 1181 /* Initialize DM9000 board */
@@ -1299,6 +1396,29 @@ dm9000_probe(struct platform_device *pdev)
1299 goto out; 1396 goto out;
1300 } 1397 }
1301 1398
1399 db->irq_wake = platform_get_irq(pdev, 1);
1400 if (db->irq_wake >= 0) {
1401 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1402
1403 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1404 IRQF_SHARED, dev_name(db->dev), ndev);
1405 if (ret) {
1406 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1407 } else {
1408
1409 /* test to see if irq is really wakeup capable */
1410 ret = set_irq_wake(db->irq_wake, 1);
1411 if (ret) {
1412 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1413 db->irq_wake, ret);
1414 ret = 0;
1415 } else {
1416 set_irq_wake(db->irq_wake, 0);
1417 db->wake_supported = 1;
1418 }
1419 }
1420 }
1421
1302 iosize = resource_size(db->addr_res); 1422 iosize = resource_size(db->addr_res);
1303 db->addr_req = request_mem_region(db->addr_res->start, iosize, 1423 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1304 pdev->name); 1424 pdev->name);
@@ -1490,10 +1610,14 @@ dm9000_drv_suspend(struct device *dev)
1490 db = netdev_priv(ndev); 1610 db = netdev_priv(ndev);
1491 db->in_suspend = 1; 1611 db->in_suspend = 1;
1492 1612
1493 if (netif_running(ndev)) { 1613 if (!netif_running(ndev))
1494 netif_device_detach(ndev); 1614 return 0;
1615
1616 netif_device_detach(ndev);
1617
1618 /* only shutdown if not using WoL */
1619 if (!db->wake_state)
1495 dm9000_shutdown(ndev); 1620 dm9000_shutdown(ndev);
1496 }
1497 } 1621 }
1498 return 0; 1622 return 0;
1499} 1623}
@@ -1506,10 +1630,13 @@ dm9000_drv_resume(struct device *dev)
1506 board_info_t *db = netdev_priv(ndev); 1630 board_info_t *db = netdev_priv(ndev);
1507 1631
1508 if (ndev) { 1632 if (ndev) {
1509
1510 if (netif_running(ndev)) { 1633 if (netif_running(ndev)) {
1511 dm9000_reset(db); 1634 /* reset if we were not in wake mode to ensure if
1512 dm9000_init_dm9000(ndev); 1635 * the device was powered off it is in a known state */
1636 if (!db->wake_state) {
1637 dm9000_reset(db);
1638 dm9000_init_dm9000(ndev);
1639 }
1513 1640
1514 netif_device_attach(ndev); 1641 netif_device_attach(ndev);
1515 } 1642 }
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index fb1c924d79b4..55688bd1a3ef 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -111,6 +111,13 @@
111#define RSR_CE (1<<1) 111#define RSR_CE (1<<1)
112#define RSR_FOE (1<<0) 112#define RSR_FOE (1<<0)
113 113
114#define WCR_LINKEN (1 << 5)
115#define WCR_SAMPLEEN (1 << 4)
116#define WCR_MAGICEN (1 << 3)
117#define WCR_LINKST (1 << 2)
118#define WCR_SAMPLEST (1 << 1)
119#define WCR_MAGICST (1 << 0)
120
114#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 ) 121#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 )
115#define FCTR_LWOT(ot) ( ot & 0xf ) 122#define FCTR_LWOT(ot) ( ot & 0xf )
116 123
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3c29a20b751e..a81c7b0c41b0 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -622,6 +622,7 @@ struct nic {
622 u16 eeprom_wc; 622 u16 eeprom_wc;
623 __le16 eeprom[256]; 623 __le16 eeprom[256];
624 spinlock_t mdio_lock; 624 spinlock_t mdio_lock;
625 const struct firmware *fw;
625}; 626};
626 627
627static inline void e100_write_flush(struct nic *nic) 628static inline void e100_write_flush(struct nic *nic)
@@ -1223,9 +1224,9 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1223static const struct firmware *e100_request_firmware(struct nic *nic) 1224static const struct firmware *e100_request_firmware(struct nic *nic)
1224{ 1225{
1225 const char *fw_name; 1226 const char *fw_name;
1226 const struct firmware *fw; 1227 const struct firmware *fw = nic->fw;
1227 u8 timer, bundle, min_size; 1228 u8 timer, bundle, min_size;
1228 int err; 1229 int err = 0;
1229 1230
1230 /* do not load u-code for ICH devices */ 1231 /* do not load u-code for ICH devices */
1231 if (nic->flags & ich) 1232 if (nic->flags & ich)
@@ -1241,12 +1242,20 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1241 else /* No ucode on other devices */ 1242 else /* No ucode on other devices */
1242 return NULL; 1243 return NULL;
1243 1244
1244 err = request_firmware(&fw, fw_name, &nic->pdev->dev); 1245 /* If the firmware has not previously been loaded, request a pointer
1246 * to it. If it was previously loaded, we are reinitializing the
1247 * adapter, possibly in a resume from hibernate, in which case
1248 * request_firmware() cannot be used.
1249 */
1250 if (!fw)
1251 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1252
1245 if (err) { 1253 if (err) {
1246 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n", 1254 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
1247 fw_name, err); 1255 fw_name, err);
1248 return ERR_PTR(err); 1256 return ERR_PTR(err);
1249 } 1257 }
1258
1250 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes 1259 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1251 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ 1260 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1252 if (fw->size != UCODE_SIZE * 4 + 3) { 1261 if (fw->size != UCODE_SIZE * 4 + 3) {
@@ -1269,7 +1278,10 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1269 release_firmware(fw); 1278 release_firmware(fw);
1270 return ERR_PTR(-EINVAL); 1279 return ERR_PTR(-EINVAL);
1271 } 1280 }
1272 /* OK, firmware is validated and ready to use... */ 1281
1282 /* OK, firmware is validated and ready to use. Save a pointer
1283 * to it in the nic */
1284 nic->fw = fw;
1273 return fw; 1285 return fw;
1274} 1286}
1275 1287
@@ -1852,11 +1864,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1852#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) 1864#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
1853static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) 1865static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1854{ 1866{
1855 if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN))) 1867 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1856 return -ENOMEM; 1868 return -ENOMEM;
1857 1869
1858 /* Align, init, and map the RFD. */ 1870 /* Init, and map the RFD. */
1859 skb_reserve(rx->skb, NET_IP_ALIGN);
1860 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); 1871 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1861 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, 1872 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1862 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1873 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 42e2b7e21c29..a5665287bd64 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -302,7 +302,6 @@ struct e1000_adapter {
302 /* OS defined structs */ 302 /* OS defined structs */
303 struct net_device *netdev; 303 struct net_device *netdev;
304 struct pci_dev *pdev; 304 struct pci_dev *pdev;
305 struct net_device_stats net_stats;
306 305
307 /* structs defined in e1000_hw.h */ 306 /* structs defined in e1000_hw.h */
308 struct e1000_hw hw; 307 struct e1000_hw hw;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 490b2b7cd3ab..13e9ece16889 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -31,14 +31,22 @@
31#include "e1000.h" 31#include "e1000.h"
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33 33
34enum {NETDEV_STATS, E1000_STATS};
35
34struct e1000_stats { 36struct e1000_stats {
35 char stat_string[ETH_GSTRING_LEN]; 37 char stat_string[ETH_GSTRING_LEN];
38 int type;
36 int sizeof_stat; 39 int sizeof_stat;
37 int stat_offset; 40 int stat_offset;
38}; 41};
39 42
40#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \ 43#define E1000_STAT(m) E1000_STATS, \
41 offsetof(struct e1000_adapter, m) 44 sizeof(((struct e1000_adapter *)0)->m), \
45 offsetof(struct e1000_adapter, m)
46#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
47 sizeof(((struct net_device *)0)->m), \
48 offsetof(struct net_device, m)
49
42static const struct e1000_stats e1000_gstrings_stats[] = { 50static const struct e1000_stats e1000_gstrings_stats[] = {
43 { "rx_packets", E1000_STAT(stats.gprc) }, 51 { "rx_packets", E1000_STAT(stats.gprc) },
44 { "tx_packets", E1000_STAT(stats.gptc) }, 52 { "tx_packets", E1000_STAT(stats.gptc) },
@@ -50,19 +58,19 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
50 { "tx_multicast", E1000_STAT(stats.mptc) }, 58 { "tx_multicast", E1000_STAT(stats.mptc) },
51 { "rx_errors", E1000_STAT(stats.rxerrc) }, 59 { "rx_errors", E1000_STAT(stats.rxerrc) },
52 { "tx_errors", E1000_STAT(stats.txerrc) }, 60 { "tx_errors", E1000_STAT(stats.txerrc) },
53 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, 61 { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
54 { "multicast", E1000_STAT(stats.mprc) }, 62 { "multicast", E1000_STAT(stats.mprc) },
55 { "collisions", E1000_STAT(stats.colc) }, 63 { "collisions", E1000_STAT(stats.colc) },
56 { "rx_length_errors", E1000_STAT(stats.rlerrc) }, 64 { "rx_length_errors", E1000_STAT(stats.rlerrc) },
57 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, 65 { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
58 { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, 66 { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
59 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, 67 { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
60 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, 68 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
61 { "rx_missed_errors", E1000_STAT(stats.mpc) }, 69 { "rx_missed_errors", E1000_STAT(stats.mpc) },
62 { "tx_aborted_errors", E1000_STAT(stats.ecol) }, 70 { "tx_aborted_errors", E1000_STAT(stats.ecol) },
63 { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, 71 { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
64 { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, 72 { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
65 { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, 73 { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
66 { "tx_window_errors", E1000_STAT(stats.latecol) }, 74 { "tx_window_errors", E1000_STAT(stats.latecol) },
67 { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, 75 { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
68 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 76 { "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -861,10 +869,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
861 869
862 /* NOTE: we don't test MSI interrupts here, yet */ 870 /* NOTE: we don't test MSI interrupts here, yet */
863 /* Hook up test interrupt handler just for this test */ 871 /* Hook up test interrupt handler just for this test */
864 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 872 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
865 netdev)) 873 netdev))
866 shared_int = false; 874 shared_int = false;
867 else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, 875 else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
868 netdev->name, netdev)) { 876 netdev->name, netdev)) {
869 *data = 1; 877 *data = 1;
870 return -1; 878 return -1;
@@ -1830,10 +1838,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1830{ 1838{
1831 struct e1000_adapter *adapter = netdev_priv(netdev); 1839 struct e1000_adapter *adapter = netdev_priv(netdev);
1832 int i; 1840 int i;
1841 char *p = NULL;
1833 1842
1834 e1000_update_stats(adapter); 1843 e1000_update_stats(adapter);
1835 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1844 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1836 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1845 switch (e1000_gstrings_stats[i].type) {
1846 case NETDEV_STATS:
1847 p = (char *) netdev +
1848 e1000_gstrings_stats[i].stat_offset;
1849 break;
1850 case E1000_STATS:
1851 p = (char *) adapter +
1852 e1000_gstrings_stats[i].stat_offset;
1853 break;
1854 }
1855
1837 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1856 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1838 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1857 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1839 } 1858 }
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index bcd192ca47b0..c938114a34ab 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3101,10 +3101,8 @@ static void e1000_reset_task(struct work_struct *work)
3101 3101
3102static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3102static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3103{ 3103{
3104 struct e1000_adapter *adapter = netdev_priv(netdev);
3105
3106 /* only return the current stats */ 3104 /* only return the current stats */
3107 return &adapter->net_stats; 3105 return &netdev->stats;
3108} 3106}
3109 3107
3110/** 3108/**
@@ -3196,6 +3194,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3196 3194
3197void e1000_update_stats(struct e1000_adapter *adapter) 3195void e1000_update_stats(struct e1000_adapter *adapter)
3198{ 3196{
3197 struct net_device *netdev = adapter->netdev;
3199 struct e1000_hw *hw = &adapter->hw; 3198 struct e1000_hw *hw = &adapter->hw;
3200 struct pci_dev *pdev = adapter->pdev; 3199 struct pci_dev *pdev = adapter->pdev;
3201 unsigned long flags; 3200 unsigned long flags;
@@ -3288,32 +3287,32 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3288 } 3287 }
3289 3288
3290 /* Fill out the OS statistics structure */ 3289 /* Fill out the OS statistics structure */
3291 adapter->net_stats.multicast = adapter->stats.mprc; 3290 netdev->stats.multicast = adapter->stats.mprc;
3292 adapter->net_stats.collisions = adapter->stats.colc; 3291 netdev->stats.collisions = adapter->stats.colc;
3293 3292
3294 /* Rx Errors */ 3293 /* Rx Errors */
3295 3294
3296 /* RLEC on some newer hardware can be incorrect so build 3295 /* RLEC on some newer hardware can be incorrect so build
3297 * our own version based on RUC and ROC */ 3296 * our own version based on RUC and ROC */
3298 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3297 netdev->stats.rx_errors = adapter->stats.rxerrc +
3299 adapter->stats.crcerrs + adapter->stats.algnerrc + 3298 adapter->stats.crcerrs + adapter->stats.algnerrc +
3300 adapter->stats.ruc + adapter->stats.roc + 3299 adapter->stats.ruc + adapter->stats.roc +
3301 adapter->stats.cexterr; 3300 adapter->stats.cexterr;
3302 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3301 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3303 adapter->net_stats.rx_length_errors = adapter->stats.rlerrc; 3302 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3304 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3303 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3305 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3304 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3306 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3305 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3307 3306
3308 /* Tx Errors */ 3307 /* Tx Errors */
3309 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3308 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3310 adapter->net_stats.tx_errors = adapter->stats.txerrc; 3309 netdev->stats.tx_errors = adapter->stats.txerrc;
3311 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3310 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3312 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3311 netdev->stats.tx_window_errors = adapter->stats.latecol;
3313 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3312 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3314 if (hw->bad_tx_carr_stats_fd && 3313 if (hw->bad_tx_carr_stats_fd &&
3315 adapter->link_duplex == FULL_DUPLEX) { 3314 adapter->link_duplex == FULL_DUPLEX) {
3316 adapter->net_stats.tx_carrier_errors = 0; 3315 netdev->stats.tx_carrier_errors = 0;
3317 adapter->stats.tncrs = 0; 3316 adapter->stats.tncrs = 0;
3318 } 3317 }
3319 3318
@@ -3514,8 +3513,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3514 } 3513 }
3515 adapter->total_tx_bytes += total_tx_bytes; 3514 adapter->total_tx_bytes += total_tx_bytes;
3516 adapter->total_tx_packets += total_tx_packets; 3515 adapter->total_tx_packets += total_tx_packets;
3517 adapter->net_stats.tx_bytes += total_tx_bytes; 3516 netdev->stats.tx_bytes += total_tx_bytes;
3518 adapter->net_stats.tx_packets += total_tx_packets; 3517 netdev->stats.tx_packets += total_tx_packets;
3519 return (count < tx_ring->count); 3518 return (count < tx_ring->count);
3520} 3519}
3521 3520
@@ -3767,8 +3766,8 @@ next_desc:
3767 3766
3768 adapter->total_rx_packets += total_rx_packets; 3767 adapter->total_rx_packets += total_rx_packets;
3769 adapter->total_rx_bytes += total_rx_bytes; 3768 adapter->total_rx_bytes += total_rx_bytes;
3770 adapter->net_stats.rx_bytes += total_rx_bytes; 3769 netdev->stats.rx_bytes += total_rx_bytes;
3771 adapter->net_stats.rx_packets += total_rx_packets; 3770 netdev->stats.rx_packets += total_rx_packets;
3772 return cleaned; 3771 return cleaned;
3773} 3772}
3774 3773
@@ -3867,9 +3866,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3867 * of reassembly being done in the stack */ 3866 * of reassembly being done in the stack */
3868 if (length < copybreak) { 3867 if (length < copybreak) {
3869 struct sk_buff *new_skb = 3868 struct sk_buff *new_skb =
3870 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 3869 netdev_alloc_skb_ip_align(netdev, length);
3871 if (new_skb) { 3870 if (new_skb) {
3872 skb_reserve(new_skb, NET_IP_ALIGN);
3873 skb_copy_to_linear_data_offset(new_skb, 3871 skb_copy_to_linear_data_offset(new_skb,
3874 -NET_IP_ALIGN, 3872 -NET_IP_ALIGN,
3875 (skb->data - 3873 (skb->data -
@@ -3916,8 +3914,8 @@ next_desc:
3916 3914
3917 adapter->total_rx_packets += total_rx_packets; 3915 adapter->total_rx_packets += total_rx_packets;
3918 adapter->total_rx_bytes += total_rx_bytes; 3916 adapter->total_rx_bytes += total_rx_bytes;
3919 adapter->net_stats.rx_bytes += total_rx_bytes; 3917 netdev->stats.rx_bytes += total_rx_bytes;
3920 adapter->net_stats.rx_packets += total_rx_packets; 3918 netdev->stats.rx_packets += total_rx_packets;
3921 return cleaned; 3919 return cleaned;
3922} 3920}
3923 3921
@@ -3938,9 +3936,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3938 struct e1000_buffer *buffer_info; 3936 struct e1000_buffer *buffer_info;
3939 struct sk_buff *skb; 3937 struct sk_buff *skb;
3940 unsigned int i; 3938 unsigned int i;
3941 unsigned int bufsz = 256 - 3939 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
3942 16 /*for skb_reserve */ -
3943 NET_IP_ALIGN;
3944 3940
3945 i = rx_ring->next_to_use; 3941 i = rx_ring->next_to_use;
3946 buffer_info = &rx_ring->buffer_info[i]; 3942 buffer_info = &rx_ring->buffer_info[i];
@@ -3952,7 +3948,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3952 goto check_page; 3948 goto check_page;
3953 } 3949 }
3954 3950
3955 skb = netdev_alloc_skb(netdev, bufsz); 3951 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3956 if (unlikely(!skb)) { 3952 if (unlikely(!skb)) {
3957 /* Better luck next round */ 3953 /* Better luck next round */
3958 adapter->alloc_rx_buff_failed++; 3954 adapter->alloc_rx_buff_failed++;
@@ -3965,7 +3961,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3965 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " 3961 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
3966 "at %p\n", bufsz, skb->data); 3962 "at %p\n", bufsz, skb->data);
3967 /* Try again, without freeing the previous */ 3963 /* Try again, without freeing the previous */
3968 skb = netdev_alloc_skb(netdev, bufsz); 3964 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3969 /* Failed allocation, critical failure */ 3965 /* Failed allocation, critical failure */
3970 if (!skb) { 3966 if (!skb) {
3971 dev_kfree_skb(oldskb); 3967 dev_kfree_skb(oldskb);
@@ -3983,12 +3979,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3983 /* Use new allocation */ 3979 /* Use new allocation */
3984 dev_kfree_skb(oldskb); 3980 dev_kfree_skb(oldskb);
3985 } 3981 }
3986 /* Make buffer alignment 2 beyond a 16 byte boundary
3987 * this will result in a 16 byte aligned IP header after
3988 * the 14 byte MAC header is removed
3989 */
3990 skb_reserve(skb, NET_IP_ALIGN);
3991
3992 buffer_info->skb = skb; 3982 buffer_info->skb = skb;
3993 buffer_info->length = adapter->rx_buffer_len; 3983 buffer_info->length = adapter->rx_buffer_len;
3994check_page: 3984check_page:
@@ -4045,7 +4035,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4045 struct e1000_buffer *buffer_info; 4035 struct e1000_buffer *buffer_info;
4046 struct sk_buff *skb; 4036 struct sk_buff *skb;
4047 unsigned int i; 4037 unsigned int i;
4048 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 4038 unsigned int bufsz = adapter->rx_buffer_len;
4049 4039
4050 i = rx_ring->next_to_use; 4040 i = rx_ring->next_to_use;
4051 buffer_info = &rx_ring->buffer_info[i]; 4041 buffer_info = &rx_ring->buffer_info[i];
@@ -4057,7 +4047,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4057 goto map_skb; 4047 goto map_skb;
4058 } 4048 }
4059 4049
4060 skb = netdev_alloc_skb(netdev, bufsz); 4050 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4061 if (unlikely(!skb)) { 4051 if (unlikely(!skb)) {
4062 /* Better luck next round */ 4052 /* Better luck next round */
4063 adapter->alloc_rx_buff_failed++; 4053 adapter->alloc_rx_buff_failed++;
@@ -4070,7 +4060,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4070 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 4060 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4071 "at %p\n", bufsz, skb->data); 4061 "at %p\n", bufsz, skb->data);
4072 /* Try again, without freeing the previous */ 4062 /* Try again, without freeing the previous */
4073 skb = netdev_alloc_skb(netdev, bufsz); 4063 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4074 /* Failed allocation, critical failure */ 4064 /* Failed allocation, critical failure */
4075 if (!skb) { 4065 if (!skb) {
4076 dev_kfree_skb(oldskb); 4066 dev_kfree_skb(oldskb);
@@ -4089,12 +4079,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4089 /* Use new allocation */ 4079 /* Use new allocation */
4090 dev_kfree_skb(oldskb); 4080 dev_kfree_skb(oldskb);
4091 } 4081 }
4092 /* Make buffer alignment 2 beyond a 16 byte boundary
4093 * this will result in a 16 byte aligned IP header after
4094 * the 14 byte MAC header is removed
4095 */
4096 skb_reserve(skb, NET_IP_ALIGN);
4097
4098 buffer_info->skb = skb; 4082 buffer_info->skb = skb;
4099 buffer_info->length = adapter->rx_buffer_len; 4083 buffer_info->length = adapter->rx_buffer_len;
4100map_skb: 4084map_skb:
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index d1e0563a67df..62bbc6e0a76a 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -43,10 +43,6 @@
43 * 82583V Gigabit Network Connection 43 * 82583V Gigabit Network Connection
44 */ 44 */
45 45
46#include <linux/netdevice.h>
47#include <linux/delay.h>
48#include <linux/pci.h>
49
50#include "e1000.h" 46#include "e1000.h"
51 47
52#define ID_LED_RESERVED_F746 0xF746 48#define ID_LED_RESERVED_F746 0xF746
@@ -76,8 +72,6 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
76/** 72/**
77 * e1000_init_phy_params_82571 - Init PHY func ptrs. 73 * e1000_init_phy_params_82571 - Init PHY func ptrs.
78 * @hw: pointer to the HW structure 74 * @hw: pointer to the HW structure
79 *
80 * This is a function pointer entry point called by the api module.
81 **/ 75 **/
82static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) 76static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
83{ 77{
@@ -140,8 +134,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
140/** 134/**
141 * e1000_init_nvm_params_82571 - Init NVM func ptrs. 135 * e1000_init_nvm_params_82571 - Init NVM func ptrs.
142 * @hw: pointer to the HW structure 136 * @hw: pointer to the HW structure
143 *
144 * This is a function pointer entry point called by the api module.
145 **/ 137 **/
146static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) 138static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
147{ 139{
@@ -205,8 +197,6 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
205/** 197/**
206 * e1000_init_mac_params_82571 - Init MAC func ptrs. 198 * e1000_init_mac_params_82571 - Init MAC func ptrs.
207 * @hw: pointer to the HW structure 199 * @hw: pointer to the HW structure
208 *
209 * This is a function pointer entry point called by the api module.
210 **/ 200 **/
211static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter) 201static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
212{ 202{
@@ -240,7 +230,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
240 /* Set rar entry count */ 230 /* Set rar entry count */
241 mac->rar_entry_count = E1000_RAR_ENTRIES; 231 mac->rar_entry_count = E1000_RAR_ENTRIES;
242 /* Set if manageability features are enabled. */ 232 /* Set if manageability features are enabled. */
243 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; 233 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
234 ? true : false;
244 235
245 /* check for link */ 236 /* check for link */
246 switch (hw->phy.media_type) { 237 switch (hw->phy.media_type) {
@@ -313,7 +304,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
313 * indicates that the bootagent or EFI code has 304 * indicates that the bootagent or EFI code has
314 * improperly left this bit enabled 305 * improperly left this bit enabled
315 */ 306 */
316 hw_dbg(hw, "Please update your 82571 Bootagent\n"); 307 e_dbg("Please update your 82571 Bootagent\n");
317 } 308 }
318 ew32(SWSM, swsm & ~E1000_SWSM_SMBI); 309 ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
319 } 310 }
@@ -487,7 +478,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
487 } 478 }
488 479
489 if (i == sw_timeout) { 480 if (i == sw_timeout) {
490 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); 481 e_dbg("Driver can't access device - SMBI bit is set.\n");
491 hw->dev_spec.e82571.smb_counter++; 482 hw->dev_spec.e82571.smb_counter++;
492 } 483 }
493 /* Get the FW semaphore. */ 484 /* Get the FW semaphore. */
@@ -505,7 +496,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
505 if (i == fw_timeout) { 496 if (i == fw_timeout) {
506 /* Release semaphores */ 497 /* Release semaphores */
507 e1000_put_hw_semaphore_82571(hw); 498 e1000_put_hw_semaphore_82571(hw);
508 hw_dbg(hw, "Driver can't access the NVM\n"); 499 e_dbg("Driver can't access the NVM\n");
509 return -E1000_ERR_NVM; 500 return -E1000_ERR_NVM;
510 } 501 }
511 502
@@ -702,8 +693,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
702 u16 words, u16 *data) 693 u16 words, u16 *data)
703{ 694{
704 struct e1000_nvm_info *nvm = &hw->nvm; 695 struct e1000_nvm_info *nvm = &hw->nvm;
705 u32 i; 696 u32 i, eewr = 0;
706 u32 eewr = 0;
707 s32 ret_val = 0; 697 s32 ret_val = 0;
708 698
709 /* 699 /*
@@ -712,7 +702,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
712 */ 702 */
713 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 703 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
714 (words == 0)) { 704 (words == 0)) {
715 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 705 e_dbg("nvm parameter(s) out of bounds\n");
716 return -E1000_ERR_NVM; 706 return -E1000_ERR_NVM;
717 } 707 }
718 708
@@ -753,7 +743,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
753 timeout--; 743 timeout--;
754 } 744 }
755 if (!timeout) { 745 if (!timeout) {
756 hw_dbg(hw, "MNG configuration cycle has not completed.\n"); 746 e_dbg("MNG configuration cycle has not completed.\n");
757 return -E1000_ERR_RESET; 747 return -E1000_ERR_RESET;
758 } 748 }
759 749
@@ -763,7 +753,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
763/** 753/**
764 * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state 754 * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
765 * @hw: pointer to the HW structure 755 * @hw: pointer to the HW structure
766 * @active: TRUE to enable LPLU, FALSE to disable 756 * @active: true to enable LPLU, false to disable
767 * 757 *
768 * Sets the LPLU D0 state according to the active flag. When activating LPLU 758 * Sets the LPLU D0 state according to the active flag. When activating LPLU
769 * this function also disables smart speed and vice versa. LPLU will not be 759 * this function also disables smart speed and vice versa. LPLU will not be
@@ -834,15 +824,11 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
834 * e1000_reset_hw_82571 - Reset hardware 824 * e1000_reset_hw_82571 - Reset hardware
835 * @hw: pointer to the HW structure 825 * @hw: pointer to the HW structure
836 * 826 *
837 * This resets the hardware into a known state. This is a 827 * This resets the hardware into a known state.
838 * function pointer entry point called by the api module.
839 **/ 828 **/
840static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 829static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
841{ 830{
842 u32 ctrl; 831 u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
843 u32 extcnf_ctrl;
844 u32 ctrl_ext;
845 u32 icr;
846 s32 ret_val; 832 s32 ret_val;
847 u16 i = 0; 833 u16 i = 0;
848 834
@@ -852,9 +838,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
852 */ 838 */
853 ret_val = e1000e_disable_pcie_master(hw); 839 ret_val = e1000e_disable_pcie_master(hw);
854 if (ret_val) 840 if (ret_val)
855 hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); 841 e_dbg("PCI-E Master disable polling has failed.\n");
856 842
857 hw_dbg(hw, "Masking off all interrupts\n"); 843 e_dbg("Masking off all interrupts\n");
858 ew32(IMC, 0xffffffff); 844 ew32(IMC, 0xffffffff);
859 845
860 ew32(RCTL, 0); 846 ew32(RCTL, 0);
@@ -893,7 +879,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
893 879
894 ctrl = er32(CTRL); 880 ctrl = er32(CTRL);
895 881
896 hw_dbg(hw, "Issuing a global reset to MAC\n"); 882 e_dbg("Issuing a global reset to MAC\n");
897 ew32(CTRL, ctrl | E1000_CTRL_RST); 883 ew32(CTRL, ctrl | E1000_CTRL_RST);
898 884
899 if (hw->nvm.type == e1000_nvm_flash_hw) { 885 if (hw->nvm.type == e1000_nvm_flash_hw) {
@@ -951,20 +937,18 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
951 struct e1000_mac_info *mac = &hw->mac; 937 struct e1000_mac_info *mac = &hw->mac;
952 u32 reg_data; 938 u32 reg_data;
953 s32 ret_val; 939 s32 ret_val;
954 u16 i; 940 u16 i, rar_count = mac->rar_entry_count;
955 u16 rar_count = mac->rar_entry_count;
956 941
957 e1000_initialize_hw_bits_82571(hw); 942 e1000_initialize_hw_bits_82571(hw);
958 943
959 /* Initialize identification LED */ 944 /* Initialize identification LED */
960 ret_val = e1000e_id_led_init(hw); 945 ret_val = e1000e_id_led_init(hw);
961 if (ret_val) { 946 if (ret_val)
962 hw_dbg(hw, "Error initializing identification LED\n"); 947 e_dbg("Error initializing identification LED\n");
963 return ret_val; 948 /* This is not fatal and we should not stop init due to this */
964 }
965 949
966 /* Disabling VLAN filtering */ 950 /* Disabling VLAN filtering */
967 hw_dbg(hw, "Initializing the IEEE VLAN\n"); 951 e_dbg("Initializing the IEEE VLAN\n");
968 e1000e_clear_vfta(hw); 952 e1000e_clear_vfta(hw);
969 953
970 /* Setup the receive address. */ 954 /* Setup the receive address. */
@@ -978,7 +962,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
978 e1000e_init_rx_addrs(hw, rar_count); 962 e1000e_init_rx_addrs(hw, rar_count);
979 963
980 /* Zero out the Multicast HASH table */ 964 /* Zero out the Multicast HASH table */
981 hw_dbg(hw, "Zeroing the MTA\n"); 965 e_dbg("Zeroing the MTA\n");
982 for (i = 0; i < mac->mta_reg_count; i++) 966 for (i = 0; i < mac->mta_reg_count; i++)
983 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 967 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
984 968
@@ -1125,6 +1109,13 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1125 reg |= (1 << 22); 1109 reg |= (1 << 22);
1126 ew32(GCR, reg); 1110 ew32(GCR, reg);
1127 1111
1112 /*
1113 * Workaround for hardware errata.
1114 * apply workaround for hardware errata documented in errata
1115 * docs Fixes issue where some error prone or unreliable PCIe
1116 * completions are occurring, particularly with ASPM enabled.
1117 * Without fix, issue can cause tx timeouts.
1118 */
1128 reg = er32(GCR2); 1119 reg = er32(GCR2);
1129 reg |= 1; 1120 reg |= 1;
1130 ew32(GCR2, reg); 1121 ew32(GCR2, reg);
@@ -1387,7 +1378,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1387 */ 1378 */
1388 mac->serdes_link_state = 1379 mac->serdes_link_state =
1389 e1000_serdes_link_autoneg_progress; 1380 e1000_serdes_link_autoneg_progress;
1390 hw_dbg(hw, "AN_UP -> AN_PROG\n"); 1381 e_dbg("AN_UP -> AN_PROG\n");
1391 } 1382 }
1392 break; 1383 break;
1393 1384
@@ -1405,7 +1396,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1405 (ctrl & ~E1000_CTRL_SLU)); 1396 (ctrl & ~E1000_CTRL_SLU));
1406 mac->serdes_link_state = 1397 mac->serdes_link_state =
1407 e1000_serdes_link_autoneg_progress; 1398 e1000_serdes_link_autoneg_progress;
1408 hw_dbg(hw, "FORCED_UP -> AN_PROG\n"); 1399 e_dbg("FORCED_UP -> AN_PROG\n");
1409 } 1400 }
1410 break; 1401 break;
1411 1402
@@ -1419,7 +1410,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1419 if (status & E1000_STATUS_LU) { 1410 if (status & E1000_STATUS_LU) {
1420 mac->serdes_link_state = 1411 mac->serdes_link_state =
1421 e1000_serdes_link_autoneg_complete; 1412 e1000_serdes_link_autoneg_complete;
1422 hw_dbg(hw, "AN_PROG -> AN_UP\n"); 1413 e_dbg("AN_PROG -> AN_UP\n");
1423 } else { 1414 } else {
1424 /* 1415 /*
1425 * Disable autoneg, force link up and 1416 * Disable autoneg, force link up and
@@ -1434,12 +1425,12 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1434 ret_val = 1425 ret_val =
1435 e1000e_config_fc_after_link_up(hw); 1426 e1000e_config_fc_after_link_up(hw);
1436 if (ret_val) { 1427 if (ret_val) {
1437 hw_dbg(hw, "Error config flow control\n"); 1428 e_dbg("Error config flow control\n");
1438 break; 1429 break;
1439 } 1430 }
1440 mac->serdes_link_state = 1431 mac->serdes_link_state =
1441 e1000_serdes_link_forced_up; 1432 e1000_serdes_link_forced_up;
1442 hw_dbg(hw, "AN_PROG -> FORCED_UP\n"); 1433 e_dbg("AN_PROG -> FORCED_UP\n");
1443 } 1434 }
1444 mac->serdes_has_link = true; 1435 mac->serdes_has_link = true;
1445 break; 1436 break;
@@ -1454,14 +1445,14 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1454 (ctrl & ~E1000_CTRL_SLU)); 1445 (ctrl & ~E1000_CTRL_SLU));
1455 mac->serdes_link_state = 1446 mac->serdes_link_state =
1456 e1000_serdes_link_autoneg_progress; 1447 e1000_serdes_link_autoneg_progress;
1457 hw_dbg(hw, "DOWN -> AN_PROG\n"); 1448 e_dbg("DOWN -> AN_PROG\n");
1458 break; 1449 break;
1459 } 1450 }
1460 } else { 1451 } else {
1461 if (!(rxcw & E1000_RXCW_SYNCH)) { 1452 if (!(rxcw & E1000_RXCW_SYNCH)) {
1462 mac->serdes_has_link = false; 1453 mac->serdes_has_link = false;
1463 mac->serdes_link_state = e1000_serdes_link_down; 1454 mac->serdes_link_state = e1000_serdes_link_down;
1464 hw_dbg(hw, "ANYSTATE -> DOWN\n"); 1455 e_dbg("ANYSTATE -> DOWN\n");
1465 } else { 1456 } else {
1466 /* 1457 /*
1467 * We have sync, and can tolerate one 1458 * We have sync, and can tolerate one
@@ -1473,7 +1464,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1473 if (rxcw & E1000_RXCW_IV) { 1464 if (rxcw & E1000_RXCW_IV) {
1474 mac->serdes_link_state = e1000_serdes_link_down; 1465 mac->serdes_link_state = e1000_serdes_link_down;
1475 mac->serdes_has_link = false; 1466 mac->serdes_has_link = false;
1476 hw_dbg(hw, "ANYSTATE -> DOWN\n"); 1467 e_dbg("ANYSTATE -> DOWN\n");
1477 } 1468 }
1478 } 1469 }
1479 } 1470 }
@@ -1495,7 +1486,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
1495 1486
1496 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); 1487 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1497 if (ret_val) { 1488 if (ret_val) {
1498 hw_dbg(hw, "NVM Read Error\n"); 1489 e_dbg("NVM Read Error\n");
1499 return ret_val; 1490 return ret_val;
1500 } 1491 }
1501 1492
@@ -1525,7 +1516,7 @@ static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
1525bool e1000e_get_laa_state_82571(struct e1000_hw *hw) 1516bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
1526{ 1517{
1527 if (hw->mac.type != e1000_82571) 1518 if (hw->mac.type != e1000_82571)
1528 return 0; 1519 return false;
1529 1520
1530 return hw->dev_spec.e82571.laa_is_present; 1521 return hw->dev_spec.e82571.laa_is_present;
1531} 1522}
@@ -1616,44 +1607,42 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1616 **/ 1607 **/
1617static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) 1608static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
1618{ 1609{
1619 u32 temp;
1620
1621 e1000e_clear_hw_cntrs_base(hw); 1610 e1000e_clear_hw_cntrs_base(hw);
1622 1611
1623 temp = er32(PRC64); 1612 er32(PRC64);
1624 temp = er32(PRC127); 1613 er32(PRC127);
1625 temp = er32(PRC255); 1614 er32(PRC255);
1626 temp = er32(PRC511); 1615 er32(PRC511);
1627 temp = er32(PRC1023); 1616 er32(PRC1023);
1628 temp = er32(PRC1522); 1617 er32(PRC1522);
1629 temp = er32(PTC64); 1618 er32(PTC64);
1630 temp = er32(PTC127); 1619 er32(PTC127);
1631 temp = er32(PTC255); 1620 er32(PTC255);
1632 temp = er32(PTC511); 1621 er32(PTC511);
1633 temp = er32(PTC1023); 1622 er32(PTC1023);
1634 temp = er32(PTC1522); 1623 er32(PTC1522);
1635 1624
1636 temp = er32(ALGNERRC); 1625 er32(ALGNERRC);
1637 temp = er32(RXERRC); 1626 er32(RXERRC);
1638 temp = er32(TNCRS); 1627 er32(TNCRS);
1639 temp = er32(CEXTERR); 1628 er32(CEXTERR);
1640 temp = er32(TSCTC); 1629 er32(TSCTC);
1641 temp = er32(TSCTFC); 1630 er32(TSCTFC);
1642 1631
1643 temp = er32(MGTPRC); 1632 er32(MGTPRC);
1644 temp = er32(MGTPDC); 1633 er32(MGTPDC);
1645 temp = er32(MGTPTC); 1634 er32(MGTPTC);
1646 1635
1647 temp = er32(IAC); 1636 er32(IAC);
1648 temp = er32(ICRXOC); 1637 er32(ICRXOC);
1649 1638
1650 temp = er32(ICRXPTC); 1639 er32(ICRXPTC);
1651 temp = er32(ICRXATC); 1640 er32(ICRXATC);
1652 temp = er32(ICTXPTC); 1641 er32(ICTXPTC);
1653 temp = er32(ICTXATC); 1642 er32(ICTXATC);
1654 temp = er32(ICTXQEC); 1643 er32(ICTXQEC);
1655 temp = er32(ICTXQMTC); 1644 er32(ICTXQMTC);
1656 temp = er32(ICRXDMTC); 1645 er32(ICRXDMTC);
1657} 1646}
1658 1647
1659static struct e1000_mac_operations e82571_mac_ops = { 1648static struct e1000_mac_operations e82571_mac_ops = {
@@ -1675,64 +1664,64 @@ static struct e1000_mac_operations e82571_mac_ops = {
1675}; 1664};
1676 1665
1677static struct e1000_phy_operations e82_phy_ops_igp = { 1666static struct e1000_phy_operations e82_phy_ops_igp = {
1678 .acquire_phy = e1000_get_hw_semaphore_82571, 1667 .acquire = e1000_get_hw_semaphore_82571,
1679 .check_reset_block = e1000e_check_reset_block_generic, 1668 .check_reset_block = e1000e_check_reset_block_generic,
1680 .commit_phy = NULL, 1669 .commit = NULL,
1681 .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, 1670 .force_speed_duplex = e1000e_phy_force_speed_duplex_igp,
1682 .get_cfg_done = e1000_get_cfg_done_82571, 1671 .get_cfg_done = e1000_get_cfg_done_82571,
1683 .get_cable_length = e1000e_get_cable_length_igp_2, 1672 .get_cable_length = e1000e_get_cable_length_igp_2,
1684 .get_phy_info = e1000e_get_phy_info_igp, 1673 .get_info = e1000e_get_phy_info_igp,
1685 .read_phy_reg = e1000e_read_phy_reg_igp, 1674 .read_reg = e1000e_read_phy_reg_igp,
1686 .release_phy = e1000_put_hw_semaphore_82571, 1675 .release = e1000_put_hw_semaphore_82571,
1687 .reset_phy = e1000e_phy_hw_reset_generic, 1676 .reset = e1000e_phy_hw_reset_generic,
1688 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, 1677 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1689 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1678 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1690 .write_phy_reg = e1000e_write_phy_reg_igp, 1679 .write_reg = e1000e_write_phy_reg_igp,
1691 .cfg_on_link_up = NULL, 1680 .cfg_on_link_up = NULL,
1692}; 1681};
1693 1682
1694static struct e1000_phy_operations e82_phy_ops_m88 = { 1683static struct e1000_phy_operations e82_phy_ops_m88 = {
1695 .acquire_phy = e1000_get_hw_semaphore_82571, 1684 .acquire = e1000_get_hw_semaphore_82571,
1696 .check_reset_block = e1000e_check_reset_block_generic, 1685 .check_reset_block = e1000e_check_reset_block_generic,
1697 .commit_phy = e1000e_phy_sw_reset, 1686 .commit = e1000e_phy_sw_reset,
1698 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, 1687 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
1699 .get_cfg_done = e1000e_get_cfg_done, 1688 .get_cfg_done = e1000e_get_cfg_done,
1700 .get_cable_length = e1000e_get_cable_length_m88, 1689 .get_cable_length = e1000e_get_cable_length_m88,
1701 .get_phy_info = e1000e_get_phy_info_m88, 1690 .get_info = e1000e_get_phy_info_m88,
1702 .read_phy_reg = e1000e_read_phy_reg_m88, 1691 .read_reg = e1000e_read_phy_reg_m88,
1703 .release_phy = e1000_put_hw_semaphore_82571, 1692 .release = e1000_put_hw_semaphore_82571,
1704 .reset_phy = e1000e_phy_hw_reset_generic, 1693 .reset = e1000e_phy_hw_reset_generic,
1705 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, 1694 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1706 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1695 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1707 .write_phy_reg = e1000e_write_phy_reg_m88, 1696 .write_reg = e1000e_write_phy_reg_m88,
1708 .cfg_on_link_up = NULL, 1697 .cfg_on_link_up = NULL,
1709}; 1698};
1710 1699
1711static struct e1000_phy_operations e82_phy_ops_bm = { 1700static struct e1000_phy_operations e82_phy_ops_bm = {
1712 .acquire_phy = e1000_get_hw_semaphore_82571, 1701 .acquire = e1000_get_hw_semaphore_82571,
1713 .check_reset_block = e1000e_check_reset_block_generic, 1702 .check_reset_block = e1000e_check_reset_block_generic,
1714 .commit_phy = e1000e_phy_sw_reset, 1703 .commit = e1000e_phy_sw_reset,
1715 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, 1704 .force_speed_duplex = e1000e_phy_force_speed_duplex_m88,
1716 .get_cfg_done = e1000e_get_cfg_done, 1705 .get_cfg_done = e1000e_get_cfg_done,
1717 .get_cable_length = e1000e_get_cable_length_m88, 1706 .get_cable_length = e1000e_get_cable_length_m88,
1718 .get_phy_info = e1000e_get_phy_info_m88, 1707 .get_info = e1000e_get_phy_info_m88,
1719 .read_phy_reg = e1000e_read_phy_reg_bm2, 1708 .read_reg = e1000e_read_phy_reg_bm2,
1720 .release_phy = e1000_put_hw_semaphore_82571, 1709 .release = e1000_put_hw_semaphore_82571,
1721 .reset_phy = e1000e_phy_hw_reset_generic, 1710 .reset = e1000e_phy_hw_reset_generic,
1722 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, 1711 .set_d0_lplu_state = e1000_set_d0_lplu_state_82571,
1723 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1712 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1724 .write_phy_reg = e1000e_write_phy_reg_bm2, 1713 .write_reg = e1000e_write_phy_reg_bm2,
1725 .cfg_on_link_up = NULL, 1714 .cfg_on_link_up = NULL,
1726}; 1715};
1727 1716
1728static struct e1000_nvm_operations e82571_nvm_ops = { 1717static struct e1000_nvm_operations e82571_nvm_ops = {
1729 .acquire_nvm = e1000_acquire_nvm_82571, 1718 .acquire = e1000_acquire_nvm_82571,
1730 .read_nvm = e1000e_read_nvm_eerd, 1719 .read = e1000e_read_nvm_eerd,
1731 .release_nvm = e1000_release_nvm_82571, 1720 .release = e1000_release_nvm_82571,
1732 .update_nvm = e1000_update_nvm_checksum_82571, 1721 .update = e1000_update_nvm_checksum_82571,
1733 .valid_led_default = e1000_valid_led_default_82571, 1722 .valid_led_default = e1000_valid_led_default_82571,
1734 .validate_nvm = e1000_validate_nvm_checksum_82571, 1723 .validate = e1000_validate_nvm_checksum_82571,
1735 .write_nvm = e1000_write_nvm_82571, 1724 .write = e1000_write_nvm_82571,
1736}; 1725};
1737 1726
1738struct e1000_info e1000_82571_info = { 1727struct e1000_info e1000_82571_info = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index 1190167a8b3d..86d2809763c3 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 3e187b0e4203..3102d738cfd1 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/pci.h>
39 40
40#include "hw.h" 41#include "hw.h"
41 42
@@ -47,9 +48,9 @@ struct e1000_info;
47 48
48#ifdef DEBUG 49#ifdef DEBUG
49#define e_dbg(format, arg...) \ 50#define e_dbg(format, arg...) \
50 e_printk(KERN_DEBUG , adapter, format, ## arg) 51 e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
51#else 52#else
52#define e_dbg(format, arg...) do { (void)(adapter); } while (0) 53#define e_dbg(format, arg...) do { (void)(hw); } while (0)
53#endif 54#endif
54 55
55#define e_err(format, arg...) \ 56#define e_err(format, arg...) \
@@ -331,7 +332,6 @@ struct e1000_adapter {
331 /* OS defined structs */ 332 /* OS defined structs */
332 struct net_device *netdev; 333 struct net_device *netdev;
333 struct pci_dev *pdev; 334 struct pci_dev *pdev;
334 struct net_device_stats net_stats;
335 335
336 /* structs defined in e1000_hw.h */ 336 /* structs defined in e1000_hw.h */
337 struct e1000_hw hw; 337 struct e1000_hw hw;
@@ -366,6 +366,7 @@ struct e1000_adapter {
366 struct work_struct downshift_task; 366 struct work_struct downshift_task;
367 struct work_struct update_phy_task; 367 struct work_struct update_phy_task;
368 struct work_struct led_blink_task; 368 struct work_struct led_blink_task;
369 struct work_struct print_hang_task;
369}; 370};
370 371
371struct e1000_info { 372struct e1000_info {
@@ -488,6 +489,7 @@ extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
488extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); 489extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
489extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); 490extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
490extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw); 491extern void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw);
492extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
491 493
492extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw); 494extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
493extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); 495extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
@@ -585,7 +587,7 @@ extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
585 587
586static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) 588static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
587{ 589{
588 return hw->phy.ops.reset_phy(hw); 590 return hw->phy.ops.reset(hw);
589} 591}
590 592
591static inline s32 e1000_check_reset_block(struct e1000_hw *hw) 593static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
@@ -595,12 +597,12 @@ static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
595 597
596static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) 598static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
597{ 599{
598 return hw->phy.ops.read_phy_reg(hw, offset, data); 600 return hw->phy.ops.read_reg(hw, offset, data);
599} 601}
600 602
601static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) 603static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
602{ 604{
603 return hw->phy.ops.write_phy_reg(hw, offset, data); 605 return hw->phy.ops.write_reg(hw, offset, data);
604} 606}
605 607
606static inline s32 e1000_get_cable_length(struct e1000_hw *hw) 608static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
@@ -620,27 +622,27 @@ extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);
620 622
621static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) 623static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
622{ 624{
623 return hw->nvm.ops.validate_nvm(hw); 625 return hw->nvm.ops.validate(hw);
624} 626}
625 627
626static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) 628static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
627{ 629{
628 return hw->nvm.ops.update_nvm(hw); 630 return hw->nvm.ops.update(hw);
629} 631}
630 632
631static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 633static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
632{ 634{
633 return hw->nvm.ops.read_nvm(hw, offset, words, data); 635 return hw->nvm.ops.read(hw, offset, words, data);
634} 636}
635 637
636static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 638static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
637{ 639{
638 return hw->nvm.ops.write_nvm(hw, offset, words, data); 640 return hw->nvm.ops.write(hw, offset, words, data);
639} 641}
640 642
641static inline s32 e1000_get_phy_info(struct e1000_hw *hw) 643static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
642{ 644{
643 return hw->phy.ops.get_phy_info(hw); 645 return hw->phy.ops.get_info(hw);
644} 646}
645 647
646static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw) 648static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index ae5d73689353..e50579859e06 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -31,11 +31,6 @@
31 * 80003ES2LAN Gigabit Ethernet Controller (Serdes) 31 * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
32 */ 32 */
33 33
34#include <linux/netdevice.h>
35#include <linux/ethtool.h>
36#include <linux/delay.h>
37#include <linux/pci.h>
38
39#include "e1000.h" 34#include "e1000.h"
40 35
41#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 36#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
@@ -121,8 +116,6 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
121/** 116/**
122 * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. 117 * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
123 * @hw: pointer to the HW structure 118 * @hw: pointer to the HW structure
124 *
125 * This is a function pointer entry point called by the api module.
126 **/ 119 **/
127static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) 120static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
128{ 121{
@@ -152,8 +145,6 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
152/** 145/**
153 * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. 146 * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
154 * @hw: pointer to the HW structure 147 * @hw: pointer to the HW structure
155 *
156 * This is a function pointer entry point called by the api module.
157 **/ 148 **/
158static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) 149static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
159{ 150{
@@ -200,8 +191,6 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
200/** 191/**
201 * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 192 * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
202 * @hw: pointer to the HW structure 193 * @hw: pointer to the HW structure
203 *
204 * This is a function pointer entry point called by the api module.
205 **/ 194 **/
206static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter) 195static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
207{ 196{
@@ -224,7 +213,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
224 /* Set rar entry count */ 213 /* Set rar entry count */
225 mac->rar_entry_count = E1000_RAR_ENTRIES; 214 mac->rar_entry_count = E1000_RAR_ENTRIES;
226 /* Set if manageability features are enabled. */ 215 /* Set if manageability features are enabled. */
227 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0; 216 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
217 ? true : false;
228 218
229 /* check for link */ 219 /* check for link */
230 switch (hw->phy.media_type) { 220 switch (hw->phy.media_type) {
@@ -272,8 +262,7 @@ static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
272 * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY 262 * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
273 * @hw: pointer to the HW structure 263 * @hw: pointer to the HW structure
274 * 264 *
275 * A wrapper to acquire access rights to the correct PHY. This is a 265 * A wrapper to acquire access rights to the correct PHY.
276 * function pointer entry point called by the api module.
277 **/ 266 **/
278static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) 267static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
279{ 268{
@@ -287,8 +276,7 @@ static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
287 * e1000_release_phy_80003es2lan - Release rights to access PHY 276 * e1000_release_phy_80003es2lan - Release rights to access PHY
288 * @hw: pointer to the HW structure 277 * @hw: pointer to the HW structure
289 * 278 *
290 * A wrapper to release access rights to the correct PHY. This is a 279 * A wrapper to release access rights to the correct PHY.
291 * function pointer entry point called by the api module.
292 **/ 280 **/
293static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) 281static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
294{ 282{
@@ -333,8 +321,7 @@ static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
333 * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM 321 * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
334 * @hw: pointer to the HW structure 322 * @hw: pointer to the HW structure
335 * 323 *
336 * Acquire the semaphore to access the EEPROM. This is a function 324 * Acquire the semaphore to access the EEPROM.
337 * pointer entry point called by the api module.
338 **/ 325 **/
339static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) 326static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
340{ 327{
@@ -356,8 +343,7 @@ static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
356 * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM 343 * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
357 * @hw: pointer to the HW structure 344 * @hw: pointer to the HW structure
358 * 345 *
359 * Release the semaphore used to access the EEPROM. This is a 346 * Release the semaphore used to access the EEPROM.
360 * function pointer entry point called by the api module.
361 **/ 347 **/
362static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) 348static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
363{ 349{
@@ -399,8 +385,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
399 } 385 }
400 386
401 if (i == timeout) { 387 if (i == timeout) {
402 hw_dbg(hw, 388 e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
403 "Driver can't access resource, SW_FW_SYNC timeout.\n");
404 return -E1000_ERR_SWFW_SYNC; 389 return -E1000_ERR_SWFW_SYNC;
405 } 390 }
406 391
@@ -440,8 +425,7 @@ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
440 * @offset: offset of the register to read 425 * @offset: offset of the register to read
441 * @data: pointer to the data returned from the operation 426 * @data: pointer to the data returned from the operation
442 * 427 *
443 * Read the GG82563 PHY register. This is a function pointer entry 428 * Read the GG82563 PHY register.
444 * point called by the api module.
445 **/ 429 **/
446static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, 430static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
447 u32 offset, u16 *data) 431 u32 offset, u16 *data)
@@ -505,8 +489,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
505 * @offset: offset of the register to read 489 * @offset: offset of the register to read
506 * @data: value to write to the register 490 * @data: value to write to the register
507 * 491 *
508 * Write to the GG82563 PHY register. This is a function pointer entry 492 * Write to the GG82563 PHY register.
509 * point called by the api module.
510 **/ 493 **/
511static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, 494static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
512 u32 offset, u16 data) 495 u32 offset, u16 data)
@@ -571,8 +554,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
571 * @words: number of words to write 554 * @words: number of words to write
572 * @data: buffer of data to write to the NVM 555 * @data: buffer of data to write to the NVM
573 * 556 *
574 * Write "words" of data to the ESB2 NVM. This is a function 557 * Write "words" of data to the ESB2 NVM.
575 * pointer entry point called by the api module.
576 **/ 558 **/
577static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, 559static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
578 u16 words, u16 *data) 560 u16 words, u16 *data)
@@ -602,7 +584,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
602 timeout--; 584 timeout--;
603 } 585 }
604 if (!timeout) { 586 if (!timeout) {
605 hw_dbg(hw, "MNG configuration cycle has not completed.\n"); 587 e_dbg("MNG configuration cycle has not completed.\n");
606 return -E1000_ERR_RESET; 588 return -E1000_ERR_RESET;
607 } 589 }
608 590
@@ -635,7 +617,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
635 if (ret_val) 617 if (ret_val)
636 return ret_val; 618 return ret_val;
637 619
638 hw_dbg(hw, "GG82563 PSCR: %X\n", phy_data); 620 e_dbg("GG82563 PSCR: %X\n", phy_data);
639 621
640 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); 622 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
641 if (ret_val) 623 if (ret_val)
@@ -653,7 +635,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
653 udelay(1); 635 udelay(1);
654 636
655 if (hw->phy.autoneg_wait_to_complete) { 637 if (hw->phy.autoneg_wait_to_complete) {
656 hw_dbg(hw, "Waiting for forced speed/duplex link " 638 e_dbg("Waiting for forced speed/duplex link "
657 "on GG82563 phy.\n"); 639 "on GG82563 phy.\n");
658 640
659 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 641 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
@@ -713,8 +695,7 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
713{ 695{
714 struct e1000_phy_info *phy = &hw->phy; 696 struct e1000_phy_info *phy = &hw->phy;
715 s32 ret_val; 697 s32 ret_val;
716 u16 phy_data; 698 u16 phy_data, index;
717 u16 index;
718 699
719 ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); 700 ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
720 if (ret_val) 701 if (ret_val)
@@ -736,7 +717,6 @@ static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
736 * @duplex: pointer to duplex buffer 717 * @duplex: pointer to duplex buffer
737 * 718 *
738 * Retrieve the current speed and duplex configuration. 719 * Retrieve the current speed and duplex configuration.
739 * This is a function pointer entry point called by the api module.
740 **/ 720 **/
741static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, 721static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
742 u16 *duplex) 722 u16 *duplex)
@@ -762,12 +742,10 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
762 * @hw: pointer to the HW structure 742 * @hw: pointer to the HW structure
763 * 743 *
764 * Perform a global reset to the ESB2 controller. 744 * Perform a global reset to the ESB2 controller.
765 * This is a function pointer entry point called by the api module.
766 **/ 745 **/
767static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) 746static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
768{ 747{
769 u32 ctrl; 748 u32 ctrl, icr;
770 u32 icr;
771 s32 ret_val; 749 s32 ret_val;
772 750
773 /* 751 /*
@@ -776,9 +754,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
776 */ 754 */
777 ret_val = e1000e_disable_pcie_master(hw); 755 ret_val = e1000e_disable_pcie_master(hw);
778 if (ret_val) 756 if (ret_val)
779 hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); 757 e_dbg("PCI-E Master disable polling has failed.\n");
780 758
781 hw_dbg(hw, "Masking off all interrupts\n"); 759 e_dbg("Masking off all interrupts\n");
782 ew32(IMC, 0xffffffff); 760 ew32(IMC, 0xffffffff);
783 761
784 ew32(RCTL, 0); 762 ew32(RCTL, 0);
@@ -790,7 +768,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
790 ctrl = er32(CTRL); 768 ctrl = er32(CTRL);
791 769
792 ret_val = e1000_acquire_phy_80003es2lan(hw); 770 ret_val = e1000_acquire_phy_80003es2lan(hw);
793 hw_dbg(hw, "Issuing a global reset to MAC\n"); 771 e_dbg("Issuing a global reset to MAC\n");
794 ew32(CTRL, ctrl | E1000_CTRL_RST); 772 ew32(CTRL, ctrl | E1000_CTRL_RST);
795 e1000_release_phy_80003es2lan(hw); 773 e1000_release_phy_80003es2lan(hw);
796 774
@@ -811,7 +789,6 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
811 * @hw: pointer to the HW structure 789 * @hw: pointer to the HW structure
812 * 790 *
813 * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. 791 * Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
814 * This is a function pointer entry point called by the api module.
815 **/ 792 **/
816static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) 793static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
817{ 794{
@@ -824,20 +801,19 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
824 801
825 /* Initialize identification LED */ 802 /* Initialize identification LED */
826 ret_val = e1000e_id_led_init(hw); 803 ret_val = e1000e_id_led_init(hw);
827 if (ret_val) { 804 if (ret_val)
828 hw_dbg(hw, "Error initializing identification LED\n"); 805 e_dbg("Error initializing identification LED\n");
829 return ret_val; 806 /* This is not fatal and we should not stop init due to this */
830 }
831 807
832 /* Disabling VLAN filtering */ 808 /* Disabling VLAN filtering */
833 hw_dbg(hw, "Initializing the IEEE VLAN\n"); 809 e_dbg("Initializing the IEEE VLAN\n");
834 e1000e_clear_vfta(hw); 810 e1000e_clear_vfta(hw);
835 811
836 /* Setup the receive address. */ 812 /* Setup the receive address. */
837 e1000e_init_rx_addrs(hw, mac->rar_entry_count); 813 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
838 814
839 /* Zero out the Multicast HASH table */ 815 /* Zero out the Multicast HASH table */
840 hw_dbg(hw, "Zeroing the MTA\n"); 816 e_dbg("Zeroing the MTA\n");
841 for (i = 0; i < mac->mta_reg_count; i++) 817 for (i = 0; i < mac->mta_reg_count; i++)
842 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 818 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
843 819
@@ -994,7 +970,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
994 /* SW Reset the PHY so all changes take effect */ 970 /* SW Reset the PHY so all changes take effect */
995 ret_val = e1000e_commit_phy(hw); 971 ret_val = e1000e_commit_phy(hw);
996 if (ret_val) { 972 if (ret_val) {
997 hw_dbg(hw, "Error Resetting the PHY\n"); 973 e_dbg("Error Resetting the PHY\n");
998 return ret_val; 974 return ret_val;
999 } 975 }
1000 976
@@ -1325,44 +1301,42 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1325 **/ 1301 **/
1326static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) 1302static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
1327{ 1303{
1328 u32 temp;
1329
1330 e1000e_clear_hw_cntrs_base(hw); 1304 e1000e_clear_hw_cntrs_base(hw);
1331 1305
1332 temp = er32(PRC64); 1306 er32(PRC64);
1333 temp = er32(PRC127); 1307 er32(PRC127);
1334 temp = er32(PRC255); 1308 er32(PRC255);
1335 temp = er32(PRC511); 1309 er32(PRC511);
1336 temp = er32(PRC1023); 1310 er32(PRC1023);
1337 temp = er32(PRC1522); 1311 er32(PRC1522);
1338 temp = er32(PTC64); 1312 er32(PTC64);
1339 temp = er32(PTC127); 1313 er32(PTC127);
1340 temp = er32(PTC255); 1314 er32(PTC255);
1341 temp = er32(PTC511); 1315 er32(PTC511);
1342 temp = er32(PTC1023); 1316 er32(PTC1023);
1343 temp = er32(PTC1522); 1317 er32(PTC1522);
1344 1318
1345 temp = er32(ALGNERRC); 1319 er32(ALGNERRC);
1346 temp = er32(RXERRC); 1320 er32(RXERRC);
1347 temp = er32(TNCRS); 1321 er32(TNCRS);
1348 temp = er32(CEXTERR); 1322 er32(CEXTERR);
1349 temp = er32(TSCTC); 1323 er32(TSCTC);
1350 temp = er32(TSCTFC); 1324 er32(TSCTFC);
1351 1325
1352 temp = er32(MGTPRC); 1326 er32(MGTPRC);
1353 temp = er32(MGTPDC); 1327 er32(MGTPDC);
1354 temp = er32(MGTPTC); 1328 er32(MGTPTC);
1355 1329
1356 temp = er32(IAC); 1330 er32(IAC);
1357 temp = er32(ICRXOC); 1331 er32(ICRXOC);
1358 1332
1359 temp = er32(ICRXPTC); 1333 er32(ICRXPTC);
1360 temp = er32(ICRXATC); 1334 er32(ICRXATC);
1361 temp = er32(ICTXPTC); 1335 er32(ICTXPTC);
1362 temp = er32(ICTXATC); 1336 er32(ICTXATC);
1363 temp = er32(ICTXQEC); 1337 er32(ICTXQEC);
1364 temp = er32(ICTXQMTC); 1338 er32(ICTXQMTC);
1365 temp = er32(ICRXDMTC); 1339 er32(ICRXDMTC);
1366} 1340}
1367 1341
1368static struct e1000_mac_operations es2_mac_ops = { 1342static struct e1000_mac_operations es2_mac_ops = {
@@ -1384,30 +1358,30 @@ static struct e1000_mac_operations es2_mac_ops = {
1384}; 1358};
1385 1359
1386static struct e1000_phy_operations es2_phy_ops = { 1360static struct e1000_phy_operations es2_phy_ops = {
1387 .acquire_phy = e1000_acquire_phy_80003es2lan, 1361 .acquire = e1000_acquire_phy_80003es2lan,
1388 .check_reset_block = e1000e_check_reset_block_generic, 1362 .check_reset_block = e1000e_check_reset_block_generic,
1389 .commit_phy = e1000e_phy_sw_reset, 1363 .commit = e1000e_phy_sw_reset,
1390 .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, 1364 .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan,
1391 .get_cfg_done = e1000_get_cfg_done_80003es2lan, 1365 .get_cfg_done = e1000_get_cfg_done_80003es2lan,
1392 .get_cable_length = e1000_get_cable_length_80003es2lan, 1366 .get_cable_length = e1000_get_cable_length_80003es2lan,
1393 .get_phy_info = e1000e_get_phy_info_m88, 1367 .get_info = e1000e_get_phy_info_m88,
1394 .read_phy_reg = e1000_read_phy_reg_gg82563_80003es2lan, 1368 .read_reg = e1000_read_phy_reg_gg82563_80003es2lan,
1395 .release_phy = e1000_release_phy_80003es2lan, 1369 .release = e1000_release_phy_80003es2lan,
1396 .reset_phy = e1000e_phy_hw_reset_generic, 1370 .reset = e1000e_phy_hw_reset_generic,
1397 .set_d0_lplu_state = NULL, 1371 .set_d0_lplu_state = NULL,
1398 .set_d3_lplu_state = e1000e_set_d3_lplu_state, 1372 .set_d3_lplu_state = e1000e_set_d3_lplu_state,
1399 .write_phy_reg = e1000_write_phy_reg_gg82563_80003es2lan, 1373 .write_reg = e1000_write_phy_reg_gg82563_80003es2lan,
1400 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, 1374 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
1401}; 1375};
1402 1376
1403static struct e1000_nvm_operations es2_nvm_ops = { 1377static struct e1000_nvm_operations es2_nvm_ops = {
1404 .acquire_nvm = e1000_acquire_nvm_80003es2lan, 1378 .acquire = e1000_acquire_nvm_80003es2lan,
1405 .read_nvm = e1000e_read_nvm_eerd, 1379 .read = e1000e_read_nvm_eerd,
1406 .release_nvm = e1000_release_nvm_80003es2lan, 1380 .release = e1000_release_nvm_80003es2lan,
1407 .update_nvm = e1000e_update_nvm_checksum_generic, 1381 .update = e1000e_update_nvm_checksum_generic,
1408 .valid_led_default = e1000e_valid_led_default, 1382 .valid_led_default = e1000e_valid_led_default,
1409 .validate_nvm = e1000e_validate_nvm_checksum_generic, 1383 .validate = e1000e_validate_nvm_checksum_generic,
1410 .write_nvm = e1000_write_nvm_80003es2lan, 1384 .write = e1000_write_nvm_80003es2lan,
1411}; 1385};
1412 1386
1413struct e1000_info e1000_es2_info = { 1387struct e1000_info e1000_es2_info = {
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index e82638ecae88..b6243cad3103 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -35,14 +35,22 @@
35 35
36#include "e1000.h" 36#include "e1000.h"
37 37
38enum {NETDEV_STATS, E1000_STATS};
39
38struct e1000_stats { 40struct e1000_stats {
39 char stat_string[ETH_GSTRING_LEN]; 41 char stat_string[ETH_GSTRING_LEN];
42 int type;
40 int sizeof_stat; 43 int sizeof_stat;
41 int stat_offset; 44 int stat_offset;
42}; 45};
43 46
44#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ 47#define E1000_STAT(m) E1000_STATS, \
45 offsetof(struct e1000_adapter, m) 48 sizeof(((struct e1000_adapter *)0)->m), \
49 offsetof(struct e1000_adapter, m)
50#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
51 sizeof(((struct net_device *)0)->m), \
52 offsetof(struct net_device, m)
53
46static const struct e1000_stats e1000_gstrings_stats[] = { 54static const struct e1000_stats e1000_gstrings_stats[] = {
47 { "rx_packets", E1000_STAT(stats.gprc) }, 55 { "rx_packets", E1000_STAT(stats.gprc) },
48 { "tx_packets", E1000_STAT(stats.gptc) }, 56 { "tx_packets", E1000_STAT(stats.gptc) },
@@ -52,21 +60,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
52 { "tx_broadcast", E1000_STAT(stats.bptc) }, 60 { "tx_broadcast", E1000_STAT(stats.bptc) },
53 { "rx_multicast", E1000_STAT(stats.mprc) }, 61 { "rx_multicast", E1000_STAT(stats.mprc) },
54 { "tx_multicast", E1000_STAT(stats.mptc) }, 62 { "tx_multicast", E1000_STAT(stats.mptc) },
55 { "rx_errors", E1000_STAT(net_stats.rx_errors) }, 63 { "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) },
56 { "tx_errors", E1000_STAT(net_stats.tx_errors) }, 64 { "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) },
57 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, 65 { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
58 { "multicast", E1000_STAT(stats.mprc) }, 66 { "multicast", E1000_STAT(stats.mprc) },
59 { "collisions", E1000_STAT(stats.colc) }, 67 { "collisions", E1000_STAT(stats.colc) },
60 { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, 68 { "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) },
61 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, 69 { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
62 { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, 70 { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
63 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, 71 { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
64 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, 72 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
65 { "rx_missed_errors", E1000_STAT(stats.mpc) }, 73 { "rx_missed_errors", E1000_STAT(stats.mpc) },
66 { "tx_aborted_errors", E1000_STAT(stats.ecol) }, 74 { "tx_aborted_errors", E1000_STAT(stats.ecol) },
67 { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, 75 { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
68 { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, 76 { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
69 { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, 77 { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
70 { "tx_window_errors", E1000_STAT(stats.latecol) }, 78 { "tx_window_errors", E1000_STAT(stats.latecol) },
71 { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, 79 { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
72 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 80 { "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -182,6 +190,17 @@ static int e1000_get_settings(struct net_device *netdev,
182static u32 e1000_get_link(struct net_device *netdev) 190static u32 e1000_get_link(struct net_device *netdev)
183{ 191{
184 struct e1000_adapter *adapter = netdev_priv(netdev); 192 struct e1000_adapter *adapter = netdev_priv(netdev);
193 struct e1000_mac_info *mac = &adapter->hw.mac;
194
195 /*
196 * If the link is not reported up to netdev, interrupts are disabled,
197 * and so the physical link state may have changed since we last
198 * looked. Set get_link_status to make sure that the true link
199 * state is interrogated, rather than pulling a cached and possibly
200 * stale link state from the driver.
201 */
202 if (!netif_carrier_ok(netdev))
203 mac->get_link_status = 1;
185 204
186 return e1000_has_link(adapter); 205 return e1000_has_link(adapter);
187} 206}
@@ -596,7 +615,9 @@ static int e1000_set_eeprom(struct net_device *netdev,
596 * and flush shadow RAM for applicable controllers 615 * and flush shadow RAM for applicable controllers
597 */ 616 */
598 if ((first_word <= NVM_CHECKSUM_REG) || 617 if ((first_word <= NVM_CHECKSUM_REG) ||
599 (hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82573)) 618 (hw->mac.type == e1000_82583) ||
619 (hw->mac.type == e1000_82574) ||
620 (hw->mac.type == e1000_82573))
600 ret_val = e1000e_update_nvm_checksum(hw); 621 ret_val = e1000e_update_nvm_checksum(hw);
601 622
602out: 623out:
@@ -929,10 +950,10 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
929 e1000e_set_interrupt_capability(adapter); 950 e1000e_set_interrupt_capability(adapter);
930 } 951 }
931 /* Hook up test interrupt handler just for this test */ 952 /* Hook up test interrupt handler just for this test */
932 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 953 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
933 netdev)) { 954 netdev)) {
934 shared_int = 0; 955 shared_int = 0;
935 } else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, 956 } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
936 netdev->name, netdev)) { 957 netdev->name, netdev)) {
937 *data = 1; 958 *data = 1;
938 ret_val = -1; 959 ret_val = -1;
@@ -1239,6 +1260,10 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1239 1260
1240 hw->mac.autoneg = 0; 1261 hw->mac.autoneg = 0;
1241 1262
1263 /* Workaround: K1 must be disabled for stable 1Gbps operation */
1264 if (hw->mac.type == e1000_pchlan)
1265 e1000_configure_k1_ich8lan(hw, false);
1266
1242 if (hw->phy.type == e1000_phy_m88) { 1267 if (hw->phy.type == e1000_phy_m88) {
1243 /* Auto-MDI/MDIX Off */ 1268 /* Auto-MDI/MDIX Off */
1244 e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1269 e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
@@ -1769,12 +1794,11 @@ static int e1000_set_wol(struct net_device *netdev,
1769{ 1794{
1770 struct e1000_adapter *adapter = netdev_priv(netdev); 1795 struct e1000_adapter *adapter = netdev_priv(netdev);
1771 1796
1772 if (wol->wolopts & WAKE_MAGICSECURE)
1773 return -EOPNOTSUPP;
1774
1775 if (!(adapter->flags & FLAG_HAS_WOL) || 1797 if (!(adapter->flags & FLAG_HAS_WOL) ||
1776 !device_can_wakeup(&adapter->pdev->dev)) 1798 !device_can_wakeup(&adapter->pdev->dev) ||
1777 return wol->wolopts ? -EOPNOTSUPP : 0; 1799 (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1800 WAKE_MAGIC | WAKE_PHY | WAKE_ARP)))
1801 return -EOPNOTSUPP;
1778 1802
1779 /* these settings will always override what we currently have */ 1803 /* these settings will always override what we currently have */
1780 adapter->wol = 0; 1804 adapter->wol = 0;
@@ -1832,6 +1856,7 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1832 1856
1833 if ((hw->phy.type == e1000_phy_ife) || 1857 if ((hw->phy.type == e1000_phy_ife) ||
1834 (hw->mac.type == e1000_pchlan) || 1858 (hw->mac.type == e1000_pchlan) ||
1859 (hw->mac.type == e1000_82583) ||
1835 (hw->mac.type == e1000_82574)) { 1860 (hw->mac.type == e1000_82574)) {
1836 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task); 1861 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
1837 if (!adapter->blink_timer.function) { 1862 if (!adapter->blink_timer.function) {
@@ -1912,10 +1937,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1912{ 1937{
1913 struct e1000_adapter *adapter = netdev_priv(netdev); 1938 struct e1000_adapter *adapter = netdev_priv(netdev);
1914 int i; 1939 int i;
1940 char *p = NULL;
1915 1941
1916 e1000e_update_stats(adapter); 1942 e1000e_update_stats(adapter);
1917 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1943 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1918 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1944 switch (e1000_gstrings_stats[i].type) {
1945 case NETDEV_STATS:
1946 p = (char *) netdev +
1947 e1000_gstrings_stats[i].stat_offset;
1948 break;
1949 case E1000_STATS:
1950 p = (char *) adapter +
1951 e1000_gstrings_stats[i].stat_offset;
1952 break;
1953 }
1954
1919 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1955 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1920 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1956 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1921 } 1957 }
@@ -1975,6 +2011,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
1975 .get_sset_count = e1000e_get_sset_count, 2011 .get_sset_count = e1000e_get_sset_count,
1976 .get_coalesce = e1000_get_coalesce, 2012 .get_coalesce = e1000_get_coalesce,
1977 .set_coalesce = e1000_set_coalesce, 2013 .set_coalesce = e1000_set_coalesce,
2014 .get_flags = ethtool_op_get_flags,
2015 .set_flags = ethtool_op_set_flags,
1978}; 2016};
1979 2017
1980void e1000e_set_ethtool_ops(struct net_device *netdev) 2018void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index aaea41ef794d..426155c15cef 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -755,34 +755,34 @@ struct e1000_mac_operations {
755 755
756/* Function pointers for the PHY. */ 756/* Function pointers for the PHY. */
757struct e1000_phy_operations { 757struct e1000_phy_operations {
758 s32 (*acquire_phy)(struct e1000_hw *); 758 s32 (*acquire)(struct e1000_hw *);
759 s32 (*cfg_on_link_up)(struct e1000_hw *);
759 s32 (*check_polarity)(struct e1000_hw *); 760 s32 (*check_polarity)(struct e1000_hw *);
760 s32 (*check_reset_block)(struct e1000_hw *); 761 s32 (*check_reset_block)(struct e1000_hw *);
761 s32 (*commit_phy)(struct e1000_hw *); 762 s32 (*commit)(struct e1000_hw *);
762 s32 (*force_speed_duplex)(struct e1000_hw *); 763 s32 (*force_speed_duplex)(struct e1000_hw *);
763 s32 (*get_cfg_done)(struct e1000_hw *hw); 764 s32 (*get_cfg_done)(struct e1000_hw *hw);
764 s32 (*get_cable_length)(struct e1000_hw *); 765 s32 (*get_cable_length)(struct e1000_hw *);
765 s32 (*get_phy_info)(struct e1000_hw *); 766 s32 (*get_info)(struct e1000_hw *);
766 s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); 767 s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
767 s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *); 768 s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
768 void (*release_phy)(struct e1000_hw *); 769 void (*release)(struct e1000_hw *);
769 s32 (*reset_phy)(struct e1000_hw *); 770 s32 (*reset)(struct e1000_hw *);
770 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); 771 s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
771 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); 772 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
772 s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); 773 s32 (*write_reg)(struct e1000_hw *, u32, u16);
773 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16); 774 s32 (*write_reg_locked)(struct e1000_hw *, u32, u16);
774 s32 (*cfg_on_link_up)(struct e1000_hw *);
775}; 775};
776 776
777/* Function pointers for the NVM. */ 777/* Function pointers for the NVM. */
778struct e1000_nvm_operations { 778struct e1000_nvm_operations {
779 s32 (*acquire_nvm)(struct e1000_hw *); 779 s32 (*acquire)(struct e1000_hw *);
780 s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *); 780 s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
781 void (*release_nvm)(struct e1000_hw *); 781 void (*release)(struct e1000_hw *);
782 s32 (*update_nvm)(struct e1000_hw *); 782 s32 (*update)(struct e1000_hw *);
783 s32 (*valid_led_default)(struct e1000_hw *, u16 *); 783 s32 (*valid_led_default)(struct e1000_hw *, u16 *);
784 s32 (*validate_nvm)(struct e1000_hw *); 784 s32 (*validate)(struct e1000_hw *);
785 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *); 785 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
786}; 786};
787 787
788struct e1000_mac_info { 788struct e1000_mac_info {
@@ -925,15 +925,4 @@ struct e1000_hw {
925 } dev_spec; 925 } dev_spec;
926}; 926};
927 927
928#ifdef DEBUG
929#define hw_dbg(hw, format, arg...) \
930 printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg)
931#else
932static inline int __attribute__ ((format (printf, 2, 3)))
933hw_dbg(struct e1000_hw *hw, const char *format, ...)
934{
935 return 0;
936}
937#endif
938
939#endif 928#endif
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index eff3f4783655..7530fc5d81c3 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -54,11 +54,6 @@
54 * 82578DC Gigabit Network Connection 54 * 82578DC Gigabit Network Connection
55 */ 55 */
56 56
57#include <linux/netdevice.h>
58#include <linux/ethtool.h>
59#include <linux/delay.h>
60#include <linux/pci.h>
61
62#include "e1000.h" 57#include "e1000.h"
63 58
64#define ICH_FLASH_GFPREG 0x0000 59#define ICH_FLASH_GFPREG 0x0000
@@ -224,7 +219,6 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
224static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); 219static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
225static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); 220static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
226static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); 221static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
227static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
228 222
229static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) 223static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
230{ 224{
@@ -266,12 +260,12 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
266 phy->reset_delay_us = 100; 260 phy->reset_delay_us = 100;
267 261
268 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; 262 phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
269 phy->ops.read_phy_reg = e1000_read_phy_reg_hv; 263 phy->ops.read_reg = e1000_read_phy_reg_hv;
270 phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked; 264 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
271 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; 265 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
272 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; 266 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
273 phy->ops.write_phy_reg = e1000_write_phy_reg_hv; 267 phy->ops.write_reg = e1000_write_phy_reg_hv;
274 phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked; 268 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
275 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 269 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
276 270
277 phy->id = e1000_phy_unknown; 271 phy->id = e1000_phy_unknown;
@@ -283,8 +277,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
283 phy->ops.force_speed_duplex = 277 phy->ops.force_speed_duplex =
284 e1000_phy_force_speed_duplex_82577; 278 e1000_phy_force_speed_duplex_82577;
285 phy->ops.get_cable_length = e1000_get_cable_length_82577; 279 phy->ops.get_cable_length = e1000_get_cable_length_82577;
286 phy->ops.get_phy_info = e1000_get_phy_info_82577; 280 phy->ops.get_info = e1000_get_phy_info_82577;
287 phy->ops.commit_phy = e1000e_phy_sw_reset; 281 phy->ops.commit = e1000e_phy_sw_reset;
288 } 282 }
289 283
290 return ret_val; 284 return ret_val;
@@ -311,8 +305,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
311 */ 305 */
312 ret_val = e1000e_determine_phy_address(hw); 306 ret_val = e1000e_determine_phy_address(hw);
313 if (ret_val) { 307 if (ret_val) {
314 hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; 308 phy->ops.write_reg = e1000e_write_phy_reg_bm;
315 hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; 309 phy->ops.read_reg = e1000e_read_phy_reg_bm;
316 ret_val = e1000e_determine_phy_address(hw); 310 ret_val = e1000e_determine_phy_address(hw);
317 if (ret_val) 311 if (ret_val)
318 return ret_val; 312 return ret_val;
@@ -332,8 +326,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
332 case IGP03E1000_E_PHY_ID: 326 case IGP03E1000_E_PHY_ID:
333 phy->type = e1000_phy_igp_3; 327 phy->type = e1000_phy_igp_3;
334 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 328 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
335 phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked; 329 phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
336 phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked; 330 phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
337 break; 331 break;
338 case IFE_E_PHY_ID: 332 case IFE_E_PHY_ID:
339 case IFE_PLUS_E_PHY_ID: 333 case IFE_PLUS_E_PHY_ID:
@@ -344,9 +338,9 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
344 case BME1000_E_PHY_ID: 338 case BME1000_E_PHY_ID:
345 phy->type = e1000_phy_bm; 339 phy->type = e1000_phy_bm;
346 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 340 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
347 hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm; 341 phy->ops.read_reg = e1000e_read_phy_reg_bm;
348 hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm; 342 phy->ops.write_reg = e1000e_write_phy_reg_bm;
349 hw->phy.ops.commit_phy = e1000e_phy_sw_reset; 343 phy->ops.commit = e1000e_phy_sw_reset;
350 break; 344 break;
351 default: 345 default:
352 return -E1000_ERR_PHY; 346 return -E1000_ERR_PHY;
@@ -374,7 +368,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
374 368
375 /* Can't read flash registers if the register set isn't mapped. */ 369 /* Can't read flash registers if the register set isn't mapped. */
376 if (!hw->flash_address) { 370 if (!hw->flash_address) {
377 hw_dbg(hw, "ERROR: Flash registers not mapped\n"); 371 e_dbg("ERROR: Flash registers not mapped\n");
378 return -E1000_ERR_CONFIG; 372 return -E1000_ERR_CONFIG;
379 } 373 }
380 374
@@ -407,7 +401,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
407 401
408 /* Clear shadow ram */ 402 /* Clear shadow ram */
409 for (i = 0; i < nvm->word_size; i++) { 403 for (i = 0; i < nvm->word_size; i++) {
410 dev_spec->shadow_ram[i].modified = 0; 404 dev_spec->shadow_ram[i].modified = false;
411 dev_spec->shadow_ram[i].value = 0xFFFF; 405 dev_spec->shadow_ram[i].value = 0xFFFF;
412 } 406 }
413 407
@@ -436,7 +430,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
436 if (mac->type == e1000_ich8lan) 430 if (mac->type == e1000_ich8lan)
437 mac->rar_entry_count--; 431 mac->rar_entry_count--;
438 /* Set if manageability features are enabled. */ 432 /* Set if manageability features are enabled. */
439 mac->arc_subsystem_valid = 1; 433 mac->arc_subsystem_valid = true;
440 434
441 /* LED operations */ 435 /* LED operations */
442 switch (mac->type) { 436 switch (mac->type) {
@@ -470,7 +464,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
470 464
471 /* Enable PCS Lock-loss workaround for ICH8 */ 465 /* Enable PCS Lock-loss workaround for ICH8 */
472 if (mac->type == e1000_ich8lan) 466 if (mac->type == e1000_ich8lan)
473 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, 1); 467 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
474 468
475 return 0; 469 return 0;
476} 470}
@@ -556,7 +550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
556 */ 550 */
557 ret_val = e1000e_config_fc_after_link_up(hw); 551 ret_val = e1000e_config_fc_after_link_up(hw);
558 if (ret_val) 552 if (ret_val)
559 hw_dbg(hw, "Error configuring flow control\n"); 553 e_dbg("Error configuring flow control\n");
560 554
561out: 555out:
562 return ret_val; 556 return ret_val;
@@ -636,8 +630,6 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
636 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; 630 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
637 s32 ret_val = 0; 631 s32 ret_val = 0;
638 632
639 might_sleep();
640
641 mutex_lock(&swflag_mutex); 633 mutex_lock(&swflag_mutex);
642 634
643 while (timeout) { 635 while (timeout) {
@@ -650,7 +642,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
650 } 642 }
651 643
652 if (!timeout) { 644 if (!timeout) {
653 hw_dbg(hw, "SW/FW/HW has locked the resource for too long.\n"); 645 e_dbg("SW/FW/HW has locked the resource for too long.\n");
654 ret_val = -E1000_ERR_CONFIG; 646 ret_val = -E1000_ERR_CONFIG;
655 goto out; 647 goto out;
656 } 648 }
@@ -670,7 +662,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
670 } 662 }
671 663
672 if (!timeout) { 664 if (!timeout) {
673 hw_dbg(hw, "Failed to acquire the semaphore.\n"); 665 e_dbg("Failed to acquire the semaphore.\n");
674 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; 666 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
675 ew32(EXTCNF_CTRL, extcnf_ctrl); 667 ew32(EXTCNF_CTRL, extcnf_ctrl);
676 ret_val = -E1000_ERR_CONFIG; 668 ret_val = -E1000_ERR_CONFIG;
@@ -714,7 +706,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
714 **/ 706 **/
715static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) 707static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
716{ 708{
717 u32 fwsm = er32(FWSM); 709 u32 fwsm;
710
711 fwsm = er32(FWSM);
718 712
719 return (fwsm & E1000_FWSM_MODE_MASK) == 713 return (fwsm & E1000_FWSM_MODE_MASK) ==
720 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); 714 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
@@ -779,12 +773,12 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
779 if (ret_val) 773 if (ret_val)
780 return ret_val; 774 return ret_val;
781 775
782 hw_dbg(hw, "IFE PMC: %X\n", data); 776 e_dbg("IFE PMC: %X\n", data);
783 777
784 udelay(1); 778 udelay(1);
785 779
786 if (phy->autoneg_wait_to_complete) { 780 if (phy->autoneg_wait_to_complete) {
787 hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); 781 e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
788 782
789 ret_val = e1000e_phy_has_link_generic(hw, 783 ret_val = e1000e_phy_has_link_generic(hw,
790 PHY_FORCE_LIMIT, 784 PHY_FORCE_LIMIT,
@@ -794,7 +788,7 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
794 return ret_val; 788 return ret_val;
795 789
796 if (!link) 790 if (!link)
797 hw_dbg(hw, "Link taking longer than expected.\n"); 791 e_dbg("Link taking longer than expected.\n");
798 792
799 /* Try once more */ 793 /* Try once more */
800 ret_val = e1000e_phy_has_link_generic(hw, 794 ret_val = e1000e_phy_has_link_generic(hw,
@@ -822,7 +816,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
822 s32 ret_val; 816 s32 ret_val;
823 u16 word_addr, reg_data, reg_addr, phy_page = 0; 817 u16 word_addr, reg_data, reg_addr, phy_page = 0;
824 818
825 ret_val = hw->phy.ops.acquire_phy(hw); 819 ret_val = hw->phy.ops.acquire(hw);
826 if (ret_val) 820 if (ret_val)
827 return ret_val; 821 return ret_val;
828 822
@@ -918,7 +912,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
918 reg_addr &= PHY_REG_MASK; 912 reg_addr &= PHY_REG_MASK;
919 reg_addr |= phy_page; 913 reg_addr |= phy_page;
920 914
921 ret_val = phy->ops.write_phy_reg_locked(hw, 915 ret_val = phy->ops.write_reg_locked(hw,
922 (u32)reg_addr, 916 (u32)reg_addr,
923 reg_data); 917 reg_data);
924 if (ret_val) 918 if (ret_val)
@@ -927,7 +921,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
927 } 921 }
928 922
929out: 923out:
930 hw->phy.ops.release_phy(hw); 924 hw->phy.ops.release(hw);
931 return ret_val; 925 return ret_val;
932} 926}
933 927
@@ -951,15 +945,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
951 goto out; 945 goto out;
952 946
953 /* Wrap the whole flow with the sw flag */ 947 /* Wrap the whole flow with the sw flag */
954 ret_val = hw->phy.ops.acquire_phy(hw); 948 ret_val = hw->phy.ops.acquire(hw);
955 if (ret_val) 949 if (ret_val)
956 goto out; 950 goto out;
957 951
958 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ 952 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
959 if (link) { 953 if (link) {
960 if (hw->phy.type == e1000_phy_82578) { 954 if (hw->phy.type == e1000_phy_82578) {
961 ret_val = hw->phy.ops.read_phy_reg_locked(hw, 955 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
962 BM_CS_STATUS,
963 &status_reg); 956 &status_reg);
964 if (ret_val) 957 if (ret_val)
965 goto release; 958 goto release;
@@ -975,8 +968,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
975 } 968 }
976 969
977 if (hw->phy.type == e1000_phy_82577) { 970 if (hw->phy.type == e1000_phy_82577) {
978 ret_val = hw->phy.ops.read_phy_reg_locked(hw, 971 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
979 HV_M_STATUS,
980 &status_reg); 972 &status_reg);
981 if (ret_val) 973 if (ret_val)
982 goto release; 974 goto release;
@@ -992,14 +984,14 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
992 } 984 }
993 985
994 /* Link stall fix for link up */ 986 /* Link stall fix for link up */
995 ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), 987 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
996 0x0100); 988 0x0100);
997 if (ret_val) 989 if (ret_val)
998 goto release; 990 goto release;
999 991
1000 } else { 992 } else {
1001 /* Link stall fix for link down */ 993 /* Link stall fix for link down */
1002 ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), 994 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
1003 0x4100); 995 0x4100);
1004 if (ret_val) 996 if (ret_val)
1005 goto release; 997 goto release;
@@ -1008,7 +1000,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1008 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); 1000 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
1009 1001
1010release: 1002release:
1011 hw->phy.ops.release_phy(hw); 1003 hw->phy.ops.release(hw);
1012out: 1004out:
1013 return ret_val; 1005 return ret_val;
1014} 1006}
@@ -1023,7 +1015,7 @@ out:
1023 * 1015 *
1024 * Success returns 0, Failure returns -E1000_ERR_PHY (-2) 1016 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1025 **/ 1017 **/
1026static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) 1018s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
1027{ 1019{
1028 s32 ret_val = 0; 1020 s32 ret_val = 0;
1029 u32 ctrl_reg = 0; 1021 u32 ctrl_reg = 0;
@@ -1084,7 +1076,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1084 if (hw->mac.type != e1000_pchlan) 1076 if (hw->mac.type != e1000_pchlan)
1085 return ret_val; 1077 return ret_val;
1086 1078
1087 ret_val = hw->phy.ops.acquire_phy(hw); 1079 ret_val = hw->phy.ops.acquire(hw);
1088 if (ret_val) 1080 if (ret_val)
1089 return ret_val; 1081 return ret_val;
1090 1082
@@ -1098,7 +1090,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1098 1090
1099 mac_reg = er32(PHY_CTRL); 1091 mac_reg = er32(PHY_CTRL);
1100 1092
1101 ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg); 1093 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
1102 if (ret_val) 1094 if (ret_val)
1103 goto out; 1095 goto out;
1104 1096
@@ -1120,10 +1112,10 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1120 /* Restart auto-neg to activate the bits */ 1112 /* Restart auto-neg to activate the bits */
1121 if (!e1000_check_reset_block(hw)) 1113 if (!e1000_check_reset_block(hw))
1122 oem_reg |= HV_OEM_BITS_RESTART_AN; 1114 oem_reg |= HV_OEM_BITS_RESTART_AN;
1123 ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg); 1115 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
1124 1116
1125out: 1117out:
1126 hw->phy.ops.release_phy(hw); 1118 hw->phy.ops.release(hw);
1127 1119
1128 return ret_val; 1120 return ret_val;
1129} 1121}
@@ -1166,7 +1158,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1166 } 1158 }
1167 1159
1168 /* Select page 0 */ 1160 /* Select page 0 */
1169 ret_val = hw->phy.ops.acquire_phy(hw); 1161 ret_val = hw->phy.ops.acquire(hw);
1170 if (ret_val) 1162 if (ret_val)
1171 return ret_val; 1163 return ret_val;
1172 1164
@@ -1174,7 +1166,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1174 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); 1166 ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
1175 if (ret_val) 1167 if (ret_val)
1176 goto out; 1168 goto out;
1177 hw->phy.ops.release_phy(hw); 1169 hw->phy.ops.release(hw);
1178 1170
1179 /* 1171 /*
1180 * Configure the K1 Si workaround during phy reset assuming there is 1172 * Configure the K1 Si workaround during phy reset assuming there is
@@ -1210,7 +1202,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1210 * leave the PHY in a bad state possibly resulting in no link. 1202 * leave the PHY in a bad state possibly resulting in no link.
1211 */ 1203 */
1212 if (loop == 0) 1204 if (loop == 0)
1213 hw_dbg(hw, "LAN_INIT_DONE not set, increase timeout\n"); 1205 e_dbg("LAN_INIT_DONE not set, increase timeout\n");
1214 1206
1215 /* Clear the Init Done bit for the next init event */ 1207 /* Clear the Init Done bit for the next init event */
1216 data = er32(STATUS); 1208 data = er32(STATUS);
@@ -1281,7 +1273,7 @@ static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
1281 return ret_val; 1273 return ret_val;
1282 1274
1283 if (!link) { 1275 if (!link) {
1284 hw_dbg(hw, "Phy info is only valid if link is up\n"); 1276 e_dbg("Phy info is only valid if link is up\n");
1285 return -E1000_ERR_CONFIG; 1277 return -E1000_ERR_CONFIG;
1286 } 1278 }
1287 1279
@@ -1412,7 +1404,7 @@ out:
1412/** 1404/**
1413 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state 1405 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
1414 * @hw: pointer to the HW structure 1406 * @hw: pointer to the HW structure
1415 * @active: TRUE to enable LPLU, FALSE to disable 1407 * @active: true to enable LPLU, false to disable
1416 * 1408 *
1417 * Sets the LPLU D0 state according to the active flag. When 1409 * Sets the LPLU D0 state according to the active flag. When
1418 * activating LPLU this function also disables smart speed 1410 * activating LPLU this function also disables smart speed
@@ -1498,7 +1490,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
1498/** 1490/**
1499 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state 1491 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
1500 * @hw: pointer to the HW structure 1492 * @hw: pointer to the HW structure
1501 * @active: TRUE to enable LPLU, FALSE to disable 1493 * @active: true to enable LPLU, false to disable
1502 * 1494 *
1503 * Sets the LPLU D3 state according to the active flag. When 1495 * Sets the LPLU D3 state according to the active flag. When
1504 * activating LPLU this function also disables smart speed 1496 * activating LPLU this function also disables smart speed
@@ -1611,7 +1603,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
1611 1603
1612 return 0; 1604 return 0;
1613 } 1605 }
1614 hw_dbg(hw, "Unable to determine valid NVM bank via EEC - " 1606 e_dbg("Unable to determine valid NVM bank via EEC - "
1615 "reading flash signature\n"); 1607 "reading flash signature\n");
1616 /* fall-thru */ 1608 /* fall-thru */
1617 default: 1609 default:
@@ -1641,7 +1633,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
1641 return 0; 1633 return 0;
1642 } 1634 }
1643 1635
1644 hw_dbg(hw, "ERROR: No valid NVM bank present\n"); 1636 e_dbg("ERROR: No valid NVM bank present\n");
1645 return -E1000_ERR_NVM; 1637 return -E1000_ERR_NVM;
1646 } 1638 }
1647 1639
@@ -1669,16 +1661,16 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1669 1661
1670 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1662 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1671 (words == 0)) { 1663 (words == 0)) {
1672 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1664 e_dbg("nvm parameter(s) out of bounds\n");
1673 ret_val = -E1000_ERR_NVM; 1665 ret_val = -E1000_ERR_NVM;
1674 goto out; 1666 goto out;
1675 } 1667 }
1676 1668
1677 nvm->ops.acquire_nvm(hw); 1669 nvm->ops.acquire(hw);
1678 1670
1679 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1671 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
1680 if (ret_val) { 1672 if (ret_val) {
1681 hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); 1673 e_dbg("Could not detect valid bank, assuming bank 0\n");
1682 bank = 0; 1674 bank = 0;
1683 } 1675 }
1684 1676
@@ -1700,11 +1692,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1700 } 1692 }
1701 } 1693 }
1702 1694
1703 nvm->ops.release_nvm(hw); 1695 nvm->ops.release(hw);
1704 1696
1705out: 1697out:
1706 if (ret_val) 1698 if (ret_val)
1707 hw_dbg(hw, "NVM read error: %d\n", ret_val); 1699 e_dbg("NVM read error: %d\n", ret_val);
1708 1700
1709 return ret_val; 1701 return ret_val;
1710} 1702}
@@ -1726,7 +1718,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1726 1718
1727 /* Check if the flash descriptor is valid */ 1719 /* Check if the flash descriptor is valid */
1728 if (hsfsts.hsf_status.fldesvalid == 0) { 1720 if (hsfsts.hsf_status.fldesvalid == 0) {
1729 hw_dbg(hw, "Flash descriptor invalid. " 1721 e_dbg("Flash descriptor invalid. "
1730 "SW Sequencing must be used."); 1722 "SW Sequencing must be used.");
1731 return -E1000_ERR_NVM; 1723 return -E1000_ERR_NVM;
1732 } 1724 }
@@ -1776,7 +1768,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1776 hsfsts.hsf_status.flcdone = 1; 1768 hsfsts.hsf_status.flcdone = 1;
1777 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 1769 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
1778 } else { 1770 } else {
1779 hw_dbg(hw, "Flash controller busy, cannot get access"); 1771 e_dbg("Flash controller busy, cannot get access");
1780 } 1772 }
1781 } 1773 }
1782 1774
@@ -1926,7 +1918,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1926 /* Repeat for some time before giving up. */ 1918 /* Repeat for some time before giving up. */
1927 continue; 1919 continue;
1928 } else if (hsfsts.hsf_status.flcdone == 0) { 1920 } else if (hsfsts.hsf_status.flcdone == 0) {
1929 hw_dbg(hw, "Timeout error - flash cycle " 1921 e_dbg("Timeout error - flash cycle "
1930 "did not complete."); 1922 "did not complete.");
1931 break; 1923 break;
1932 } 1924 }
@@ -1954,18 +1946,18 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
1954 1946
1955 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || 1947 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
1956 (words == 0)) { 1948 (words == 0)) {
1957 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1949 e_dbg("nvm parameter(s) out of bounds\n");
1958 return -E1000_ERR_NVM; 1950 return -E1000_ERR_NVM;
1959 } 1951 }
1960 1952
1961 nvm->ops.acquire_nvm(hw); 1953 nvm->ops.acquire(hw);
1962 1954
1963 for (i = 0; i < words; i++) { 1955 for (i = 0; i < words; i++) {
1964 dev_spec->shadow_ram[offset+i].modified = 1; 1956 dev_spec->shadow_ram[offset+i].modified = true;
1965 dev_spec->shadow_ram[offset+i].value = data[i]; 1957 dev_spec->shadow_ram[offset+i].value = data[i];
1966 } 1958 }
1967 1959
1968 nvm->ops.release_nvm(hw); 1960 nvm->ops.release(hw);
1969 1961
1970 return 0; 1962 return 0;
1971} 1963}
@@ -1996,7 +1988,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1996 if (nvm->type != e1000_nvm_flash_sw) 1988 if (nvm->type != e1000_nvm_flash_sw)
1997 goto out; 1989 goto out;
1998 1990
1999 nvm->ops.acquire_nvm(hw); 1991 nvm->ops.acquire(hw);
2000 1992
2001 /* 1993 /*
2002 * We're writing to the opposite bank so if we're on bank 1, 1994 * We're writing to the opposite bank so if we're on bank 1,
@@ -2005,7 +1997,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2005 */ 1997 */
2006 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); 1998 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
2007 if (ret_val) { 1999 if (ret_val) {
2008 hw_dbg(hw, "Could not detect valid bank, assuming bank 0\n"); 2000 e_dbg("Could not detect valid bank, assuming bank 0\n");
2009 bank = 0; 2001 bank = 0;
2010 } 2002 }
2011 2003
@@ -2014,7 +2006,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2014 old_bank_offset = 0; 2006 old_bank_offset = 0;
2015 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 2007 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
2016 if (ret_val) { 2008 if (ret_val) {
2017 nvm->ops.release_nvm(hw); 2009 nvm->ops.release(hw);
2018 goto out; 2010 goto out;
2019 } 2011 }
2020 } else { 2012 } else {
@@ -2022,7 +2014,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2022 new_bank_offset = 0; 2014 new_bank_offset = 0;
2023 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 2015 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
2024 if (ret_val) { 2016 if (ret_val) {
2025 nvm->ops.release_nvm(hw); 2017 nvm->ops.release(hw);
2026 goto out; 2018 goto out;
2027 } 2019 }
2028 } 2020 }
@@ -2079,8 +2071,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2079 */ 2071 */
2080 if (ret_val) { 2072 if (ret_val) {
2081 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ 2073 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
2082 hw_dbg(hw, "Flash commit failed.\n"); 2074 e_dbg("Flash commit failed.\n");
2083 nvm->ops.release_nvm(hw); 2075 nvm->ops.release(hw);
2084 goto out; 2076 goto out;
2085 } 2077 }
2086 2078
@@ -2093,7 +2085,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2093 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 2085 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
2094 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); 2086 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
2095 if (ret_val) { 2087 if (ret_val) {
2096 nvm->ops.release_nvm(hw); 2088 nvm->ops.release(hw);
2097 goto out; 2089 goto out;
2098 } 2090 }
2099 data &= 0xBFFF; 2091 data &= 0xBFFF;
@@ -2101,7 +2093,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2101 act_offset * 2 + 1, 2093 act_offset * 2 + 1,
2102 (u8)(data >> 8)); 2094 (u8)(data >> 8));
2103 if (ret_val) { 2095 if (ret_val) {
2104 nvm->ops.release_nvm(hw); 2096 nvm->ops.release(hw);
2105 goto out; 2097 goto out;
2106 } 2098 }
2107 2099
@@ -2114,17 +2106,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2114 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 2106 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2115 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 2107 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2116 if (ret_val) { 2108 if (ret_val) {
2117 nvm->ops.release_nvm(hw); 2109 nvm->ops.release(hw);
2118 goto out; 2110 goto out;
2119 } 2111 }
2120 2112
2121 /* Great! Everything worked, we can now clear the cached entries. */ 2113 /* Great! Everything worked, we can now clear the cached entries. */
2122 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 2114 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
2123 dev_spec->shadow_ram[i].modified = 0; 2115 dev_spec->shadow_ram[i].modified = false;
2124 dev_spec->shadow_ram[i].value = 0xFFFF; 2116 dev_spec->shadow_ram[i].value = 0xFFFF;
2125 } 2117 }
2126 2118
2127 nvm->ops.release_nvm(hw); 2119 nvm->ops.release(hw);
2128 2120
2129 /* 2121 /*
2130 * Reload the EEPROM, or else modifications will not appear 2122 * Reload the EEPROM, or else modifications will not appear
@@ -2135,7 +2127,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2135 2127
2136out: 2128out:
2137 if (ret_val) 2129 if (ret_val)
2138 hw_dbg(hw, "NVM update error: %d\n", ret_val); 2130 e_dbg("NVM update error: %d\n", ret_val);
2139 2131
2140 return ret_val; 2132 return ret_val;
2141} 2133}
@@ -2193,7 +2185,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
2193 union ich8_hws_flash_status hsfsts; 2185 union ich8_hws_flash_status hsfsts;
2194 u32 gfpreg; 2186 u32 gfpreg;
2195 2187
2196 nvm->ops.acquire_nvm(hw); 2188 nvm->ops.acquire(hw);
2197 2189
2198 gfpreg = er32flash(ICH_FLASH_GFPREG); 2190 gfpreg = er32flash(ICH_FLASH_GFPREG);
2199 2191
@@ -2214,7 +2206,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
2214 hsfsts.hsf_status.flockdn = true; 2206 hsfsts.hsf_status.flockdn = true;
2215 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); 2207 ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
2216 2208
2217 nvm->ops.release_nvm(hw); 2209 nvm->ops.release(hw);
2218} 2210}
2219 2211
2220/** 2212/**
@@ -2285,7 +2277,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2285 /* Repeat for some time before giving up. */ 2277 /* Repeat for some time before giving up. */
2286 continue; 2278 continue;
2287 if (hsfsts.hsf_status.flcdone == 0) { 2279 if (hsfsts.hsf_status.flcdone == 0) {
2288 hw_dbg(hw, "Timeout error - flash cycle " 2280 e_dbg("Timeout error - flash cycle "
2289 "did not complete."); 2281 "did not complete.");
2290 break; 2282 break;
2291 } 2283 }
@@ -2330,7 +2322,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
2330 return ret_val; 2322 return ret_val;
2331 2323
2332 for (program_retries = 0; program_retries < 100; program_retries++) { 2324 for (program_retries = 0; program_retries < 100; program_retries++) {
2333 hw_dbg(hw, "Retrying Byte %2.2X at offset %u\n", byte, offset); 2325 e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
2334 udelay(100); 2326 udelay(100);
2335 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); 2327 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
2336 if (!ret_val) 2328 if (!ret_val)
@@ -2360,9 +2352,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2360 u32 flash_bank_size = nvm->flash_bank_size * 2; 2352 u32 flash_bank_size = nvm->flash_bank_size * 2;
2361 s32 ret_val; 2353 s32 ret_val;
2362 s32 count = 0; 2354 s32 count = 0;
2363 s32 iteration; 2355 s32 j, iteration, sector_size;
2364 s32 sector_size;
2365 s32 j;
2366 2356
2367 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2357 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2368 2358
@@ -2465,7 +2455,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
2465 2455
2466 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); 2456 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
2467 if (ret_val) { 2457 if (ret_val) {
2468 hw_dbg(hw, "NVM Read Error\n"); 2458 e_dbg("NVM Read Error\n");
2469 return ret_val; 2459 return ret_val;
2470 } 2460 }
2471 2461
@@ -2595,10 +2585,10 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2595 */ 2585 */
2596 ret_val = e1000e_disable_pcie_master(hw); 2586 ret_val = e1000e_disable_pcie_master(hw);
2597 if (ret_val) { 2587 if (ret_val) {
2598 hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); 2588 e_dbg("PCI-E Master disable polling has failed.\n");
2599 } 2589 }
2600 2590
2601 hw_dbg(hw, "Masking off all interrupts\n"); 2591 e_dbg("Masking off all interrupts\n");
2602 ew32(IMC, 0xffffffff); 2592 ew32(IMC, 0xffffffff);
2603 2593
2604 /* 2594 /*
@@ -2650,7 +2640,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2650 } 2640 }
2651 ret_val = e1000_acquire_swflag_ich8lan(hw); 2641 ret_val = e1000_acquire_swflag_ich8lan(hw);
2652 /* Whether or not the swflag was acquired, we need to reset the part */ 2642 /* Whether or not the swflag was acquired, we need to reset the part */
2653 hw_dbg(hw, "Issuing a global reset to ich8lan\n"); 2643 e_dbg("Issuing a global reset to ich8lan\n");
2654 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 2644 ew32(CTRL, (ctrl | E1000_CTRL_RST));
2655 msleep(20); 2645 msleep(20);
2656 2646
@@ -2670,7 +2660,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2670 * return with an error. This can happen in situations 2660 * return with an error. This can happen in situations
2671 * where there is no eeprom and prevents getting link. 2661 * where there is no eeprom and prevents getting link.
2672 */ 2662 */
2673 hw_dbg(hw, "Auto Read Done did not complete\n"); 2663 e_dbg("Auto Read Done did not complete\n");
2674 } 2664 }
2675 } 2665 }
2676 /* Dummy read to clear the phy wakeup bit after lcd reset */ 2666 /* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -2731,16 +2721,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
2731 2721
2732 /* Initialize identification LED */ 2722 /* Initialize identification LED */
2733 ret_val = mac->ops.id_led_init(hw); 2723 ret_val = mac->ops.id_led_init(hw);
2734 if (ret_val) { 2724 if (ret_val)
2735 hw_dbg(hw, "Error initializing identification LED\n"); 2725 e_dbg("Error initializing identification LED\n");
2736 return ret_val; 2726 /* This is not fatal and we should not stop init due to this */
2737 }
2738 2727
2739 /* Setup the receive address. */ 2728 /* Setup the receive address. */
2740 e1000e_init_rx_addrs(hw, mac->rar_entry_count); 2729 e1000e_init_rx_addrs(hw, mac->rar_entry_count);
2741 2730
2742 /* Zero out the Multicast HASH table */ 2731 /* Zero out the Multicast HASH table */
2743 hw_dbg(hw, "Zeroing the MTA\n"); 2732 e_dbg("Zeroing the MTA\n");
2744 for (i = 0; i < mac->mta_reg_count; i++) 2733 for (i = 0; i < mac->mta_reg_count; i++)
2745 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); 2734 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
2746 2735
@@ -2750,7 +2739,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
2750 * Reset the phy after disabling host wakeup to reset the Rx buffer. 2739 * Reset the phy after disabling host wakeup to reset the Rx buffer.
2751 */ 2740 */
2752 if (hw->phy.type == e1000_phy_82578) { 2741 if (hw->phy.type == e1000_phy_82578) {
2753 hw->phy.ops.read_phy_reg(hw, BM_WUC, &i); 2742 hw->phy.ops.read_reg(hw, BM_WUC, &i);
2754 ret_val = e1000_phy_hw_reset_ich8lan(hw); 2743 ret_val = e1000_phy_hw_reset_ich8lan(hw);
2755 if (ret_val) 2744 if (ret_val)
2756 return ret_val; 2745 return ret_val;
@@ -2886,7 +2875,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2886 */ 2875 */
2887 hw->fc.current_mode = hw->fc.requested_mode; 2876 hw->fc.current_mode = hw->fc.requested_mode;
2888 2877
2889 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", 2878 e_dbg("After fix-ups FlowControl is now = %x\n",
2890 hw->fc.current_mode); 2879 hw->fc.current_mode);
2891 2880
2892 /* Continue to configure the copper link. */ 2881 /* Continue to configure the copper link. */
@@ -2897,7 +2886,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2897 ew32(FCTTV, hw->fc.pause_time); 2886 ew32(FCTTV, hw->fc.pause_time);
2898 if ((hw->phy.type == e1000_phy_82578) || 2887 if ((hw->phy.type == e1000_phy_82578) ||
2899 (hw->phy.type == e1000_phy_82577)) { 2888 (hw->phy.type == e1000_phy_82577)) {
2900 ret_val = hw->phy.ops.write_phy_reg(hw, 2889 ret_val = hw->phy.ops.write_reg(hw,
2901 PHY_REG(BM_PORT_CTRL_PAGE, 27), 2890 PHY_REG(BM_PORT_CTRL_PAGE, 27),
2902 hw->fc.pause_time); 2891 hw->fc.pause_time);
2903 if (ret_val) 2892 if (ret_val)
@@ -2960,7 +2949,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
2960 return ret_val; 2949 return ret_val;
2961 break; 2950 break;
2962 case e1000_phy_ife: 2951 case e1000_phy_ife:
2963 ret_val = hw->phy.ops.read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, 2952 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
2964 &reg_data); 2953 &reg_data);
2965 if (ret_val) 2954 if (ret_val)
2966 return ret_val; 2955 return ret_val;
@@ -2979,7 +2968,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
2979 reg_data |= IFE_PMC_AUTO_MDIX; 2968 reg_data |= IFE_PMC_AUTO_MDIX;
2980 break; 2969 break;
2981 } 2970 }
2982 ret_val = hw->phy.ops.write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, 2971 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
2983 reg_data); 2972 reg_data);
2984 if (ret_val) 2973 if (ret_val)
2985 return ret_val; 2974 return ret_val;
@@ -3092,8 +3081,8 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
3092 * @hw: pointer to the HW structure 3081 * @hw: pointer to the HW structure
3093 * @state: boolean value used to set the current Kumeran workaround state 3082 * @state: boolean value used to set the current Kumeran workaround state
3094 * 3083 *
3095 * If ICH8, set the current Kumeran workaround state (enabled - TRUE 3084 * If ICH8, set the current Kumeran workaround state (enabled - true
3096 * /disabled - FALSE). 3085 * /disabled - false).
3097 **/ 3086 **/
3098void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, 3087void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3099 bool state) 3088 bool state)
@@ -3101,7 +3090,7 @@ void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
3101 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3090 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3102 3091
3103 if (hw->mac.type != e1000_ich8lan) { 3092 if (hw->mac.type != e1000_ich8lan) {
3104 hw_dbg(hw, "Workaround applies to ICH8 only.\n"); 3093 e_dbg("Workaround applies to ICH8 only.\n");
3105 return; 3094 return;
3106 } 3095 }
3107 3096
@@ -3281,7 +3270,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3281 **/ 3270 **/
3282static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) 3271static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3283{ 3272{
3284 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, 3273 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3285 (u16)hw->mac.ledctl_mode1); 3274 (u16)hw->mac.ledctl_mode1);
3286} 3275}
3287 3276
@@ -3293,7 +3282,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3293 **/ 3282 **/
3294static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) 3283static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3295{ 3284{
3296 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, 3285 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
3297 (u16)hw->mac.ledctl_default); 3286 (u16)hw->mac.ledctl_default);
3298} 3287}
3299 3288
@@ -3325,7 +3314,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3325 } 3314 }
3326 } 3315 }
3327 3316
3328 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); 3317 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3329} 3318}
3330 3319
3331/** 3320/**
@@ -3356,7 +3345,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3356 } 3345 }
3357 } 3346 }
3358 3347
3359 return hw->phy.ops.write_phy_reg(hw, HV_LED_CONFIG, data); 3348 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
3360} 3349}
3361 3350
3362/** 3351/**
@@ -3379,8 +3368,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3379 if (status & E1000_STATUS_PHYRA) 3368 if (status & E1000_STATUS_PHYRA)
3380 ew32(STATUS, status & ~E1000_STATUS_PHYRA); 3369 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
3381 else 3370 else
3382 hw_dbg(hw, 3371 e_dbg("PHY Reset Asserted not set - needs delay\n");
3383 "PHY Reset Asserted not set - needs delay\n");
3384 } 3372 }
3385 3373
3386 e1000e_get_cfg_done(hw); 3374 e1000e_get_cfg_done(hw);
@@ -3395,7 +3383,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3395 } else { 3383 } else {
3396 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { 3384 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
3397 /* Maybe we should do a basic PHY config */ 3385 /* Maybe we should do a basic PHY config */
3398 hw_dbg(hw, "EEPROM not present\n"); 3386 e_dbg("EEPROM not present\n");
3399 return -E1000_ERR_CONFIG; 3387 return -E1000_ERR_CONFIG;
3400 } 3388 }
3401 } 3389 }
@@ -3412,42 +3400,41 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3412 **/ 3400 **/
3413static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) 3401static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3414{ 3402{
3415 u32 temp;
3416 u16 phy_data; 3403 u16 phy_data;
3417 3404
3418 e1000e_clear_hw_cntrs_base(hw); 3405 e1000e_clear_hw_cntrs_base(hw);
3419 3406
3420 temp = er32(ALGNERRC); 3407 er32(ALGNERRC);
3421 temp = er32(RXERRC); 3408 er32(RXERRC);
3422 temp = er32(TNCRS); 3409 er32(TNCRS);
3423 temp = er32(CEXTERR); 3410 er32(CEXTERR);
3424 temp = er32(TSCTC); 3411 er32(TSCTC);
3425 temp = er32(TSCTFC); 3412 er32(TSCTFC);
3426 3413
3427 temp = er32(MGTPRC); 3414 er32(MGTPRC);
3428 temp = er32(MGTPDC); 3415 er32(MGTPDC);
3429 temp = er32(MGTPTC); 3416 er32(MGTPTC);
3430 3417
3431 temp = er32(IAC); 3418 er32(IAC);
3432 temp = er32(ICRXOC); 3419 er32(ICRXOC);
3433 3420
3434 /* Clear PHY statistics registers */ 3421 /* Clear PHY statistics registers */
3435 if ((hw->phy.type == e1000_phy_82578) || 3422 if ((hw->phy.type == e1000_phy_82578) ||
3436 (hw->phy.type == e1000_phy_82577)) { 3423 (hw->phy.type == e1000_phy_82577)) {
3437 hw->phy.ops.read_phy_reg(hw, HV_SCC_UPPER, &phy_data); 3424 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
3438 hw->phy.ops.read_phy_reg(hw, HV_SCC_LOWER, &phy_data); 3425 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
3439 hw->phy.ops.read_phy_reg(hw, HV_ECOL_UPPER, &phy_data); 3426 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
3440 hw->phy.ops.read_phy_reg(hw, HV_ECOL_LOWER, &phy_data); 3427 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
3441 hw->phy.ops.read_phy_reg(hw, HV_MCC_UPPER, &phy_data); 3428 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
3442 hw->phy.ops.read_phy_reg(hw, HV_MCC_LOWER, &phy_data); 3429 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
3443 hw->phy.ops.read_phy_reg(hw, HV_LATECOL_UPPER, &phy_data); 3430 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
3444 hw->phy.ops.read_phy_reg(hw, HV_LATECOL_LOWER, &phy_data); 3431 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
3445 hw->phy.ops.read_phy_reg(hw, HV_COLC_UPPER, &phy_data); 3432 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
3446 hw->phy.ops.read_phy_reg(hw, HV_COLC_LOWER, &phy_data); 3433 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
3447 hw->phy.ops.read_phy_reg(hw, HV_DC_UPPER, &phy_data); 3434 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
3448 hw->phy.ops.read_phy_reg(hw, HV_DC_LOWER, &phy_data); 3435 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
3449 hw->phy.ops.read_phy_reg(hw, HV_TNCRS_UPPER, &phy_data); 3436 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
3450 hw->phy.ops.read_phy_reg(hw, HV_TNCRS_LOWER, &phy_data); 3437 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
3451 } 3438 }
3452} 3439}
3453 3440
@@ -3470,29 +3457,29 @@ static struct e1000_mac_operations ich8_mac_ops = {
3470}; 3457};
3471 3458
3472static struct e1000_phy_operations ich8_phy_ops = { 3459static struct e1000_phy_operations ich8_phy_ops = {
3473 .acquire_phy = e1000_acquire_swflag_ich8lan, 3460 .acquire = e1000_acquire_swflag_ich8lan,
3474 .check_reset_block = e1000_check_reset_block_ich8lan, 3461 .check_reset_block = e1000_check_reset_block_ich8lan,
3475 .commit_phy = NULL, 3462 .commit = NULL,
3476 .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan, 3463 .force_speed_duplex = e1000_phy_force_speed_duplex_ich8lan,
3477 .get_cfg_done = e1000_get_cfg_done_ich8lan, 3464 .get_cfg_done = e1000_get_cfg_done_ich8lan,
3478 .get_cable_length = e1000e_get_cable_length_igp_2, 3465 .get_cable_length = e1000e_get_cable_length_igp_2,
3479 .get_phy_info = e1000_get_phy_info_ich8lan, 3466 .get_info = e1000_get_phy_info_ich8lan,
3480 .read_phy_reg = e1000e_read_phy_reg_igp, 3467 .read_reg = e1000e_read_phy_reg_igp,
3481 .release_phy = e1000_release_swflag_ich8lan, 3468 .release = e1000_release_swflag_ich8lan,
3482 .reset_phy = e1000_phy_hw_reset_ich8lan, 3469 .reset = e1000_phy_hw_reset_ich8lan,
3483 .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, 3470 .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan,
3484 .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, 3471 .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan,
3485 .write_phy_reg = e1000e_write_phy_reg_igp, 3472 .write_reg = e1000e_write_phy_reg_igp,
3486}; 3473};
3487 3474
3488static struct e1000_nvm_operations ich8_nvm_ops = { 3475static struct e1000_nvm_operations ich8_nvm_ops = {
3489 .acquire_nvm = e1000_acquire_nvm_ich8lan, 3476 .acquire = e1000_acquire_nvm_ich8lan,
3490 .read_nvm = e1000_read_nvm_ich8lan, 3477 .read = e1000_read_nvm_ich8lan,
3491 .release_nvm = e1000_release_nvm_ich8lan, 3478 .release = e1000_release_nvm_ich8lan,
3492 .update_nvm = e1000_update_nvm_checksum_ich8lan, 3479 .update = e1000_update_nvm_checksum_ich8lan,
3493 .valid_led_default = e1000_valid_led_default_ich8lan, 3480 .valid_led_default = e1000_valid_led_default_ich8lan,
3494 .validate_nvm = e1000_validate_nvm_checksum_ich8lan, 3481 .validate = e1000_validate_nvm_checksum_ich8lan,
3495 .write_nvm = e1000_write_nvm_ich8lan, 3482 .write = e1000_write_nvm_ich8lan,
3496}; 3483};
3497 3484
3498struct e1000_info e1000_ich8_info = { 3485struct e1000_info e1000_ich8_info = {
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 99ba2b8a2a05..f690a1055b41 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -26,11 +26,6 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#include <linux/netdevice.h>
30#include <linux/ethtool.h>
31#include <linux/delay.h>
32#include <linux/pci.h>
33
34#include "e1000.h" 29#include "e1000.h"
35 30
36enum e1000_mng_mode { 31enum e1000_mng_mode {
@@ -115,12 +110,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
115 u32 i; 110 u32 i;
116 111
117 /* Setup the receive address */ 112 /* Setup the receive address */
118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n"); 113 e_dbg("Programming MAC Address into RAR[0]\n");
119 114
120 e1000e_rar_set(hw, hw->mac.addr, 0); 115 e1000e_rar_set(hw, hw->mac.addr, 0);
121 116
122 /* Zero out the other (rar_entry_count - 1) receive addresses */ 117 /* Zero out the other (rar_entry_count - 1) receive addresses */
123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1); 118 e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
124 for (i = 1; i < rar_count; i++) { 119 for (i = 1; i < rar_count; i++) {
125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); 120 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
126 e1e_flush(); 121 e1e_flush();
@@ -276,7 +271,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
276 for (; mc_addr_count > 0; mc_addr_count--) { 271 for (; mc_addr_count > 0; mc_addr_count--) {
277 u32 hash_value, hash_reg, hash_bit, mta; 272 u32 hash_value, hash_reg, hash_bit, mta;
278 hash_value = e1000_hash_mc_addr(hw, mc_addr_list); 273 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
279 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value); 274 e_dbg("Hash value = 0x%03X\n", hash_value);
280 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); 275 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
281 hash_bit = hash_value & 0x1F; 276 hash_bit = hash_value & 0x1F;
282 mta = (1 << hash_bit); 277 mta = (1 << hash_bit);
@@ -300,45 +295,43 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
300 **/ 295 **/
301void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) 296void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
302{ 297{
303 u32 temp; 298 er32(CRCERRS);
304 299 er32(SYMERRS);
305 temp = er32(CRCERRS); 300 er32(MPC);
306 temp = er32(SYMERRS); 301 er32(SCC);
307 temp = er32(MPC); 302 er32(ECOL);
308 temp = er32(SCC); 303 er32(MCC);
309 temp = er32(ECOL); 304 er32(LATECOL);
310 temp = er32(MCC); 305 er32(COLC);
311 temp = er32(LATECOL); 306 er32(DC);
312 temp = er32(COLC); 307 er32(SEC);
313 temp = er32(DC); 308 er32(RLEC);
314 temp = er32(SEC); 309 er32(XONRXC);
315 temp = er32(RLEC); 310 er32(XONTXC);
316 temp = er32(XONRXC); 311 er32(XOFFRXC);
317 temp = er32(XONTXC); 312 er32(XOFFTXC);
318 temp = er32(XOFFRXC); 313 er32(FCRUC);
319 temp = er32(XOFFTXC); 314 er32(GPRC);
320 temp = er32(FCRUC); 315 er32(BPRC);
321 temp = er32(GPRC); 316 er32(MPRC);
322 temp = er32(BPRC); 317 er32(GPTC);
323 temp = er32(MPRC); 318 er32(GORCL);
324 temp = er32(GPTC); 319 er32(GORCH);
325 temp = er32(GORCL); 320 er32(GOTCL);
326 temp = er32(GORCH); 321 er32(GOTCH);
327 temp = er32(GOTCL); 322 er32(RNBC);
328 temp = er32(GOTCH); 323 er32(RUC);
329 temp = er32(RNBC); 324 er32(RFC);
330 temp = er32(RUC); 325 er32(ROC);
331 temp = er32(RFC); 326 er32(RJC);
332 temp = er32(ROC); 327 er32(TORL);
333 temp = er32(RJC); 328 er32(TORH);
334 temp = er32(TORL); 329 er32(TOTL);
335 temp = er32(TORH); 330 er32(TOTH);
336 temp = er32(TOTL); 331 er32(TPR);
337 temp = er32(TOTH); 332 er32(TPT);
338 temp = er32(TPR); 333 er32(MPTC);
339 temp = er32(TPT); 334 er32(BPTC);
340 temp = er32(MPTC);
341 temp = er32(BPTC);
342} 335}
343 336
344/** 337/**
@@ -376,7 +369,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
376 if (!link) 369 if (!link)
377 return ret_val; /* No link detected */ 370 return ret_val; /* No link detected */
378 371
379 mac->get_link_status = 0; 372 mac->get_link_status = false;
380 373
381 /* 374 /*
382 * Check if there was DownShift, must be checked 375 * Check if there was DownShift, must be checked
@@ -408,7 +401,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
408 */ 401 */
409 ret_val = e1000e_config_fc_after_link_up(hw); 402 ret_val = e1000e_config_fc_after_link_up(hw);
410 if (ret_val) { 403 if (ret_val) {
411 hw_dbg(hw, "Error configuring flow control\n"); 404 e_dbg("Error configuring flow control\n");
412 } 405 }
413 406
414 return ret_val; 407 return ret_val;
@@ -448,7 +441,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
448 mac->autoneg_failed = 1; 441 mac->autoneg_failed = 1;
449 return 0; 442 return 0;
450 } 443 }
451 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); 444 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
452 445
453 /* Disable auto-negotiation in the TXCW register */ 446 /* Disable auto-negotiation in the TXCW register */
454 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); 447 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -461,7 +454,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
461 /* Configure Flow Control after forcing link up. */ 454 /* Configure Flow Control after forcing link up. */
462 ret_val = e1000e_config_fc_after_link_up(hw); 455 ret_val = e1000e_config_fc_after_link_up(hw);
463 if (ret_val) { 456 if (ret_val) {
464 hw_dbg(hw, "Error configuring flow control\n"); 457 e_dbg("Error configuring flow control\n");
465 return ret_val; 458 return ret_val;
466 } 459 }
467 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 460 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -471,7 +464,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
471 * and disable forced link in the Device Control register 464 * and disable forced link in the Device Control register
472 * in an attempt to auto-negotiate with our link partner. 465 * in an attempt to auto-negotiate with our link partner.
473 */ 466 */
474 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); 467 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
475 ew32(TXCW, mac->txcw); 468 ew32(TXCW, mac->txcw);
476 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 469 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
477 470
@@ -513,7 +506,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
513 mac->autoneg_failed = 1; 506 mac->autoneg_failed = 1;
514 return 0; 507 return 0;
515 } 508 }
516 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n"); 509 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
517 510
518 /* Disable auto-negotiation in the TXCW register */ 511 /* Disable auto-negotiation in the TXCW register */
519 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); 512 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
@@ -526,7 +519,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
526 /* Configure Flow Control after forcing link up. */ 519 /* Configure Flow Control after forcing link up. */
527 ret_val = e1000e_config_fc_after_link_up(hw); 520 ret_val = e1000e_config_fc_after_link_up(hw);
528 if (ret_val) { 521 if (ret_val) {
529 hw_dbg(hw, "Error configuring flow control\n"); 522 e_dbg("Error configuring flow control\n");
530 return ret_val; 523 return ret_val;
531 } 524 }
532 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 525 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -536,7 +529,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
536 * and disable forced link in the Device Control register 529 * and disable forced link in the Device Control register
537 * in an attempt to auto-negotiate with our link partner. 530 * in an attempt to auto-negotiate with our link partner.
538 */ 531 */
539 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n"); 532 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
540 ew32(TXCW, mac->txcw); 533 ew32(TXCW, mac->txcw);
541 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 534 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
542 535
@@ -553,11 +546,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
553 if (rxcw & E1000_RXCW_SYNCH) { 546 if (rxcw & E1000_RXCW_SYNCH) {
554 if (!(rxcw & E1000_RXCW_IV)) { 547 if (!(rxcw & E1000_RXCW_IV)) {
555 mac->serdes_has_link = true; 548 mac->serdes_has_link = true;
556 hw_dbg(hw, "SERDES: Link up - forced.\n"); 549 e_dbg("SERDES: Link up - forced.\n");
557 } 550 }
558 } else { 551 } else {
559 mac->serdes_has_link = false; 552 mac->serdes_has_link = false;
560 hw_dbg(hw, "SERDES: Link down - force failed.\n"); 553 e_dbg("SERDES: Link down - force failed.\n");
561 } 554 }
562 } 555 }
563 556
@@ -570,20 +563,20 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
570 if (rxcw & E1000_RXCW_SYNCH) { 563 if (rxcw & E1000_RXCW_SYNCH) {
571 if (!(rxcw & E1000_RXCW_IV)) { 564 if (!(rxcw & E1000_RXCW_IV)) {
572 mac->serdes_has_link = true; 565 mac->serdes_has_link = true;
573 hw_dbg(hw, "SERDES: Link up - autoneg " 566 e_dbg("SERDES: Link up - autoneg "
574 "completed sucessfully.\n"); 567 "completed sucessfully.\n");
575 } else { 568 } else {
576 mac->serdes_has_link = false; 569 mac->serdes_has_link = false;
577 hw_dbg(hw, "SERDES: Link down - invalid" 570 e_dbg("SERDES: Link down - invalid"
578 "codewords detected in autoneg.\n"); 571 "codewords detected in autoneg.\n");
579 } 572 }
580 } else { 573 } else {
581 mac->serdes_has_link = false; 574 mac->serdes_has_link = false;
582 hw_dbg(hw, "SERDES: Link down - no sync.\n"); 575 e_dbg("SERDES: Link down - no sync.\n");
583 } 576 }
584 } else { 577 } else {
585 mac->serdes_has_link = false; 578 mac->serdes_has_link = false;
586 hw_dbg(hw, "SERDES: Link down - autoneg failed\n"); 579 e_dbg("SERDES: Link down - autoneg failed\n");
587 } 580 }
588 } 581 }
589 582
@@ -614,7 +607,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
614 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); 607 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
615 608
616 if (ret_val) { 609 if (ret_val) {
617 hw_dbg(hw, "NVM Read Error\n"); 610 e_dbg("NVM Read Error\n");
618 return ret_val; 611 return ret_val;
619 } 612 }
620 613
@@ -667,7 +660,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
667 */ 660 */
668 hw->fc.current_mode = hw->fc.requested_mode; 661 hw->fc.current_mode = hw->fc.requested_mode;
669 662
670 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", 663 e_dbg("After fix-ups FlowControl is now = %x\n",
671 hw->fc.current_mode); 664 hw->fc.current_mode);
672 665
673 /* Call the necessary media_type subroutine to configure the link. */ 666 /* Call the necessary media_type subroutine to configure the link. */
@@ -681,7 +674,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
681 * control is disabled, because it does not hurt anything to 674 * control is disabled, because it does not hurt anything to
682 * initialize these registers. 675 * initialize these registers.
683 */ 676 */
684 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n"); 677 e_dbg("Initializing the Flow Control address, type and timer regs\n");
685 ew32(FCT, FLOW_CONTROL_TYPE); 678 ew32(FCT, FLOW_CONTROL_TYPE);
686 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); 679 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
687 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); 680 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
@@ -751,7 +744,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
751 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 744 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
752 break; 745 break;
753 default: 746 default:
754 hw_dbg(hw, "Flow control param set incorrectly\n"); 747 e_dbg("Flow control param set incorrectly\n");
755 return -E1000_ERR_CONFIG; 748 return -E1000_ERR_CONFIG;
756 break; 749 break;
757 } 750 }
@@ -789,7 +782,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
789 break; 782 break;
790 } 783 }
791 if (i == FIBER_LINK_UP_LIMIT) { 784 if (i == FIBER_LINK_UP_LIMIT) {
792 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); 785 e_dbg("Never got a valid link from auto-neg!!!\n");
793 mac->autoneg_failed = 1; 786 mac->autoneg_failed = 1;
794 /* 787 /*
795 * AutoNeg failed to achieve a link, so we'll call 788 * AutoNeg failed to achieve a link, so we'll call
@@ -799,13 +792,13 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
799 */ 792 */
800 ret_val = mac->ops.check_for_link(hw); 793 ret_val = mac->ops.check_for_link(hw);
801 if (ret_val) { 794 if (ret_val) {
802 hw_dbg(hw, "Error while checking for link\n"); 795 e_dbg("Error while checking for link\n");
803 return ret_val; 796 return ret_val;
804 } 797 }
805 mac->autoneg_failed = 0; 798 mac->autoneg_failed = 0;
806 } else { 799 } else {
807 mac->autoneg_failed = 0; 800 mac->autoneg_failed = 0;
808 hw_dbg(hw, "Valid Link Found\n"); 801 e_dbg("Valid Link Found\n");
809 } 802 }
810 803
811 return 0; 804 return 0;
@@ -841,7 +834,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
841 * then the link-up status bit will be set and the flow control enable 834 * then the link-up status bit will be set and the flow control enable
842 * bits (RFCE and TFCE) will be set according to their negotiated value. 835 * bits (RFCE and TFCE) will be set according to their negotiated value.
843 */ 836 */
844 hw_dbg(hw, "Auto-negotiation enabled\n"); 837 e_dbg("Auto-negotiation enabled\n");
845 838
846 ew32(CTRL, ctrl); 839 ew32(CTRL, ctrl);
847 e1e_flush(); 840 e1e_flush();
@@ -856,7 +849,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
856 (er32(CTRL) & E1000_CTRL_SWDPIN1)) { 849 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
857 ret_val = e1000_poll_fiber_serdes_link_generic(hw); 850 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
858 } else { 851 } else {
859 hw_dbg(hw, "No signal detected\n"); 852 e_dbg("No signal detected\n");
860 } 853 }
861 854
862 return 0; 855 return 0;
@@ -952,7 +945,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
952 * 3: Both Rx and Tx flow control (symmetric) is enabled. 945 * 3: Both Rx and Tx flow control (symmetric) is enabled.
953 * other: No other values should be possible at this point. 946 * other: No other values should be possible at this point.
954 */ 947 */
955 hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode); 948 e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
956 949
957 switch (hw->fc.current_mode) { 950 switch (hw->fc.current_mode) {
958 case e1000_fc_none: 951 case e1000_fc_none:
@@ -970,7 +963,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
970 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); 963 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
971 break; 964 break;
972 default: 965 default:
973 hw_dbg(hw, "Flow control param set incorrectly\n"); 966 e_dbg("Flow control param set incorrectly\n");
974 return -E1000_ERR_CONFIG; 967 return -E1000_ERR_CONFIG;
975 } 968 }
976 969
@@ -1011,7 +1004,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1011 } 1004 }
1012 1005
1013 if (ret_val) { 1006 if (ret_val) {
1014 hw_dbg(hw, "Error forcing flow control settings\n"); 1007 e_dbg("Error forcing flow control settings\n");
1015 return ret_val; 1008 return ret_val;
1016 } 1009 }
1017 1010
@@ -1035,7 +1028,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1035 return ret_val; 1028 return ret_val;
1036 1029
1037 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { 1030 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1038 hw_dbg(hw, "Copper PHY and Auto Neg " 1031 e_dbg("Copper PHY and Auto Neg "
1039 "has not completed.\n"); 1032 "has not completed.\n");
1040 return ret_val; 1033 return ret_val;
1041 } 1034 }
@@ -1100,10 +1093,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1100 */ 1093 */
1101 if (hw->fc.requested_mode == e1000_fc_full) { 1094 if (hw->fc.requested_mode == e1000_fc_full) {
1102 hw->fc.current_mode = e1000_fc_full; 1095 hw->fc.current_mode = e1000_fc_full;
1103 hw_dbg(hw, "Flow Control = FULL.\r\n"); 1096 e_dbg("Flow Control = FULL.\r\n");
1104 } else { 1097 } else {
1105 hw->fc.current_mode = e1000_fc_rx_pause; 1098 hw->fc.current_mode = e1000_fc_rx_pause;
1106 hw_dbg(hw, "Flow Control = " 1099 e_dbg("Flow Control = "
1107 "RX PAUSE frames only.\r\n"); 1100 "RX PAUSE frames only.\r\n");
1108 } 1101 }
1109 } 1102 }
@@ -1121,7 +1114,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1121 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1114 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1122 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1115 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1123 hw->fc.current_mode = e1000_fc_tx_pause; 1116 hw->fc.current_mode = e1000_fc_tx_pause;
1124 hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n"); 1117 e_dbg("Flow Control = Tx PAUSE frames only.\r\n");
1125 } 1118 }
1126 /* 1119 /*
1127 * For transmitting PAUSE frames ONLY. 1120 * For transmitting PAUSE frames ONLY.
@@ -1137,14 +1130,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1137 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1130 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1138 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1131 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1139 hw->fc.current_mode = e1000_fc_rx_pause; 1132 hw->fc.current_mode = e1000_fc_rx_pause;
1140 hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n"); 1133 e_dbg("Flow Control = Rx PAUSE frames only.\r\n");
1141 } else { 1134 } else {
1142 /* 1135 /*
1143 * Per the IEEE spec, at this point flow control 1136 * Per the IEEE spec, at this point flow control
1144 * should be disabled. 1137 * should be disabled.
1145 */ 1138 */
1146 hw->fc.current_mode = e1000_fc_none; 1139 hw->fc.current_mode = e1000_fc_none;
1147 hw_dbg(hw, "Flow Control = NONE.\r\n"); 1140 e_dbg("Flow Control = NONE.\r\n");
1148 } 1141 }
1149 1142
1150 /* 1143 /*
@@ -1154,7 +1147,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1154 */ 1147 */
1155 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); 1148 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1156 if (ret_val) { 1149 if (ret_val) {
1157 hw_dbg(hw, "Error getting link speed and duplex\n"); 1150 e_dbg("Error getting link speed and duplex\n");
1158 return ret_val; 1151 return ret_val;
1159 } 1152 }
1160 1153
@@ -1167,7 +1160,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1167 */ 1160 */
1168 ret_val = e1000e_force_mac_fc(hw); 1161 ret_val = e1000e_force_mac_fc(hw);
1169 if (ret_val) { 1162 if (ret_val) {
1170 hw_dbg(hw, "Error forcing flow control settings\n"); 1163 e_dbg("Error forcing flow control settings\n");
1171 return ret_val; 1164 return ret_val;
1172 } 1165 }
1173 } 1166 }
@@ -1191,21 +1184,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
1191 status = er32(STATUS); 1184 status = er32(STATUS);
1192 if (status & E1000_STATUS_SPEED_1000) { 1185 if (status & E1000_STATUS_SPEED_1000) {
1193 *speed = SPEED_1000; 1186 *speed = SPEED_1000;
1194 hw_dbg(hw, "1000 Mbs, "); 1187 e_dbg("1000 Mbs, ");
1195 } else if (status & E1000_STATUS_SPEED_100) { 1188 } else if (status & E1000_STATUS_SPEED_100) {
1196 *speed = SPEED_100; 1189 *speed = SPEED_100;
1197 hw_dbg(hw, "100 Mbs, "); 1190 e_dbg("100 Mbs, ");
1198 } else { 1191 } else {
1199 *speed = SPEED_10; 1192 *speed = SPEED_10;
1200 hw_dbg(hw, "10 Mbs, "); 1193 e_dbg("10 Mbs, ");
1201 } 1194 }
1202 1195
1203 if (status & E1000_STATUS_FD) { 1196 if (status & E1000_STATUS_FD) {
1204 *duplex = FULL_DUPLEX; 1197 *duplex = FULL_DUPLEX;
1205 hw_dbg(hw, "Full Duplex\n"); 1198 e_dbg("Full Duplex\n");
1206 } else { 1199 } else {
1207 *duplex = HALF_DUPLEX; 1200 *duplex = HALF_DUPLEX;
1208 hw_dbg(hw, "Half Duplex\n"); 1201 e_dbg("Half Duplex\n");
1209 } 1202 }
1210 1203
1211 return 0; 1204 return 0;
@@ -1251,7 +1244,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1251 } 1244 }
1252 1245
1253 if (i == timeout) { 1246 if (i == timeout) {
1254 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n"); 1247 e_dbg("Driver can't access device - SMBI bit is set.\n");
1255 return -E1000_ERR_NVM; 1248 return -E1000_ERR_NVM;
1256 } 1249 }
1257 1250
@@ -1270,7 +1263,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1270 if (i == timeout) { 1263 if (i == timeout) {
1271 /* Release semaphores */ 1264 /* Release semaphores */
1272 e1000e_put_hw_semaphore(hw); 1265 e1000e_put_hw_semaphore(hw);
1273 hw_dbg(hw, "Driver can't access the NVM\n"); 1266 e_dbg("Driver can't access the NVM\n");
1274 return -E1000_ERR_NVM; 1267 return -E1000_ERR_NVM;
1275 } 1268 }
1276 1269
@@ -1310,7 +1303,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1310 } 1303 }
1311 1304
1312 if (i == AUTO_READ_DONE_TIMEOUT) { 1305 if (i == AUTO_READ_DONE_TIMEOUT) {
1313 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n"); 1306 e_dbg("Auto read by HW from NVM has not completed.\n");
1314 return -E1000_ERR_RESET; 1307 return -E1000_ERR_RESET;
1315 } 1308 }
1316 1309
@@ -1331,7 +1324,7 @@ s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1331 1324
1332 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); 1325 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1333 if (ret_val) { 1326 if (ret_val) {
1334 hw_dbg(hw, "NVM Read Error\n"); 1327 e_dbg("NVM Read Error\n");
1335 return ret_val; 1328 return ret_val;
1336 } 1329 }
1337 1330
@@ -1585,7 +1578,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1585 } 1578 }
1586 1579
1587 if (!timeout) { 1580 if (!timeout) {
1588 hw_dbg(hw, "Master requests are pending.\n"); 1581 e_dbg("Master requests are pending.\n");
1589 return -E1000_ERR_MASTER_REQUESTS_PENDING; 1582 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1590 } 1583 }
1591 1584
@@ -1608,7 +1601,7 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
1608 mac->ifs_step_size = IFS_STEP; 1601 mac->ifs_step_size = IFS_STEP;
1609 mac->ifs_ratio = IFS_RATIO; 1602 mac->ifs_ratio = IFS_RATIO;
1610 1603
1611 mac->in_ifs_mode = 0; 1604 mac->in_ifs_mode = false;
1612 ew32(AIT, 0); 1605 ew32(AIT, 0);
1613} 1606}
1614 1607
@@ -1625,7 +1618,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1625 1618
1626 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { 1619 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1627 if (mac->tx_packet_delta > MIN_NUM_XMITS) { 1620 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1628 mac->in_ifs_mode = 1; 1621 mac->in_ifs_mode = true;
1629 if (mac->current_ifs_val < mac->ifs_max_val) { 1622 if (mac->current_ifs_val < mac->ifs_max_val) {
1630 if (!mac->current_ifs_val) 1623 if (!mac->current_ifs_val)
1631 mac->current_ifs_val = mac->ifs_min_val; 1624 mac->current_ifs_val = mac->ifs_min_val;
@@ -1639,7 +1632,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1639 if (mac->in_ifs_mode && 1632 if (mac->in_ifs_mode &&
1640 (mac->tx_packet_delta <= MIN_NUM_XMITS)) { 1633 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1641 mac->current_ifs_val = 0; 1634 mac->current_ifs_val = 0;
1642 mac->in_ifs_mode = 0; 1635 mac->in_ifs_mode = false;
1643 ew32(AIT, 0); 1636 ew32(AIT, 0);
1644 } 1637 }
1645 } 1638 }
@@ -1809,7 +1802,7 @@ s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1809 if (!timeout) { 1802 if (!timeout) {
1810 eecd &= ~E1000_EECD_REQ; 1803 eecd &= ~E1000_EECD_REQ;
1811 ew32(EECD, eecd); 1804 ew32(EECD, eecd);
1812 hw_dbg(hw, "Could not acquire NVM grant\n"); 1805 e_dbg("Could not acquire NVM grant\n");
1813 return -E1000_ERR_NVM; 1806 return -E1000_ERR_NVM;
1814 } 1807 }
1815 1808
@@ -1914,7 +1907,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1914 } 1907 }
1915 1908
1916 if (!timeout) { 1909 if (!timeout) {
1917 hw_dbg(hw, "SPI NVM Status error\n"); 1910 e_dbg("SPI NVM Status error\n");
1918 return -E1000_ERR_NVM; 1911 return -E1000_ERR_NVM;
1919 } 1912 }
1920 } 1913 }
@@ -1943,7 +1936,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1943 */ 1936 */
1944 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1937 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1945 (words == 0)) { 1938 (words == 0)) {
1946 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1939 e_dbg("nvm parameter(s) out of bounds\n");
1947 return -E1000_ERR_NVM; 1940 return -E1000_ERR_NVM;
1948 } 1941 }
1949 1942
@@ -1986,11 +1979,11 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1986 */ 1979 */
1987 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1980 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1988 (words == 0)) { 1981 (words == 0)) {
1989 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1982 e_dbg("nvm parameter(s) out of bounds\n");
1990 return -E1000_ERR_NVM; 1983 return -E1000_ERR_NVM;
1991 } 1984 }
1992 1985
1993 ret_val = nvm->ops.acquire_nvm(hw); 1986 ret_val = nvm->ops.acquire(hw);
1994 if (ret_val) 1987 if (ret_val)
1995 return ret_val; 1988 return ret_val;
1996 1989
@@ -2001,7 +1994,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2001 1994
2002 ret_val = e1000_ready_nvm_eeprom(hw); 1995 ret_val = e1000_ready_nvm_eeprom(hw);
2003 if (ret_val) { 1996 if (ret_val) {
2004 nvm->ops.release_nvm(hw); 1997 nvm->ops.release(hw);
2005 return ret_val; 1998 return ret_val;
2006 } 1999 }
2007 2000
@@ -2040,7 +2033,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2040 } 2033 }
2041 2034
2042 msleep(10); 2035 msleep(10);
2043 nvm->ops.release_nvm(hw); 2036 nvm->ops.release(hw);
2044 return 0; 2037 return 0;
2045} 2038}
2046 2039
@@ -2066,7 +2059,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2066 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 2059 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2067 &mac_addr_offset); 2060 &mac_addr_offset);
2068 if (ret_val) { 2061 if (ret_val) {
2069 hw_dbg(hw, "NVM Read Error\n"); 2062 e_dbg("NVM Read Error\n");
2070 return ret_val; 2063 return ret_val;
2071 } 2064 }
2072 if (mac_addr_offset == 0xFFFF) 2065 if (mac_addr_offset == 0xFFFF)
@@ -2081,7 +2074,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2081 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, 2074 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2082 &nvm_data); 2075 &nvm_data);
2083 if (ret_val) { 2076 if (ret_val) {
2084 hw_dbg(hw, "NVM Read Error\n"); 2077 e_dbg("NVM Read Error\n");
2085 return ret_val; 2078 return ret_val;
2086 } 2079 }
2087 if (nvm_data & 0x0001) 2080 if (nvm_data & 0x0001)
@@ -2096,7 +2089,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2096 offset = mac_addr_offset + (i >> 1); 2089 offset = mac_addr_offset + (i >> 1);
2097 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); 2090 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2098 if (ret_val) { 2091 if (ret_val) {
2099 hw_dbg(hw, "NVM Read Error\n"); 2092 e_dbg("NVM Read Error\n");
2100 return ret_val; 2093 return ret_val;
2101 } 2094 }
2102 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF); 2095 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
@@ -2129,14 +2122,14 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2129 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 2122 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2130 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); 2123 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2131 if (ret_val) { 2124 if (ret_val) {
2132 hw_dbg(hw, "NVM Read Error\n"); 2125 e_dbg("NVM Read Error\n");
2133 return ret_val; 2126 return ret_val;
2134 } 2127 }
2135 checksum += nvm_data; 2128 checksum += nvm_data;
2136 } 2129 }
2137 2130
2138 if (checksum != (u16) NVM_SUM) { 2131 if (checksum != (u16) NVM_SUM) {
2139 hw_dbg(hw, "NVM Checksum Invalid\n"); 2132 e_dbg("NVM Checksum Invalid\n");
2140 return -E1000_ERR_NVM; 2133 return -E1000_ERR_NVM;
2141 } 2134 }
2142 2135
@@ -2160,7 +2153,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2160 for (i = 0; i < NVM_CHECKSUM_REG; i++) { 2153 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2161 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); 2154 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2162 if (ret_val) { 2155 if (ret_val) {
2163 hw_dbg(hw, "NVM Read Error while updating checksum.\n"); 2156 e_dbg("NVM Read Error while updating checksum.\n");
2164 return ret_val; 2157 return ret_val;
2165 } 2158 }
2166 checksum += nvm_data; 2159 checksum += nvm_data;
@@ -2168,7 +2161,7 @@ s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2168 checksum = (u16) NVM_SUM - checksum; 2161 checksum = (u16) NVM_SUM - checksum;
2169 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); 2162 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2170 if (ret_val) 2163 if (ret_val)
2171 hw_dbg(hw, "NVM Write Error while updating checksum.\n"); 2164 e_dbg("NVM Write Error while updating checksum.\n");
2172 2165
2173 return ret_val; 2166 return ret_val;
2174} 2167}
@@ -2231,7 +2224,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2231 /* Check that the host interface is enabled. */ 2224 /* Check that the host interface is enabled. */
2232 hicr = er32(HICR); 2225 hicr = er32(HICR);
2233 if ((hicr & E1000_HICR_EN) == 0) { 2226 if ((hicr & E1000_HICR_EN) == 0) {
2234 hw_dbg(hw, "E1000_HOST_EN bit disabled.\n"); 2227 e_dbg("E1000_HOST_EN bit disabled.\n");
2235 return -E1000_ERR_HOST_INTERFACE_COMMAND; 2228 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2236 } 2229 }
2237 /* check the previous command is completed */ 2230 /* check the previous command is completed */
@@ -2243,7 +2236,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2243 } 2236 }
2244 2237
2245 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { 2238 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
2246 hw_dbg(hw, "Previous command timeout failed .\n"); 2239 e_dbg("Previous command timeout failed .\n");
2247 return -E1000_ERR_HOST_INTERFACE_COMMAND; 2240 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2248 } 2241 }
2249 2242
@@ -2282,7 +2275,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2282 2275
2283 /* No manageability, no filtering */ 2276 /* No manageability, no filtering */
2284 if (!e1000e_check_mng_mode(hw)) { 2277 if (!e1000e_check_mng_mode(hw)) {
2285 hw->mac.tx_pkt_filtering = 0; 2278 hw->mac.tx_pkt_filtering = false;
2286 return 0; 2279 return 0;
2287 } 2280 }
2288 2281
@@ -2292,7 +2285,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2292 */ 2285 */
2293 ret_val = e1000_mng_enable_host_if(hw); 2286 ret_val = e1000_mng_enable_host_if(hw);
2294 if (ret_val != 0) { 2287 if (ret_val != 0) {
2295 hw->mac.tx_pkt_filtering = 0; 2288 hw->mac.tx_pkt_filtering = false;
2296 return ret_val; 2289 return ret_val;
2297 } 2290 }
2298 2291
@@ -2311,17 +2304,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2311 * take the safe route of assuming Tx filtering is enabled. 2304 * take the safe route of assuming Tx filtering is enabled.
2312 */ 2305 */
2313 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { 2306 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2314 hw->mac.tx_pkt_filtering = 1; 2307 hw->mac.tx_pkt_filtering = true;
2315 return 1; 2308 return 1;
2316 } 2309 }
2317 2310
2318 /* Cookie area is valid, make the final check for filtering. */ 2311 /* Cookie area is valid, make the final check for filtering. */
2319 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { 2312 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2320 hw->mac.tx_pkt_filtering = 0; 2313 hw->mac.tx_pkt_filtering = false;
2321 return 0; 2314 return 0;
2322 } 2315 }
2323 2316
2324 hw->mac.tx_pkt_filtering = 1; 2317 hw->mac.tx_pkt_filtering = true;
2325 return 1; 2318 return 1;
2326} 2319}
2327 2320
@@ -2478,7 +2471,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2478{ 2471{
2479 u32 manc; 2472 u32 manc;
2480 u32 fwsm, factps; 2473 u32 fwsm, factps;
2481 bool ret_val = 0; 2474 bool ret_val = false;
2482 2475
2483 manc = er32(MANC); 2476 manc = er32(MANC);
2484 2477
@@ -2493,13 +2486,13 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2493 if (!(factps & E1000_FACTPS_MNGCG) && 2486 if (!(factps & E1000_FACTPS_MNGCG) &&
2494 ((fwsm & E1000_FWSM_MODE_MASK) == 2487 ((fwsm & E1000_FWSM_MODE_MASK) ==
2495 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { 2488 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2496 ret_val = 1; 2489 ret_val = true;
2497 return ret_val; 2490 return ret_val;
2498 } 2491 }
2499 } else { 2492 } else {
2500 if ((manc & E1000_MANC_SMBUS_EN) && 2493 if ((manc & E1000_MANC_SMBUS_EN) &&
2501 !(manc & E1000_MANC_ASF_EN)) { 2494 !(manc & E1000_MANC_ASF_EN)) {
2502 ret_val = 1; 2495 ret_val = true;
2503 return ret_val; 2496 return ret_val;
2504 } 2497 }
2505 } 2498 }
@@ -2514,14 +2507,14 @@ s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2514 2507
2515 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); 2508 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2516 if (ret_val) { 2509 if (ret_val) {
2517 hw_dbg(hw, "NVM Read Error\n"); 2510 e_dbg("NVM Read Error\n");
2518 return ret_val; 2511 return ret_val;
2519 } 2512 }
2520 *pba_num = (u32)(nvm_data << 16); 2513 *pba_num = (u32)(nvm_data << 16);
2521 2514
2522 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 2515 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2523 if (ret_val) { 2516 if (ret_val) {
2524 hw_dbg(hw, "NVM Read Error\n"); 2517 e_dbg("NVM Read Error\n");
2525 return ret_val; 2518 return ret_val;
2526 } 2519 }
2527 *pba_num |= nvm_data; 2520 *pba_num |= nvm_data;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fad8f9ea0043..e546b4ebf155 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -65,17 +65,6 @@ static const struct e1000_info *e1000_info_tbl[] = {
65 [board_pchlan] = &e1000_pch_info, 65 [board_pchlan] = &e1000_pch_info,
66}; 66};
67 67
68#ifdef DEBUG
69/**
70 * e1000_get_hw_dev_name - return device name string
71 * used by hardware layer to print debugging information
72 **/
73char *e1000e_get_hw_dev_name(struct e1000_hw *hw)
74{
75 return hw->adapter->netdev->name;
76}
77#endif
78
79/** 68/**
80 * e1000_desc_unused - calculate if we have unused descriptors 69 * e1000_desc_unused - calculate if we have unused descriptors
81 **/ 70 **/
@@ -167,7 +156,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
167 struct e1000_buffer *buffer_info; 156 struct e1000_buffer *buffer_info;
168 struct sk_buff *skb; 157 struct sk_buff *skb;
169 unsigned int i; 158 unsigned int i;
170 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 159 unsigned int bufsz = adapter->rx_buffer_len;
171 160
172 i = rx_ring->next_to_use; 161 i = rx_ring->next_to_use;
173 buffer_info = &rx_ring->buffer_info[i]; 162 buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +168,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
179 goto map_skb; 168 goto map_skb;
180 } 169 }
181 170
182 skb = netdev_alloc_skb(netdev, bufsz); 171 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
183 if (!skb) { 172 if (!skb) {
184 /* Better luck next round */ 173 /* Better luck next round */
185 adapter->alloc_rx_buff_failed++; 174 adapter->alloc_rx_buff_failed++;
186 break; 175 break;
187 } 176 }
188 177
189 /*
190 * Make buffer alignment 2 beyond a 16 byte boundary
191 * this will result in a 16 byte aligned IP header after
192 * the 14 byte MAC header is removed
193 */
194 skb_reserve(skb, NET_IP_ALIGN);
195
196 buffer_info->skb = skb; 178 buffer_info->skb = skb;
197map_skb: 179map_skb:
198 buffer_info->dma = pci_map_single(pdev, skb->data, 180 buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -284,21 +266,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
284 cpu_to_le64(ps_page->dma); 266 cpu_to_le64(ps_page->dma);
285 } 267 }
286 268
287 skb = netdev_alloc_skb(netdev, 269 skb = netdev_alloc_skb_ip_align(netdev,
288 adapter->rx_ps_bsize0 + NET_IP_ALIGN); 270 adapter->rx_ps_bsize0);
289 271
290 if (!skb) { 272 if (!skb) {
291 adapter->alloc_rx_buff_failed++; 273 adapter->alloc_rx_buff_failed++;
292 break; 274 break;
293 } 275 }
294 276
295 /*
296 * Make buffer alignment 2 beyond a 16 byte boundary
297 * this will result in a 16 byte aligned IP header after
298 * the 14 byte MAC header is removed
299 */
300 skb_reserve(skb, NET_IP_ALIGN);
301
302 buffer_info->skb = skb; 277 buffer_info->skb = skb;
303 buffer_info->dma = pci_map_single(pdev, skb->data, 278 buffer_info->dma = pci_map_single(pdev, skb->data,
304 adapter->rx_ps_bsize0, 279 adapter->rx_ps_bsize0,
@@ -359,9 +334,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
359 struct e1000_buffer *buffer_info; 334 struct e1000_buffer *buffer_info;
360 struct sk_buff *skb; 335 struct sk_buff *skb;
361 unsigned int i; 336 unsigned int i;
362 unsigned int bufsz = 256 - 337 unsigned int bufsz = 256 - 16 /* for skb_reserve */;
363 16 /* for skb_reserve */ -
364 NET_IP_ALIGN;
365 338
366 i = rx_ring->next_to_use; 339 i = rx_ring->next_to_use;
367 buffer_info = &rx_ring->buffer_info[i]; 340 buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +346,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
373 goto check_page; 346 goto check_page;
374 } 347 }
375 348
376 skb = netdev_alloc_skb(netdev, bufsz); 349 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
377 if (unlikely(!skb)) { 350 if (unlikely(!skb)) {
378 /* Better luck next round */ 351 /* Better luck next round */
379 adapter->alloc_rx_buff_failed++; 352 adapter->alloc_rx_buff_failed++;
380 break; 353 break;
381 } 354 }
382 355
383 /* Make buffer alignment 2 beyond a 16 byte boundary
384 * this will result in a 16 byte aligned IP header after
385 * the 14 byte MAC header is removed
386 */
387 skb_reserve(skb, NET_IP_ALIGN);
388
389 buffer_info->skb = skb; 356 buffer_info->skb = skb;
390check_page: 357check_page:
391 /* allocate a new page if necessary */ 358 /* allocate a new page if necessary */
@@ -437,6 +404,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
437{ 404{
438 struct net_device *netdev = adapter->netdev; 405 struct net_device *netdev = adapter->netdev;
439 struct pci_dev *pdev = adapter->pdev; 406 struct pci_dev *pdev = adapter->pdev;
407 struct e1000_hw *hw = &adapter->hw;
440 struct e1000_ring *rx_ring = adapter->rx_ring; 408 struct e1000_ring *rx_ring = adapter->rx_ring;
441 struct e1000_rx_desc *rx_desc, *next_rxd; 409 struct e1000_rx_desc *rx_desc, *next_rxd;
442 struct e1000_buffer *buffer_info, *next_buffer; 410 struct e1000_buffer *buffer_info, *next_buffer;
@@ -486,8 +454,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
486 * packet, also make sure the frame isn't just CRC only */ 454 * packet, also make sure the frame isn't just CRC only */
487 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { 455 if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
488 /* All receives must fit into a single buffer */ 456 /* All receives must fit into a single buffer */
489 e_dbg("%s: Receive packet consumed multiple buffers\n", 457 e_dbg("Receive packet consumed multiple buffers\n");
490 netdev->name);
491 /* recycle */ 458 /* recycle */
492 buffer_info->skb = skb; 459 buffer_info->skb = skb;
493 goto next_desc; 460 goto next_desc;
@@ -513,9 +480,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
513 */ 480 */
514 if (length < copybreak) { 481 if (length < copybreak) {
515 struct sk_buff *new_skb = 482 struct sk_buff *new_skb =
516 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 483 netdev_alloc_skb_ip_align(netdev, length);
517 if (new_skb) { 484 if (new_skb) {
518 skb_reserve(new_skb, NET_IP_ALIGN);
519 skb_copy_to_linear_data_offset(new_skb, 485 skb_copy_to_linear_data_offset(new_skb,
520 -NET_IP_ALIGN, 486 -NET_IP_ALIGN,
521 (skb->data - 487 (skb->data -
@@ -560,8 +526,8 @@ next_desc:
560 526
561 adapter->total_rx_bytes += total_rx_bytes; 527 adapter->total_rx_bytes += total_rx_bytes;
562 adapter->total_rx_packets += total_rx_packets; 528 adapter->total_rx_packets += total_rx_packets;
563 adapter->net_stats.rx_bytes += total_rx_bytes; 529 netdev->stats.rx_bytes += total_rx_bytes;
564 adapter->net_stats.rx_packets += total_rx_packets; 530 netdev->stats.rx_packets += total_rx_packets;
565 return cleaned; 531 return cleaned;
566} 532}
567 533
@@ -578,15 +544,27 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
578 buffer_info->time_stamp = 0; 544 buffer_info->time_stamp = 0;
579} 545}
580 546
581static void e1000_print_tx_hang(struct e1000_adapter *adapter) 547static void e1000_print_hw_hang(struct work_struct *work)
582{ 548{
549 struct e1000_adapter *adapter = container_of(work,
550 struct e1000_adapter,
551 print_hang_task);
583 struct e1000_ring *tx_ring = adapter->tx_ring; 552 struct e1000_ring *tx_ring = adapter->tx_ring;
584 unsigned int i = tx_ring->next_to_clean; 553 unsigned int i = tx_ring->next_to_clean;
585 unsigned int eop = tx_ring->buffer_info[i].next_to_watch; 554 unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
586 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); 555 struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
556 struct e1000_hw *hw = &adapter->hw;
557 u16 phy_status, phy_1000t_status, phy_ext_status;
558 u16 pci_status;
559
560 e1e_rphy(hw, PHY_STATUS, &phy_status);
561 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
562 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
587 563
588 /* detected Tx unit hang */ 564 pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);
589 e_err("Detected Tx Unit Hang:\n" 565
566 /* detected Hardware unit hang */
567 e_err("Detected Hardware Unit Hang:\n"
590 " TDH <%x>\n" 568 " TDH <%x>\n"
591 " TDT <%x>\n" 569 " TDT <%x>\n"
592 " next_to_use <%x>\n" 570 " next_to_use <%x>\n"
@@ -595,7 +573,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
595 " time_stamp <%lx>\n" 573 " time_stamp <%lx>\n"
596 " next_to_watch <%x>\n" 574 " next_to_watch <%x>\n"
597 " jiffies <%lx>\n" 575 " jiffies <%lx>\n"
598 " next_to_watch.status <%x>\n", 576 " next_to_watch.status <%x>\n"
577 "MAC Status <%x>\n"
578 "PHY Status <%x>\n"
579 "PHY 1000BASE-T Status <%x>\n"
580 "PHY Extended Status <%x>\n"
581 "PCI Status <%x>\n",
599 readl(adapter->hw.hw_addr + tx_ring->head), 582 readl(adapter->hw.hw_addr + tx_ring->head),
600 readl(adapter->hw.hw_addr + tx_ring->tail), 583 readl(adapter->hw.hw_addr + tx_ring->tail),
601 tx_ring->next_to_use, 584 tx_ring->next_to_use,
@@ -603,7 +586,12 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter)
603 tx_ring->buffer_info[eop].time_stamp, 586 tx_ring->buffer_info[eop].time_stamp,
604 eop, 587 eop,
605 jiffies, 588 jiffies,
606 eop_desc->upper.fields.status); 589 eop_desc->upper.fields.status,
590 er32(STATUS),
591 phy_status,
592 phy_1000t_status,
593 phy_ext_status,
594 pci_status);
607} 595}
608 596
609/** 597/**
@@ -677,21 +665,23 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
677 } 665 }
678 666
679 if (adapter->detect_tx_hung) { 667 if (adapter->detect_tx_hung) {
680 /* Detect a transmit hang in hardware, this serializes the 668 /*
681 * check with the clearing of time_stamp and movement of i */ 669 * Detect a transmit hang in hardware, this serializes the
670 * check with the clearing of time_stamp and movement of i
671 */
682 adapter->detect_tx_hung = 0; 672 adapter->detect_tx_hung = 0;
683 if (tx_ring->buffer_info[i].time_stamp && 673 if (tx_ring->buffer_info[i].time_stamp &&
684 time_after(jiffies, tx_ring->buffer_info[i].time_stamp 674 time_after(jiffies, tx_ring->buffer_info[i].time_stamp
685 + (adapter->tx_timeout_factor * HZ)) 675 + (adapter->tx_timeout_factor * HZ))
686 && !(er32(STATUS) & E1000_STATUS_TXOFF)) { 676 && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
687 e1000_print_tx_hang(adapter); 677 schedule_work(&adapter->print_hang_task);
688 netif_stop_queue(netdev); 678 netif_stop_queue(netdev);
689 } 679 }
690 } 680 }
691 adapter->total_tx_bytes += total_tx_bytes; 681 adapter->total_tx_bytes += total_tx_bytes;
692 adapter->total_tx_packets += total_tx_packets; 682 adapter->total_tx_packets += total_tx_packets;
693 adapter->net_stats.tx_bytes += total_tx_bytes; 683 netdev->stats.tx_bytes += total_tx_bytes;
694 adapter->net_stats.tx_packets += total_tx_packets; 684 netdev->stats.tx_packets += total_tx_packets;
695 return (count < tx_ring->count); 685 return (count < tx_ring->count);
696} 686}
697 687
@@ -705,6 +695,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
705static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 695static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
706 int *work_done, int work_to_do) 696 int *work_done, int work_to_do)
707{ 697{
698 struct e1000_hw *hw = &adapter->hw;
708 union e1000_rx_desc_packet_split *rx_desc, *next_rxd; 699 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
709 struct net_device *netdev = adapter->netdev; 700 struct net_device *netdev = adapter->netdev;
710 struct pci_dev *pdev = adapter->pdev; 701 struct pci_dev *pdev = adapter->pdev;
@@ -748,8 +739,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
748 buffer_info->dma = 0; 739 buffer_info->dma = 0;
749 740
750 if (!(staterr & E1000_RXD_STAT_EOP)) { 741 if (!(staterr & E1000_RXD_STAT_EOP)) {
751 e_dbg("%s: Packet Split buffers didn't pick up the " 742 e_dbg("Packet Split buffers didn't pick up the full "
752 "full packet\n", netdev->name); 743 "packet\n");
753 dev_kfree_skb_irq(skb); 744 dev_kfree_skb_irq(skb);
754 goto next_desc; 745 goto next_desc;
755 } 746 }
@@ -762,8 +753,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
762 length = le16_to_cpu(rx_desc->wb.middle.length0); 753 length = le16_to_cpu(rx_desc->wb.middle.length0);
763 754
764 if (!length) { 755 if (!length) {
765 e_dbg("%s: Last part of the packet spanning multiple " 756 e_dbg("Last part of the packet spanning multiple "
766 "descriptors\n", netdev->name); 757 "descriptors\n");
767 dev_kfree_skb_irq(skb); 758 dev_kfree_skb_irq(skb);
768 goto next_desc; 759 goto next_desc;
769 } 760 }
@@ -871,8 +862,8 @@ next_desc:
871 862
872 adapter->total_rx_bytes += total_rx_bytes; 863 adapter->total_rx_bytes += total_rx_bytes;
873 adapter->total_rx_packets += total_rx_packets; 864 adapter->total_rx_packets += total_rx_packets;
874 adapter->net_stats.rx_bytes += total_rx_bytes; 865 netdev->stats.rx_bytes += total_rx_bytes;
875 adapter->net_stats.rx_packets += total_rx_packets; 866 netdev->stats.rx_packets += total_rx_packets;
876 return cleaned; 867 return cleaned;
877} 868}
878 869
@@ -1051,8 +1042,8 @@ next_desc:
1051 1042
1052 adapter->total_rx_bytes += total_rx_bytes; 1043 adapter->total_rx_bytes += total_rx_bytes;
1053 adapter->total_rx_packets += total_rx_packets; 1044 adapter->total_rx_packets += total_rx_packets;
1054 adapter->net_stats.rx_bytes += total_rx_bytes; 1045 netdev->stats.rx_bytes += total_rx_bytes;
1055 adapter->net_stats.rx_packets += total_rx_packets; 1046 netdev->stats.rx_packets += total_rx_packets;
1056 return cleaned; 1047 return cleaned;
1057} 1048}
1058 1049
@@ -1199,7 +1190,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
1199 struct e1000_hw *hw = &adapter->hw; 1190 struct e1000_hw *hw = &adapter->hw;
1200 u32 rctl, icr = er32(ICR); 1191 u32 rctl, icr = er32(ICR);
1201 1192
1202 if (!icr) 1193 if (!icr || test_bit(__E1000_DOWN, &adapter->state))
1203 return IRQ_NONE; /* Not our interrupt */ 1194 return IRQ_NONE; /* Not our interrupt */
1204 1195
1205 /* 1196 /*
@@ -1481,7 +1472,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1481 else 1472 else
1482 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1473 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1483 err = request_irq(adapter->msix_entries[vector].vector, 1474 err = request_irq(adapter->msix_entries[vector].vector,
1484 &e1000_intr_msix_rx, 0, adapter->rx_ring->name, 1475 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
1485 netdev); 1476 netdev);
1486 if (err) 1477 if (err)
1487 goto out; 1478 goto out;
@@ -1494,7 +1485,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1494 else 1485 else
1495 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1486 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1496 err = request_irq(adapter->msix_entries[vector].vector, 1487 err = request_irq(adapter->msix_entries[vector].vector,
1497 &e1000_intr_msix_tx, 0, adapter->tx_ring->name, 1488 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
1498 netdev); 1489 netdev);
1499 if (err) 1490 if (err)
1500 goto out; 1491 goto out;
@@ -1503,7 +1494,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
1503 vector++; 1494 vector++;
1504 1495
1505 err = request_irq(adapter->msix_entries[vector].vector, 1496 err = request_irq(adapter->msix_entries[vector].vector,
1506 &e1000_msix_other, 0, netdev->name, netdev); 1497 e1000_msix_other, 0, netdev->name, netdev);
1507 if (err) 1498 if (err)
1508 goto out; 1499 goto out;
1509 1500
@@ -1534,7 +1525,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
1534 e1000e_set_interrupt_capability(adapter); 1525 e1000e_set_interrupt_capability(adapter);
1535 } 1526 }
1536 if (adapter->flags & FLAG_MSI_ENABLED) { 1527 if (adapter->flags & FLAG_MSI_ENABLED) {
1537 err = request_irq(adapter->pdev->irq, &e1000_intr_msi, 0, 1528 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
1538 netdev->name, netdev); 1529 netdev->name, netdev);
1539 if (!err) 1530 if (!err)
1540 return err; 1531 return err;
@@ -1544,7 +1535,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
1544 adapter->int_mode = E1000E_INT_MODE_LEGACY; 1535 adapter->int_mode = E1000E_INT_MODE_LEGACY;
1545 } 1536 }
1546 1537
1547 err = request_irq(adapter->pdev->irq, &e1000_intr, IRQF_SHARED, 1538 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
1548 netdev->name, netdev); 1539 netdev->name, netdev);
1549 if (err) 1540 if (err)
1550 e_err("Unable to allocate interrupt, Error: %d\n", err); 1541 e_err("Unable to allocate interrupt, Error: %d\n", err);
@@ -2464,8 +2455,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2464 ew32(ITR, 1000000000 / (adapter->itr * 256)); 2455 ew32(ITR, 1000000000 / (adapter->itr * 256));
2465 2456
2466 ctrl_ext = er32(CTRL_EXT); 2457 ctrl_ext = er32(CTRL_EXT);
2467 /* Reset delay timers after every interrupt */
2468 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
2469 /* Auto-Mask interrupts upon ICR access */ 2458 /* Auto-Mask interrupts upon ICR access */
2470 ctrl_ext |= E1000_CTRL_EXT_IAME; 2459 ctrl_ext |= E1000_CTRL_EXT_IAME;
2471 ew32(IAM, 0xffffffff); 2460 ew32(IAM, 0xffffffff);
@@ -2507,21 +2496,23 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2507 * packet size is equal or larger than the specified value (in 8 byte 2496 * packet size is equal or larger than the specified value (in 8 byte
2508 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 2497 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
2509 */ 2498 */
2510 if ((adapter->flags & FLAG_HAS_ERT) && 2499 if (adapter->flags & FLAG_HAS_ERT) {
2511 (adapter->netdev->mtu > ETH_DATA_LEN)) { 2500 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2512 u32 rxdctl = er32(RXDCTL(0)); 2501 u32 rxdctl = er32(RXDCTL(0));
2513 ew32(RXDCTL(0), rxdctl | 0x3); 2502 ew32(RXDCTL(0), rxdctl | 0x3);
2514 ew32(ERT, E1000_ERT_2048 | (1 << 13)); 2503 ew32(ERT, E1000_ERT_2048 | (1 << 13));
2515 /* 2504 /*
2516 * With jumbo frames and early-receive enabled, excessive 2505 * With jumbo frames and early-receive enabled,
2517 * C4->C2 latencies result in dropped transactions. 2506 * excessive C-state transition latencies result in
2518 */ 2507 * dropped transactions.
2519 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2508 */
2520 e1000e_driver_name, 55); 2509 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2521 } else { 2510 adapter->netdev->name, 55);
2522 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2511 } else {
2523 e1000e_driver_name, 2512 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
2524 PM_QOS_DEFAULT_VALUE); 2513 adapter->netdev->name,
2514 PM_QOS_DEFAULT_VALUE);
2515 }
2525 } 2516 }
2526 2517
2527 /* Enable Receives */ 2518 /* Enable Receives */
@@ -2856,6 +2847,12 @@ int e1000e_up(struct e1000_adapter *adapter)
2856{ 2847{
2857 struct e1000_hw *hw = &adapter->hw; 2848 struct e1000_hw *hw = &adapter->hw;
2858 2849
2850 /* DMA latency requirement to workaround early-receive/jumbo issue */
2851 if (adapter->flags & FLAG_HAS_ERT)
2852 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY,
2853 adapter->netdev->name,
2854 PM_QOS_DEFAULT_VALUE);
2855
2859 /* hardware has been reset, we need to reload some things */ 2856 /* hardware has been reset, we need to reload some things */
2860 e1000_configure(adapter); 2857 e1000_configure(adapter);
2861 2858
@@ -2916,6 +2913,10 @@ void e1000e_down(struct e1000_adapter *adapter)
2916 e1000_clean_tx_ring(adapter); 2913 e1000_clean_tx_ring(adapter);
2917 e1000_clean_rx_ring(adapter); 2914 e1000_clean_rx_ring(adapter);
2918 2915
2916 if (adapter->flags & FLAG_HAS_ERT)
2917 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
2918 adapter->netdev->name);
2919
2919 /* 2920 /*
2920 * TODO: for power management, we could drop the link and 2921 * TODO: for power management, we could drop the link and
2921 * pci_disable_device here. 2922 * pci_disable_device here.
@@ -2973,7 +2974,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
2973 struct e1000_hw *hw = &adapter->hw; 2974 struct e1000_hw *hw = &adapter->hw;
2974 u32 icr = er32(ICR); 2975 u32 icr = er32(ICR);
2975 2976
2976 e_dbg("%s: icr is %08X\n", netdev->name, icr); 2977 e_dbg("icr is %08X\n", icr);
2977 if (icr & E1000_ICR_RXSEQ) { 2978 if (icr & E1000_ICR_RXSEQ) {
2978 adapter->flags &= ~FLAG_MSI_TEST_FAILED; 2979 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
2979 wmb(); 2980 wmb();
@@ -3010,7 +3011,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3010 if (err) 3011 if (err)
3011 goto msi_test_failed; 3012 goto msi_test_failed;
3012 3013
3013 err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0, 3014 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3014 netdev->name, netdev); 3015 netdev->name, netdev);
3015 if (err) { 3016 if (err) {
3016 pci_disable_msi(adapter->pdev); 3017 pci_disable_msi(adapter->pdev);
@@ -3043,7 +3044,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3043 goto msi_test_failed; 3044 goto msi_test_failed;
3044 3045
3045 /* okay so the test worked, restore settings */ 3046 /* okay so the test worked, restore settings */
3046 e_dbg("%s: MSI interrupt test succeeded!\n", netdev->name); 3047 e_dbg("MSI interrupt test succeeded!\n");
3047msi_test_failed: 3048msi_test_failed:
3048 e1000e_set_interrupt_capability(adapter); 3049 e1000e_set_interrupt_capability(adapter);
3049 e1000_request_irq(adapter); 3050 e1000_request_irq(adapter);
@@ -3304,6 +3305,7 @@ static void e1000_update_phy_info(unsigned long data)
3304 **/ 3305 **/
3305void e1000e_update_stats(struct e1000_adapter *adapter) 3306void e1000e_update_stats(struct e1000_adapter *adapter)
3306{ 3307{
3308 struct net_device *netdev = adapter->netdev;
3307 struct e1000_hw *hw = &adapter->hw; 3309 struct e1000_hw *hw = &adapter->hw;
3308 struct pci_dev *pdev = adapter->pdev; 3310 struct pci_dev *pdev = adapter->pdev;
3309 u16 phy_data; 3311 u16 phy_data;
@@ -3398,8 +3400,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3398 adapter->stats.tsctfc += er32(TSCTFC); 3400 adapter->stats.tsctfc += er32(TSCTFC);
3399 3401
3400 /* Fill out the OS statistics structure */ 3402 /* Fill out the OS statistics structure */
3401 adapter->net_stats.multicast = adapter->stats.mprc; 3403 netdev->stats.multicast = adapter->stats.mprc;
3402 adapter->net_stats.collisions = adapter->stats.colc; 3404 netdev->stats.collisions = adapter->stats.colc;
3403 3405
3404 /* Rx Errors */ 3406 /* Rx Errors */
3405 3407
@@ -3407,22 +3409,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3407 * RLEC on some newer hardware can be incorrect so build 3409 * RLEC on some newer hardware can be incorrect so build
3408 * our own version based on RUC and ROC 3410 * our own version based on RUC and ROC
3409 */ 3411 */
3410 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3412 netdev->stats.rx_errors = adapter->stats.rxerrc +
3411 adapter->stats.crcerrs + adapter->stats.algnerrc + 3413 adapter->stats.crcerrs + adapter->stats.algnerrc +
3412 adapter->stats.ruc + adapter->stats.roc + 3414 adapter->stats.ruc + adapter->stats.roc +
3413 adapter->stats.cexterr; 3415 adapter->stats.cexterr;
3414 adapter->net_stats.rx_length_errors = adapter->stats.ruc + 3416 netdev->stats.rx_length_errors = adapter->stats.ruc +
3415 adapter->stats.roc; 3417 adapter->stats.roc;
3416 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3418 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3417 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3419 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3418 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3420 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3419 3421
3420 /* Tx Errors */ 3422 /* Tx Errors */
3421 adapter->net_stats.tx_errors = adapter->stats.ecol + 3423 netdev->stats.tx_errors = adapter->stats.ecol +
3422 adapter->stats.latecol; 3424 adapter->stats.latecol;
3423 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3425 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3424 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3426 netdev->stats.tx_window_errors = adapter->stats.latecol;
3425 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3427 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3426 3428
3427 /* Tx Dropped needs to be maintained elsewhere */ 3429 /* Tx Dropped needs to be maintained elsewhere */
3428 3430
@@ -3776,68 +3778,64 @@ static int e1000_tso(struct e1000_adapter *adapter,
3776 u8 ipcss, ipcso, tucss, tucso, hdr_len; 3778 u8 ipcss, ipcso, tucss, tucso, hdr_len;
3777 int err; 3779 int err;
3778 3780
3779 if (skb_is_gso(skb)) { 3781 if (!skb_is_gso(skb))
3780 if (skb_header_cloned(skb)) { 3782 return 0;
3781 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3782 if (err)
3783 return err;
3784 }
3785 3783
3786 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3784 if (skb_header_cloned(skb)) {
3787 mss = skb_shinfo(skb)->gso_size; 3785 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3788 if (skb->protocol == htons(ETH_P_IP)) { 3786 if (err)
3789 struct iphdr *iph = ip_hdr(skb); 3787 return err;
3790 iph->tot_len = 0; 3788 }
3791 iph->check = 0;
3792 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
3793 iph->daddr, 0,
3794 IPPROTO_TCP,
3795 0);
3796 cmd_length = E1000_TXD_CMD_IP;
3797 ipcse = skb_transport_offset(skb) - 1;
3798 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3799 ipv6_hdr(skb)->payload_len = 0;
3800 tcp_hdr(skb)->check =
3801 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3802 &ipv6_hdr(skb)->daddr,
3803 0, IPPROTO_TCP, 0);
3804 ipcse = 0;
3805 }
3806 ipcss = skb_network_offset(skb);
3807 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
3808 tucss = skb_transport_offset(skb);
3809 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3810 tucse = 0;
3811 3789
3812 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 3790 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3813 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 3791 mss = skb_shinfo(skb)->gso_size;
3792 if (skb->protocol == htons(ETH_P_IP)) {
3793 struct iphdr *iph = ip_hdr(skb);
3794 iph->tot_len = 0;
3795 iph->check = 0;
3796 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
3797 0, IPPROTO_TCP, 0);
3798 cmd_length = E1000_TXD_CMD_IP;
3799 ipcse = skb_transport_offset(skb) - 1;
3800 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3801 ipv6_hdr(skb)->payload_len = 0;
3802 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3803 &ipv6_hdr(skb)->daddr,
3804 0, IPPROTO_TCP, 0);
3805 ipcse = 0;
3806 }
3807 ipcss = skb_network_offset(skb);
3808 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
3809 tucss = skb_transport_offset(skb);
3810 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
3811 tucse = 0;
3814 3812
3815 i = tx_ring->next_to_use; 3813 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
3816 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 3814 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
3817 buffer_info = &tx_ring->buffer_info[i];
3818 3815
3819 context_desc->lower_setup.ip_fields.ipcss = ipcss; 3816 i = tx_ring->next_to_use;
3820 context_desc->lower_setup.ip_fields.ipcso = ipcso; 3817 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
3821 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); 3818 buffer_info = &tx_ring->buffer_info[i];
3822 context_desc->upper_setup.tcp_fields.tucss = tucss;
3823 context_desc->upper_setup.tcp_fields.tucso = tucso;
3824 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
3825 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
3826 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
3827 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
3828 3819
3829 buffer_info->time_stamp = jiffies; 3820 context_desc->lower_setup.ip_fields.ipcss = ipcss;
3830 buffer_info->next_to_watch = i; 3821 context_desc->lower_setup.ip_fields.ipcso = ipcso;
3822 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
3823 context_desc->upper_setup.tcp_fields.tucss = tucss;
3824 context_desc->upper_setup.tcp_fields.tucso = tucso;
3825 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
3826 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
3827 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
3828 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
3831 3829
3832 i++; 3830 buffer_info->time_stamp = jiffies;
3833 if (i == tx_ring->count) 3831 buffer_info->next_to_watch = i;
3834 i = 0;
3835 tx_ring->next_to_use = i;
3836 3832
3837 return 1; 3833 i++;
3838 } 3834 if (i == tx_ring->count)
3835 i = 0;
3836 tx_ring->next_to_use = i;
3839 3837
3840 return 0; 3838 return 1;
3841} 3839}
3842 3840
3843static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb) 3841static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
@@ -4271,10 +4269,8 @@ static void e1000_reset_task(struct work_struct *work)
4271 **/ 4269 **/
4272static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 4270static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
4273{ 4271{
4274 struct e1000_adapter *adapter = netdev_priv(netdev);
4275
4276 /* only return the current stats */ 4272 /* only return the current stats */
4277 return &adapter->net_stats; 4273 return &netdev->stats;
4278} 4274}
4279 4275
4280/** 4276/**
@@ -4362,6 +4358,8 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4362 data->phy_id = adapter->hw.phy.addr; 4358 data->phy_id = adapter->hw.phy.addr;
4363 break; 4359 break;
4364 case SIOCGMIIREG: 4360 case SIOCGMIIREG:
4361 e1000_phy_read_status(adapter);
4362
4365 switch (data->reg_num & 0x1F) { 4363 switch (data->reg_num & 0x1F) {
4366 case MII_BMCR: 4364 case MII_BMCR:
4367 data->val_out = adapter->phy_regs.bmcr; 4365 data->val_out = adapter->phy_regs.bmcr;
@@ -4469,7 +4467,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
4469 e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); 4467 e1e_wphy(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
4470 4468
4471 /* activate PHY wakeup */ 4469 /* activate PHY wakeup */
4472 retval = hw->phy.ops.acquire_phy(hw); 4470 retval = hw->phy.ops.acquire(hw);
4473 if (retval) { 4471 if (retval) {
4474 e_err("Could not acquire PHY\n"); 4472 e_err("Could not acquire PHY\n");
4475 return retval; 4473 return retval;
@@ -4486,7 +4484,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
4486 if (retval) 4484 if (retval)
4487 e_err("Could not set PHY Host Wakeup bit\n"); 4485 e_err("Could not set PHY Host Wakeup bit\n");
4488out: 4486out:
4489 hw->phy.ops.release_phy(hw); 4487 hw->phy.ops.release(hw);
4490 4488
4491 return retval; 4489 return retval;
4492} 4490}
@@ -5160,6 +5158,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5160 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 5158 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
5161 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 5159 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
5162 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 5160 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
5161 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
5163 5162
5164 /* Initialize link parameters. User can change them with ethtool */ 5163 /* Initialize link parameters. User can change them with ethtool */
5165 adapter->hw.mac.autoneg = 1; 5164 adapter->hw.mac.autoneg = 1;
@@ -5283,6 +5282,11 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5283 del_timer_sync(&adapter->watchdog_timer); 5282 del_timer_sync(&adapter->watchdog_timer);
5284 del_timer_sync(&adapter->phy_info_timer); 5283 del_timer_sync(&adapter->phy_info_timer);
5285 5284
5285 cancel_work_sync(&adapter->reset_task);
5286 cancel_work_sync(&adapter->watchdog_task);
5287 cancel_work_sync(&adapter->downshift_task);
5288 cancel_work_sync(&adapter->update_phy_task);
5289 cancel_work_sync(&adapter->print_hang_task);
5286 flush_scheduled_work(); 5290 flush_scheduled_work();
5287 5291
5288 /* 5292 /*
@@ -5414,12 +5418,10 @@ static int __init e1000_init_module(void)
5414 int ret; 5418 int ret;
5415 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", 5419 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
5416 e1000e_driver_name, e1000e_driver_version); 5420 e1000e_driver_name, e1000e_driver_version);
5417 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n", 5421 printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
5418 e1000e_driver_name); 5422 e1000e_driver_name);
5419 ret = pci_register_driver(&e1000_driver); 5423 ret = pci_register_driver(&e1000_driver);
5420 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name, 5424
5421 PM_QOS_DEFAULT_VALUE);
5422
5423 return ret; 5425 return ret;
5424} 5426}
5425module_init(e1000_init_module); 5427module_init(e1000_init_module);
@@ -5433,7 +5435,6 @@ module_init(e1000_init_module);
5433static void __exit e1000_exit_module(void) 5435static void __exit e1000_exit_module(void)
5434{ 5436{
5435 pci_unregister_driver(&e1000_driver); 5437 pci_unregister_driver(&e1000_driver);
5436 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, e1000e_driver_name);
5437} 5438}
5438module_exit(e1000_exit_module); 5439module_exit(e1000_exit_module);
5439 5440
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 1342e0b1815c..2e399778cae5 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 85f955f70417..5cd01c691c53 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation. 4 Copyright(c) 1999 - 2009 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -130,7 +130,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
130 u16 phy_id; 130 u16 phy_id;
131 u16 retry_count = 0; 131 u16 retry_count = 0;
132 132
133 if (!(phy->ops.read_phy_reg)) 133 if (!(phy->ops.read_reg))
134 goto out; 134 goto out;
135 135
136 while (retry_count < 2) { 136 while (retry_count < 2) {
@@ -156,24 +156,24 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
156 * MDIC mode. No harm in trying again in this case since 156 * MDIC mode. No harm in trying again in this case since
157 * the PHY ID is unknown at this point anyway 157 * the PHY ID is unknown at this point anyway
158 */ 158 */
159 ret_val = phy->ops.acquire_phy(hw); 159 ret_val = phy->ops.acquire(hw);
160 if (ret_val) 160 if (ret_val)
161 goto out; 161 goto out;
162 ret_val = e1000_set_mdio_slow_mode_hv(hw, true); 162 ret_val = e1000_set_mdio_slow_mode_hv(hw, true);
163 if (ret_val) 163 if (ret_val)
164 goto out; 164 goto out;
165 phy->ops.release_phy(hw); 165 phy->ops.release(hw);
166 166
167 retry_count++; 167 retry_count++;
168 } 168 }
169out: 169out:
170 /* Revert to MDIO fast mode, if applicable */ 170 /* Revert to MDIO fast mode, if applicable */
171 if (retry_count) { 171 if (retry_count) {
172 ret_val = phy->ops.acquire_phy(hw); 172 ret_val = phy->ops.acquire(hw);
173 if (ret_val) 173 if (ret_val)
174 return ret_val; 174 return ret_val;
175 ret_val = e1000_set_mdio_slow_mode_hv(hw, false); 175 ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
176 phy->ops.release_phy(hw); 176 phy->ops.release(hw);
177 } 177 }
178 178
179 return ret_val; 179 return ret_val;
@@ -211,7 +211,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
211 u32 i, mdic = 0; 211 u32 i, mdic = 0;
212 212
213 if (offset > MAX_PHY_REG_ADDRESS) { 213 if (offset > MAX_PHY_REG_ADDRESS) {
214 hw_dbg(hw, "PHY Address %d is out of range\n", offset); 214 e_dbg("PHY Address %d is out of range\n", offset);
215 return -E1000_ERR_PARAM; 215 return -E1000_ERR_PARAM;
216 } 216 }
217 217
@@ -238,11 +238,11 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
238 break; 238 break;
239 } 239 }
240 if (!(mdic & E1000_MDIC_READY)) { 240 if (!(mdic & E1000_MDIC_READY)) {
241 hw_dbg(hw, "MDI Read did not complete\n"); 241 e_dbg("MDI Read did not complete\n");
242 return -E1000_ERR_PHY; 242 return -E1000_ERR_PHY;
243 } 243 }
244 if (mdic & E1000_MDIC_ERROR) { 244 if (mdic & E1000_MDIC_ERROR) {
245 hw_dbg(hw, "MDI Error\n"); 245 e_dbg("MDI Error\n");
246 return -E1000_ERR_PHY; 246 return -E1000_ERR_PHY;
247 } 247 }
248 *data = (u16) mdic; 248 *data = (u16) mdic;
@@ -264,7 +264,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
264 u32 i, mdic = 0; 264 u32 i, mdic = 0;
265 265
266 if (offset > MAX_PHY_REG_ADDRESS) { 266 if (offset > MAX_PHY_REG_ADDRESS) {
267 hw_dbg(hw, "PHY Address %d is out of range\n", offset); 267 e_dbg("PHY Address %d is out of range\n", offset);
268 return -E1000_ERR_PARAM; 268 return -E1000_ERR_PARAM;
269 } 269 }
270 270
@@ -292,11 +292,11 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
292 break; 292 break;
293 } 293 }
294 if (!(mdic & E1000_MDIC_READY)) { 294 if (!(mdic & E1000_MDIC_READY)) {
295 hw_dbg(hw, "MDI Write did not complete\n"); 295 e_dbg("MDI Write did not complete\n");
296 return -E1000_ERR_PHY; 296 return -E1000_ERR_PHY;
297 } 297 }
298 if (mdic & E1000_MDIC_ERROR) { 298 if (mdic & E1000_MDIC_ERROR) {
299 hw_dbg(hw, "MDI Error\n"); 299 e_dbg("MDI Error\n");
300 return -E1000_ERR_PHY; 300 return -E1000_ERR_PHY;
301 } 301 }
302 302
@@ -317,14 +317,14 @@ s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
317{ 317{
318 s32 ret_val; 318 s32 ret_val;
319 319
320 ret_val = hw->phy.ops.acquire_phy(hw); 320 ret_val = hw->phy.ops.acquire(hw);
321 if (ret_val) 321 if (ret_val)
322 return ret_val; 322 return ret_val;
323 323
324 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 324 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
325 data); 325 data);
326 326
327 hw->phy.ops.release_phy(hw); 327 hw->phy.ops.release(hw);
328 328
329 return ret_val; 329 return ret_val;
330} 330}
@@ -342,14 +342,14 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
342{ 342{
343 s32 ret_val; 343 s32 ret_val;
344 344
345 ret_val = hw->phy.ops.acquire_phy(hw); 345 ret_val = hw->phy.ops.acquire(hw);
346 if (ret_val) 346 if (ret_val)
347 return ret_val; 347 return ret_val;
348 348
349 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 349 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
350 data); 350 data);
351 351
352 hw->phy.ops.release_phy(hw); 352 hw->phy.ops.release(hw);
353 353
354 return ret_val; 354 return ret_val;
355} 355}
@@ -371,10 +371,10 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
371 s32 ret_val = 0; 371 s32 ret_val = 0;
372 372
373 if (!locked) { 373 if (!locked) {
374 if (!(hw->phy.ops.acquire_phy)) 374 if (!(hw->phy.ops.acquire))
375 goto out; 375 goto out;
376 376
377 ret_val = hw->phy.ops.acquire_phy(hw); 377 ret_val = hw->phy.ops.acquire(hw);
378 if (ret_val) 378 if (ret_val)
379 goto out; 379 goto out;
380 } 380 }
@@ -392,7 +392,7 @@ static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
392 392
393release: 393release:
394 if (!locked) 394 if (!locked)
395 hw->phy.ops.release_phy(hw); 395 hw->phy.ops.release(hw);
396out: 396out:
397 return ret_val; 397 return ret_val;
398} 398}
@@ -442,10 +442,10 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
442 s32 ret_val = 0; 442 s32 ret_val = 0;
443 443
444 if (!locked) { 444 if (!locked) {
445 if (!(hw->phy.ops.acquire_phy)) 445 if (!(hw->phy.ops.acquire))
446 goto out; 446 goto out;
447 447
448 ret_val = hw->phy.ops.acquire_phy(hw); 448 ret_val = hw->phy.ops.acquire(hw);
449 if (ret_val) 449 if (ret_val)
450 goto out; 450 goto out;
451 } 451 }
@@ -463,7 +463,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
463 463
464release: 464release:
465 if (!locked) 465 if (!locked)
466 hw->phy.ops.release_phy(hw); 466 hw->phy.ops.release(hw);
467 467
468out: 468out:
469 return ret_val; 469 return ret_val;
@@ -515,10 +515,10 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
515 s32 ret_val = 0; 515 s32 ret_val = 0;
516 516
517 if (!locked) { 517 if (!locked) {
518 if (!(hw->phy.ops.acquire_phy)) 518 if (!(hw->phy.ops.acquire))
519 goto out; 519 goto out;
520 520
521 ret_val = hw->phy.ops.acquire_phy(hw); 521 ret_val = hw->phy.ops.acquire(hw);
522 if (ret_val) 522 if (ret_val)
523 goto out; 523 goto out;
524 } 524 }
@@ -533,7 +533,7 @@ static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
533 *data = (u16)kmrnctrlsta; 533 *data = (u16)kmrnctrlsta;
534 534
535 if (!locked) 535 if (!locked)
536 hw->phy.ops.release_phy(hw); 536 hw->phy.ops.release(hw);
537 537
538out: 538out:
539 return ret_val; 539 return ret_val;
@@ -587,10 +587,10 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
587 s32 ret_val = 0; 587 s32 ret_val = 0;
588 588
589 if (!locked) { 589 if (!locked) {
590 if (!(hw->phy.ops.acquire_phy)) 590 if (!(hw->phy.ops.acquire))
591 goto out; 591 goto out;
592 592
593 ret_val = hw->phy.ops.acquire_phy(hw); 593 ret_val = hw->phy.ops.acquire(hw);
594 if (ret_val) 594 if (ret_val)
595 goto out; 595 goto out;
596 } 596 }
@@ -602,7 +602,7 @@ static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
602 udelay(2); 602 udelay(2);
603 603
604 if (!locked) 604 if (!locked)
605 hw->phy.ops.release_phy(hw); 605 hw->phy.ops.release(hw);
606 606
607out: 607out:
608 return ret_val; 608 return ret_val;
@@ -649,7 +649,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
649 u16 phy_data; 649 u16 phy_data;
650 650
651 /* Enable CRS on TX. This must be set for half-duplex operation. */ 651 /* Enable CRS on TX. This must be set for half-duplex operation. */
652 ret_val = phy->ops.read_phy_reg(hw, I82577_CFG_REG, &phy_data); 652 ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
653 if (ret_val) 653 if (ret_val)
654 goto out; 654 goto out;
655 655
@@ -658,7 +658,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
658 /* Enable downshift */ 658 /* Enable downshift */
659 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 659 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
660 660
661 ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data); 661 ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
662 662
663out: 663out:
664 return ret_val; 664 return ret_val;
@@ -776,12 +776,12 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
776 /* Commit the changes. */ 776 /* Commit the changes. */
777 ret_val = e1000e_commit_phy(hw); 777 ret_val = e1000e_commit_phy(hw);
778 if (ret_val) { 778 if (ret_val) {
779 hw_dbg(hw, "Error committing the PHY changes\n"); 779 e_dbg("Error committing the PHY changes\n");
780 return ret_val; 780 return ret_val;
781 } 781 }
782 782
783 if (phy->type == e1000_phy_82578) { 783 if (phy->type == e1000_phy_82578) {
784 ret_val = phy->ops.read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 784 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
785 &phy_data); 785 &phy_data);
786 if (ret_val) 786 if (ret_val)
787 return ret_val; 787 return ret_val;
@@ -789,7 +789,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
789 /* 82578 PHY - set the downshift count to 1x. */ 789 /* 82578 PHY - set the downshift count to 1x. */
790 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; 790 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
791 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; 791 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
792 ret_val = phy->ops.write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 792 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
793 phy_data); 793 phy_data);
794 if (ret_val) 794 if (ret_val)
795 return ret_val; 795 return ret_val;
@@ -813,7 +813,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
813 813
814 ret_val = e1000_phy_hw_reset(hw); 814 ret_val = e1000_phy_hw_reset(hw);
815 if (ret_val) { 815 if (ret_val) {
816 hw_dbg(hw, "Error resetting the PHY.\n"); 816 e_dbg("Error resetting the PHY.\n");
817 return ret_val; 817 return ret_val;
818 } 818 }
819 819
@@ -824,9 +824,9 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
824 msleep(100); 824 msleep(100);
825 825
826 /* disable lplu d0 during driver init */ 826 /* disable lplu d0 during driver init */
827 ret_val = e1000_set_d0_lplu_state(hw, 0); 827 ret_val = e1000_set_d0_lplu_state(hw, false);
828 if (ret_val) { 828 if (ret_val) {
829 hw_dbg(hw, "Error Disabling LPLU D0\n"); 829 e_dbg("Error Disabling LPLU D0\n");
830 return ret_val; 830 return ret_val;
831 } 831 }
832 /* Configure mdi-mdix settings */ 832 /* Configure mdi-mdix settings */
@@ -962,39 +962,39 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
962 NWAY_AR_10T_HD_CAPS); 962 NWAY_AR_10T_HD_CAPS);
963 mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); 963 mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
964 964
965 hw_dbg(hw, "autoneg_advertised %x\n", phy->autoneg_advertised); 965 e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
966 966
967 /* Do we want to advertise 10 Mb Half Duplex? */ 967 /* Do we want to advertise 10 Mb Half Duplex? */
968 if (phy->autoneg_advertised & ADVERTISE_10_HALF) { 968 if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
969 hw_dbg(hw, "Advertise 10mb Half duplex\n"); 969 e_dbg("Advertise 10mb Half duplex\n");
970 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; 970 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
971 } 971 }
972 972
973 /* Do we want to advertise 10 Mb Full Duplex? */ 973 /* Do we want to advertise 10 Mb Full Duplex? */
974 if (phy->autoneg_advertised & ADVERTISE_10_FULL) { 974 if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
975 hw_dbg(hw, "Advertise 10mb Full duplex\n"); 975 e_dbg("Advertise 10mb Full duplex\n");
976 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; 976 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
977 } 977 }
978 978
979 /* Do we want to advertise 100 Mb Half Duplex? */ 979 /* Do we want to advertise 100 Mb Half Duplex? */
980 if (phy->autoneg_advertised & ADVERTISE_100_HALF) { 980 if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
981 hw_dbg(hw, "Advertise 100mb Half duplex\n"); 981 e_dbg("Advertise 100mb Half duplex\n");
982 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; 982 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
983 } 983 }
984 984
985 /* Do we want to advertise 100 Mb Full Duplex? */ 985 /* Do we want to advertise 100 Mb Full Duplex? */
986 if (phy->autoneg_advertised & ADVERTISE_100_FULL) { 986 if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
987 hw_dbg(hw, "Advertise 100mb Full duplex\n"); 987 e_dbg("Advertise 100mb Full duplex\n");
988 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; 988 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
989 } 989 }
990 990
991 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ 991 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
992 if (phy->autoneg_advertised & ADVERTISE_1000_HALF) 992 if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
993 hw_dbg(hw, "Advertise 1000mb Half duplex request denied!\n"); 993 e_dbg("Advertise 1000mb Half duplex request denied!\n");
994 994
995 /* Do we want to advertise 1000 Mb Full Duplex? */ 995 /* Do we want to advertise 1000 Mb Full Duplex? */
996 if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { 996 if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
997 hw_dbg(hw, "Advertise 1000mb Full duplex\n"); 997 e_dbg("Advertise 1000mb Full duplex\n");
998 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 998 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
999 } 999 }
1000 1000
@@ -1053,7 +1053,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1053 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1053 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1054 break; 1054 break;
1055 default: 1055 default:
1056 hw_dbg(hw, "Flow control param set incorrectly\n"); 1056 e_dbg("Flow control param set incorrectly\n");
1057 ret_val = -E1000_ERR_CONFIG; 1057 ret_val = -E1000_ERR_CONFIG;
1058 return ret_val; 1058 return ret_val;
1059 } 1059 }
@@ -1062,7 +1062,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1062 if (ret_val) 1062 if (ret_val)
1063 return ret_val; 1063 return ret_val;
1064 1064
1065 hw_dbg(hw, "Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1065 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1066 1066
1067 if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 1067 if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
1068 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1068 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
@@ -1099,13 +1099,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1099 if (phy->autoneg_advertised == 0) 1099 if (phy->autoneg_advertised == 0)
1100 phy->autoneg_advertised = phy->autoneg_mask; 1100 phy->autoneg_advertised = phy->autoneg_mask;
1101 1101
1102 hw_dbg(hw, "Reconfiguring auto-neg advertisement params\n"); 1102 e_dbg("Reconfiguring auto-neg advertisement params\n");
1103 ret_val = e1000_phy_setup_autoneg(hw); 1103 ret_val = e1000_phy_setup_autoneg(hw);
1104 if (ret_val) { 1104 if (ret_val) {
1105 hw_dbg(hw, "Error Setting up Auto-Negotiation\n"); 1105 e_dbg("Error Setting up Auto-Negotiation\n");
1106 return ret_val; 1106 return ret_val;
1107 } 1107 }
1108 hw_dbg(hw, "Restarting Auto-Neg\n"); 1108 e_dbg("Restarting Auto-Neg\n");
1109 1109
1110 /* 1110 /*
1111 * Restart auto-negotiation by setting the Auto Neg Enable bit and 1111 * Restart auto-negotiation by setting the Auto Neg Enable bit and
@@ -1127,7 +1127,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1127 if (phy->autoneg_wait_to_complete) { 1127 if (phy->autoneg_wait_to_complete) {
1128 ret_val = e1000_wait_autoneg(hw); 1128 ret_val = e1000_wait_autoneg(hw);
1129 if (ret_val) { 1129 if (ret_val) {
1130 hw_dbg(hw, "Error while waiting for " 1130 e_dbg("Error while waiting for "
1131 "autoneg to complete\n"); 1131 "autoneg to complete\n");
1132 return ret_val; 1132 return ret_val;
1133 } 1133 }
@@ -1165,10 +1165,10 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
1165 * PHY will be set to 10H, 10F, 100H or 100F 1165 * PHY will be set to 10H, 10F, 100H or 100F
1166 * depending on user settings. 1166 * depending on user settings.
1167 */ 1167 */
1168 hw_dbg(hw, "Forcing Speed and Duplex\n"); 1168 e_dbg("Forcing Speed and Duplex\n");
1169 ret_val = e1000_phy_force_speed_duplex(hw); 1169 ret_val = e1000_phy_force_speed_duplex(hw);
1170 if (ret_val) { 1170 if (ret_val) {
1171 hw_dbg(hw, "Error Forcing Speed and Duplex\n"); 1171 e_dbg("Error Forcing Speed and Duplex\n");
1172 return ret_val; 1172 return ret_val;
1173 } 1173 }
1174 } 1174 }
@@ -1185,11 +1185,11 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
1185 return ret_val; 1185 return ret_val;
1186 1186
1187 if (link) { 1187 if (link) {
1188 hw_dbg(hw, "Valid link established!!!\n"); 1188 e_dbg("Valid link established!!!\n");
1189 e1000e_config_collision_dist(hw); 1189 e1000e_config_collision_dist(hw);
1190 ret_val = e1000e_config_fc_after_link_up(hw); 1190 ret_val = e1000e_config_fc_after_link_up(hw);
1191 } else { 1191 } else {
1192 hw_dbg(hw, "Unable to establish link!!!\n"); 1192 e_dbg("Unable to establish link!!!\n");
1193 } 1193 }
1194 1194
1195 return ret_val; 1195 return ret_val;
@@ -1235,12 +1235,12 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1235 if (ret_val) 1235 if (ret_val)
1236 return ret_val; 1236 return ret_val;
1237 1237
1238 hw_dbg(hw, "IGP PSCR: %X\n", phy_data); 1238 e_dbg("IGP PSCR: %X\n", phy_data);
1239 1239
1240 udelay(1); 1240 udelay(1);
1241 1241
1242 if (phy->autoneg_wait_to_complete) { 1242 if (phy->autoneg_wait_to_complete) {
1243 hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); 1243 e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
1244 1244
1245 ret_val = e1000e_phy_has_link_generic(hw, 1245 ret_val = e1000e_phy_has_link_generic(hw,
1246 PHY_FORCE_LIMIT, 1246 PHY_FORCE_LIMIT,
@@ -1250,7 +1250,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
1250 return ret_val; 1250 return ret_val;
1251 1251
1252 if (!link) 1252 if (!link)
1253 hw_dbg(hw, "Link taking longer than expected.\n"); 1253 e_dbg("Link taking longer than expected.\n");
1254 1254
1255 /* Try once more */ 1255 /* Try once more */
1256 ret_val = e1000e_phy_has_link_generic(hw, 1256 ret_val = e1000e_phy_has_link_generic(hw,
@@ -1294,7 +1294,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1294 if (ret_val) 1294 if (ret_val)
1295 return ret_val; 1295 return ret_val;
1296 1296
1297 hw_dbg(hw, "M88E1000 PSCR: %X\n", phy_data); 1297 e_dbg("M88E1000 PSCR: %X\n", phy_data);
1298 1298
1299 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data); 1299 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
1300 if (ret_val) 1300 if (ret_val)
@@ -1312,7 +1312,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1312 return ret_val; 1312 return ret_val;
1313 1313
1314 if (phy->autoneg_wait_to_complete) { 1314 if (phy->autoneg_wait_to_complete) {
1315 hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); 1315 e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
1316 1316
1317 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 1317 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
1318 100000, &link); 1318 100000, &link);
@@ -1403,11 +1403,11 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
1403 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { 1403 if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
1404 ctrl &= ~E1000_CTRL_FD; 1404 ctrl &= ~E1000_CTRL_FD;
1405 *phy_ctrl &= ~MII_CR_FULL_DUPLEX; 1405 *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
1406 hw_dbg(hw, "Half Duplex\n"); 1406 e_dbg("Half Duplex\n");
1407 } else { 1407 } else {
1408 ctrl |= E1000_CTRL_FD; 1408 ctrl |= E1000_CTRL_FD;
1409 *phy_ctrl |= MII_CR_FULL_DUPLEX; 1409 *phy_ctrl |= MII_CR_FULL_DUPLEX;
1410 hw_dbg(hw, "Full Duplex\n"); 1410 e_dbg("Full Duplex\n");
1411 } 1411 }
1412 1412
1413 /* Forcing 10mb or 100mb? */ 1413 /* Forcing 10mb or 100mb? */
@@ -1415,12 +1415,12 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
1415 ctrl |= E1000_CTRL_SPD_100; 1415 ctrl |= E1000_CTRL_SPD_100;
1416 *phy_ctrl |= MII_CR_SPEED_100; 1416 *phy_ctrl |= MII_CR_SPEED_100;
1417 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); 1417 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1418 hw_dbg(hw, "Forcing 100mb\n"); 1418 e_dbg("Forcing 100mb\n");
1419 } else { 1419 } else {
1420 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 1420 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1421 *phy_ctrl |= MII_CR_SPEED_10; 1421 *phy_ctrl |= MII_CR_SPEED_10;
1422 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); 1422 *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1423 hw_dbg(hw, "Forcing 10mb\n"); 1423 e_dbg("Forcing 10mb\n");
1424 } 1424 }
1425 1425
1426 e1000e_config_collision_dist(hw); 1426 e1000e_config_collision_dist(hw);
@@ -1535,7 +1535,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
1535 break; 1535 break;
1536 default: 1536 default:
1537 /* speed downshift not supported */ 1537 /* speed downshift not supported */
1538 phy->speed_downgraded = 0; 1538 phy->speed_downgraded = false;
1539 return 0; 1539 return 0;
1540 } 1540 }
1541 1541
@@ -1816,7 +1816,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1816 bool link; 1816 bool link;
1817 1817
1818 if (hw->phy.media_type != e1000_media_type_copper) { 1818 if (hw->phy.media_type != e1000_media_type_copper) {
1819 hw_dbg(hw, "Phy info is only valid for copper media\n"); 1819 e_dbg("Phy info is only valid for copper media\n");
1820 return -E1000_ERR_CONFIG; 1820 return -E1000_ERR_CONFIG;
1821 } 1821 }
1822 1822
@@ -1825,7 +1825,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1825 return ret_val; 1825 return ret_val;
1826 1826
1827 if (!link) { 1827 if (!link) {
1828 hw_dbg(hw, "Phy info is only valid if link is up\n"); 1828 e_dbg("Phy info is only valid if link is up\n");
1829 return -E1000_ERR_CONFIG; 1829 return -E1000_ERR_CONFIG;
1830 } 1830 }
1831 1831
@@ -1893,11 +1893,11 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
1893 return ret_val; 1893 return ret_val;
1894 1894
1895 if (!link) { 1895 if (!link) {
1896 hw_dbg(hw, "Phy info is only valid if link is up\n"); 1896 e_dbg("Phy info is only valid if link is up\n");
1897 return -E1000_ERR_CONFIG; 1897 return -E1000_ERR_CONFIG;
1898 } 1898 }
1899 1899
1900 phy->polarity_correction = 1; 1900 phy->polarity_correction = true;
1901 1901
1902 ret_val = e1000_check_polarity_igp(hw); 1902 ret_val = e1000_check_polarity_igp(hw);
1903 if (ret_val) 1903 if (ret_val)
@@ -1980,7 +1980,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
1980 if (ret_val) 1980 if (ret_val)
1981 return 0; 1981 return 0;
1982 1982
1983 ret_val = phy->ops.acquire_phy(hw); 1983 ret_val = phy->ops.acquire(hw);
1984 if (ret_val) 1984 if (ret_val)
1985 return ret_val; 1985 return ret_val;
1986 1986
@@ -1995,7 +1995,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
1995 1995
1996 udelay(150); 1996 udelay(150);
1997 1997
1998 phy->ops.release_phy(hw); 1998 phy->ops.release(hw);
1999 1999
2000 return e1000_get_phy_cfg_done(hw); 2000 return e1000_get_phy_cfg_done(hw);
2001} 2001}
@@ -2021,7 +2021,7 @@ s32 e1000e_get_cfg_done(struct e1000_hw *hw)
2021 **/ 2021 **/
2022s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) 2022s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
2023{ 2023{
2024 hw_dbg(hw, "Running IGP 3 PHY init script\n"); 2024 e_dbg("Running IGP 3 PHY init script\n");
2025 2025
2026 /* PHY init IGP 3 */ 2026 /* PHY init IGP 3 */
2027 /* Enable rise/fall, 10-mode work in class-A */ 2027 /* Enable rise/fall, 10-mode work in class-A */
@@ -2246,7 +2246,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2246 u32 page = offset >> IGP_PAGE_SHIFT; 2246 u32 page = offset >> IGP_PAGE_SHIFT;
2247 u32 page_shift = 0; 2247 u32 page_shift = 0;
2248 2248
2249 ret_val = hw->phy.ops.acquire_phy(hw); 2249 ret_val = hw->phy.ops.acquire(hw);
2250 if (ret_val) 2250 if (ret_val)
2251 return ret_val; 2251 return ret_val;
2252 2252
@@ -2284,7 +2284,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2284 data); 2284 data);
2285 2285
2286out: 2286out:
2287 hw->phy.ops.release_phy(hw); 2287 hw->phy.ops.release(hw);
2288 return ret_val; 2288 return ret_val;
2289} 2289}
2290 2290
@@ -2305,7 +2305,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2305 u32 page = offset >> IGP_PAGE_SHIFT; 2305 u32 page = offset >> IGP_PAGE_SHIFT;
2306 u32 page_shift = 0; 2306 u32 page_shift = 0;
2307 2307
2308 ret_val = hw->phy.ops.acquire_phy(hw); 2308 ret_val = hw->phy.ops.acquire(hw);
2309 if (ret_val) 2309 if (ret_val)
2310 return ret_val; 2310 return ret_val;
2311 2311
@@ -2342,7 +2342,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2342 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2342 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2343 data); 2343 data);
2344out: 2344out:
2345 hw->phy.ops.release_phy(hw); 2345 hw->phy.ops.release(hw);
2346 return ret_val; 2346 return ret_val;
2347} 2347}
2348 2348
@@ -2361,7 +2361,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2361 s32 ret_val; 2361 s32 ret_val;
2362 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2362 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2363 2363
2364 ret_val = hw->phy.ops.acquire_phy(hw); 2364 ret_val = hw->phy.ops.acquire(hw);
2365 if (ret_val) 2365 if (ret_val)
2366 return ret_val; 2366 return ret_val;
2367 2367
@@ -2387,7 +2387,7 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
2387 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2387 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2388 data); 2388 data);
2389out: 2389out:
2390 hw->phy.ops.release_phy(hw); 2390 hw->phy.ops.release(hw);
2391 return ret_val; 2391 return ret_val;
2392} 2392}
2393 2393
@@ -2405,7 +2405,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2405 s32 ret_val; 2405 s32 ret_val;
2406 u16 page = (u16)(offset >> IGP_PAGE_SHIFT); 2406 u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
2407 2407
2408 ret_val = hw->phy.ops.acquire_phy(hw); 2408 ret_val = hw->phy.ops.acquire(hw);
2409 if (ret_val) 2409 if (ret_val)
2410 return ret_val; 2410 return ret_val;
2411 2411
@@ -2431,7 +2431,7 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
2431 data); 2431 data);
2432 2432
2433out: 2433out:
2434 hw->phy.ops.release_phy(hw); 2434 hw->phy.ops.release(hw);
2435 return ret_val; 2435 return ret_val;
2436} 2436}
2437 2437
@@ -2464,7 +2464,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2464 /* Gig must be disabled for MDIO accesses to page 800 */ 2464 /* Gig must be disabled for MDIO accesses to page 800 */
2465 if ((hw->mac.type == e1000_pchlan) && 2465 if ((hw->mac.type == e1000_pchlan) &&
2466 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) 2466 (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
2467 hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); 2467 e_dbg("Attempting to access page 800 while gig enabled\n");
2468 2468
2469 /* All operations in this function are phy address 1 */ 2469 /* All operations in this function are phy address 1 */
2470 hw->phy.addr = 1; 2470 hw->phy.addr = 1;
@@ -2534,8 +2534,8 @@ out:
2534 **/ 2534 **/
2535s32 e1000e_commit_phy(struct e1000_hw *hw) 2535s32 e1000e_commit_phy(struct e1000_hw *hw)
2536{ 2536{
2537 if (hw->phy.ops.commit_phy) 2537 if (hw->phy.ops.commit)
2538 return hw->phy.ops.commit_phy(hw); 2538 return hw->phy.ops.commit(hw);
2539 2539
2540 return 0; 2540 return 0;
2541} 2541}
@@ -2614,7 +2614,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2614 bool in_slow_mode = false; 2614 bool in_slow_mode = false;
2615 2615
2616 if (!locked) { 2616 if (!locked) {
2617 ret_val = hw->phy.ops.acquire_phy(hw); 2617 ret_val = hw->phy.ops.acquire(hw);
2618 if (ret_val) 2618 if (ret_val)
2619 return ret_val; 2619 return ret_val;
2620 } 2620 }
@@ -2670,7 +2670,7 @@ out:
2670 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); 2670 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
2671 2671
2672 if (!locked) 2672 if (!locked)
2673 hw->phy.ops.release_phy(hw); 2673 hw->phy.ops.release(hw);
2674 2674
2675 return ret_val; 2675 return ret_val;
2676} 2676}
@@ -2723,7 +2723,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2723 bool in_slow_mode = false; 2723 bool in_slow_mode = false;
2724 2724
2725 if (!locked) { 2725 if (!locked) {
2726 ret_val = hw->phy.ops.acquire_phy(hw); 2726 ret_val = hw->phy.ops.acquire(hw);
2727 if (ret_val) 2727 if (ret_val)
2728 return ret_val; 2728 return ret_val;
2729 } 2729 }
@@ -2796,7 +2796,7 @@ out:
2796 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false); 2796 ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
2797 2797
2798 if (!locked) 2798 if (!locked)
2799 hw->phy.ops.release_phy(hw); 2799 hw->phy.ops.release(hw);
2800 2800
2801 return ret_val; 2801 return ret_val;
2802} 2802}
@@ -2872,7 +2872,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2872 /* masking with 0x3F to remove the page from offset */ 2872 /* masking with 0x3F to remove the page from offset */
2873 ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); 2873 ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
2874 if (ret_val) { 2874 if (ret_val) {
2875 hw_dbg(hw, "Could not write PHY the HV address register\n"); 2875 e_dbg("Could not write PHY the HV address register\n");
2876 goto out; 2876 goto out;
2877 } 2877 }
2878 2878
@@ -2883,7 +2883,7 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2883 ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); 2883 ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
2884 2884
2885 if (ret_val) { 2885 if (ret_val) {
2886 hw_dbg(hw, "Could not read data value from HV data register\n"); 2886 e_dbg("Could not read data value from HV data register\n");
2887 goto out; 2887 goto out;
2888 } 2888 }
2889 2889
@@ -2911,12 +2911,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
2911 goto out; 2911 goto out;
2912 2912
2913 /* Do not apply workaround if in PHY loopback bit 14 set */ 2913 /* Do not apply workaround if in PHY loopback bit 14 set */
2914 hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &data); 2914 hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
2915 if (data & PHY_CONTROL_LB) 2915 if (data & PHY_CONTROL_LB)
2916 goto out; 2916 goto out;
2917 2917
2918 /* check if link is up and at 1Gbps */ 2918 /* check if link is up and at 1Gbps */
2919 ret_val = hw->phy.ops.read_phy_reg(hw, BM_CS_STATUS, &data); 2919 ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
2920 if (ret_val) 2920 if (ret_val)
2921 goto out; 2921 goto out;
2922 2922
@@ -2932,13 +2932,13 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
2932 mdelay(200); 2932 mdelay(200);
2933 2933
2934 /* flush the packets in the fifo buffer */ 2934 /* flush the packets in the fifo buffer */
2935 ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, 2935 ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
2936 HV_MUX_DATA_CTRL_GEN_TO_MAC | 2936 HV_MUX_DATA_CTRL_GEN_TO_MAC |
2937 HV_MUX_DATA_CTRL_FORCE_SPEED); 2937 HV_MUX_DATA_CTRL_FORCE_SPEED);
2938 if (ret_val) 2938 if (ret_val)
2939 goto out; 2939 goto out;
2940 2940
2941 ret_val = hw->phy.ops.write_phy_reg(hw, HV_MUX_DATA_CTRL, 2941 ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
2942 HV_MUX_DATA_CTRL_GEN_TO_MAC); 2942 HV_MUX_DATA_CTRL_GEN_TO_MAC);
2943 2943
2944out: 2944out:
@@ -2959,7 +2959,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
2959 s32 ret_val; 2959 s32 ret_val;
2960 u16 data; 2960 u16 data;
2961 2961
2962 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); 2962 ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
2963 2963
2964 if (!ret_val) 2964 if (!ret_val)
2965 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) 2965 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -2984,13 +2984,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
2984 u16 phy_data; 2984 u16 phy_data;
2985 bool link; 2985 bool link;
2986 2986
2987 ret_val = phy->ops.read_phy_reg(hw, PHY_CONTROL, &phy_data); 2987 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
2988 if (ret_val) 2988 if (ret_val)
2989 goto out; 2989 goto out;
2990 2990
2991 e1000e_phy_force_speed_duplex_setup(hw, &phy_data); 2991 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
2992 2992
2993 ret_val = phy->ops.write_phy_reg(hw, PHY_CONTROL, phy_data); 2993 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
2994 if (ret_val) 2994 if (ret_val)
2995 goto out; 2995 goto out;
2996 2996
@@ -2998,23 +2998,23 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
2998 * Clear Auto-Crossover to force MDI manually. 82577 requires MDI 2998 * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
2999 * forced whenever speed and duplex are forced. 2999 * forced whenever speed and duplex are forced.
3000 */ 3000 */
3001 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_CTRL_2, &phy_data); 3001 ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
3002 if (ret_val) 3002 if (ret_val)
3003 goto out; 3003 goto out;
3004 3004
3005 phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX; 3005 phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
3006 phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX; 3006 phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
3007 3007
3008 ret_val = phy->ops.write_phy_reg(hw, I82577_PHY_CTRL_2, phy_data); 3008 ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
3009 if (ret_val) 3009 if (ret_val)
3010 goto out; 3010 goto out;
3011 3011
3012 hw_dbg(hw, "I82577_PHY_CTRL_2: %X\n", phy_data); 3012 e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data);
3013 3013
3014 udelay(1); 3014 udelay(1);
3015 3015
3016 if (phy->autoneg_wait_to_complete) { 3016 if (phy->autoneg_wait_to_complete) {
3017 hw_dbg(hw, "Waiting for forced speed/duplex link on 82577 phy\n"); 3017 e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
3018 3018
3019 ret_val = e1000e_phy_has_link_generic(hw, 3019 ret_val = e1000e_phy_has_link_generic(hw,
3020 PHY_FORCE_LIMIT, 3020 PHY_FORCE_LIMIT,
@@ -3024,7 +3024,7 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
3024 goto out; 3024 goto out;
3025 3025
3026 if (!link) 3026 if (!link)
3027 hw_dbg(hw, "Link taking longer than expected.\n"); 3027 e_dbg("Link taking longer than expected.\n");
3028 3028
3029 /* Try once more */ 3029 /* Try once more */
3030 ret_val = e1000e_phy_has_link_generic(hw, 3030 ret_val = e1000e_phy_has_link_generic(hw,
@@ -3060,7 +3060,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3060 goto out; 3060 goto out;
3061 3061
3062 if (!link) { 3062 if (!link) {
3063 hw_dbg(hw, "Phy info is only valid if link is up\n"); 3063 e_dbg("Phy info is only valid if link is up\n");
3064 ret_val = -E1000_ERR_CONFIG; 3064 ret_val = -E1000_ERR_CONFIG;
3065 goto out; 3065 goto out;
3066 } 3066 }
@@ -3071,7 +3071,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3071 if (ret_val) 3071 if (ret_val)
3072 goto out; 3072 goto out;
3073 3073
3074 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_STATUS_2, &data); 3074 ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
3075 if (ret_val) 3075 if (ret_val)
3076 goto out; 3076 goto out;
3077 3077
@@ -3083,7 +3083,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3083 if (ret_val) 3083 if (ret_val)
3084 goto out; 3084 goto out;
3085 3085
3086 ret_val = phy->ops.read_phy_reg(hw, PHY_1000T_STATUS, &data); 3086 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
3087 if (ret_val) 3087 if (ret_val)
3088 goto out; 3088 goto out;
3089 3089
@@ -3117,7 +3117,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
3117 s32 ret_val; 3117 s32 ret_val;
3118 u16 phy_data, length; 3118 u16 phy_data, length;
3119 3119
3120 ret_val = phy->ops.read_phy_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); 3120 ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
3121 if (ret_val) 3121 if (ret_val)
3122 goto out; 3122 goto out;
3123 3123
@@ -3125,7 +3125,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
3125 I82577_DSTATUS_CABLE_LENGTH_SHIFT; 3125 I82577_DSTATUS_CABLE_LENGTH_SHIFT;
3126 3126
3127 if (length == E1000_CABLE_LENGTH_UNDEFINED) 3127 if (length == E1000_CABLE_LENGTH_UNDEFINED)
3128 ret_val = E1000_ERR_PHY; 3128 ret_val = -E1000_ERR_PHY;
3129 3129
3130 phy->cable_length = length; 3130 phy->cable_length = length;
3131 3131
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 1e934160062c..94c59498cdb6 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -990,7 +990,7 @@ static int eepro_open(struct net_device *dev)
990 return -EAGAIN; 990 return -EAGAIN;
991 } 991 }
992 992
993 if (request_irq(dev->irq , &eepro_interrupt, 0, dev->name, dev)) { 993 if (request_irq(dev->irq , eepro_interrupt, 0, dev->name, dev)) {
994 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); 994 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
995 return -EAGAIN; 995 return -EAGAIN;
996 } 996 }
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 592de8f1668a..6fbfc8eee632 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -457,7 +457,7 @@ static int eexp_open(struct net_device *dev)
457 if (!dev->irq || !irqrmap[dev->irq]) 457 if (!dev->irq || !irqrmap[dev->irq])
458 return -ENXIO; 458 return -ENXIO;
459 459
460 ret = request_irq(dev->irq, &eexp_irq, 0, dev->name, dev); 460 ret = request_irq(dev->irq, eexp_irq, 0, dev->name, dev);
461 if (ret) 461 if (ret)
462 return ret; 462 return ret;
463 463
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 41bd7aeafd82..7f8fcc2fa748 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -447,7 +447,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
447 max_index_mask = q_skba->len - 1; 447 max_index_mask = q_skba->len - 1;
448 for (i = 0; i < fill_wqes; i++) { 448 for (i = 0; i < fill_wqes; i++) {
449 u64 tmp_addr; 449 u64 tmp_addr;
450 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); 450 struct sk_buff *skb;
451
452 skb = netdev_alloc_skb_ip_align(dev, packet_size);
451 if (!skb) { 453 if (!skb) {
452 q_skba->os_skbs = fill_wqes - i; 454 q_skba->os_skbs = fill_wqes - i;
453 if (q_skba->os_skbs == q_skba->len - 2) { 455 if (q_skba->os_skbs == q_skba->len - 2) {
@@ -457,7 +459,6 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
457 } 459 }
458 break; 460 break;
459 } 461 }
460 skb_reserve(skb, NET_IP_ALIGN);
461 462
462 skb_arr[index] = skb; 463 skb_arr[index] = skb;
463 tmp_addr = ehea_map_vaddr(skb->data); 464 tmp_addr = ehea_map_vaddr(skb->data);
@@ -500,7 +501,7 @@ static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
500{ 501{
501 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, 502 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
502 nr_of_wqes, EHEA_RWQE2_TYPE, 503 nr_of_wqes, EHEA_RWQE2_TYPE,
503 EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN); 504 EHEA_RQ2_PKT_SIZE);
504} 505}
505 506
506 507
@@ -508,7 +509,7 @@ static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
508{ 509{
509 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, 510 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
510 nr_of_wqes, EHEA_RWQE3_TYPE, 511 nr_of_wqes, EHEA_RWQE3_TYPE,
511 EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN); 512 EHEA_MAX_PACKET_SIZE);
512} 513}
513 514
514static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) 515static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d69d52ed7726..f875751af15e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -870,19 +870,6 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
870 dev_kfree_skb_any(buf->os_buf); 870 dev_kfree_skb_any(buf->os_buf);
871} 871}
872 872
873static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
874 unsigned int size)
875{
876 struct sk_buff *skb;
877
878 skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
879
880 if (skb)
881 skb_reserve(skb, NET_IP_ALIGN);
882
883 return skb;
884}
885
886static int enic_rq_alloc_buf(struct vnic_rq *rq) 873static int enic_rq_alloc_buf(struct vnic_rq *rq)
887{ 874{
888 struct enic *enic = vnic_dev_priv(rq->vdev); 875 struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -892,7 +879,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
892 unsigned int os_buf_index = 0; 879 unsigned int os_buf_index = 0;
893 dma_addr_t dma_addr; 880 dma_addr_t dma_addr;
894 881
895 skb = enic_rq_alloc_skb(netdev, len); 882 skb = netdev_alloc_skb_ip_align(netdev, len);
896 if (!skb) 883 if (!skb)
897 return -ENOMEM; 884 return -ENOMEM;
898 885
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 641a10d2e843..703b4c8e9b4d 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -668,7 +668,7 @@ static int epic_open(struct net_device *dev)
668 outl(0x4001, ioaddr + GENCTL); 668 outl(0x4001, ioaddr + GENCTL);
669 669
670 napi_enable(&ep->napi); 670 napi_enable(&ep->napi);
671 if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) { 671 if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) {
672 napi_disable(&ep->napi); 672 napi_disable(&ep->napi);
673 return retval; 673 return retval;
674 } 674 }
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index f7d9ac8324cb..bd1db92aec1b 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -406,10 +406,10 @@ static int ethoc_rx(struct net_device *dev, int limit)
406 406
407 if (ethoc_update_rx_stats(priv, &bd) == 0) { 407 if (ethoc_update_rx_stats(priv, &bd) == 0) {
408 int size = bd.stat >> 16; 408 int size = bd.stat >> 16;
409 struct sk_buff *skb = netdev_alloc_skb(dev, size); 409 struct sk_buff *skb;
410 410
411 size -= 4; /* strip the CRC */ 411 size -= 4; /* strip the CRC */
412 skb_reserve(skb, 2); /* align TCP/IP header */ 412 skb = netdev_alloc_skb_ip_align(dev, size);
413 413
414 if (likely(skb)) { 414 if (likely(skb)) {
415 void *src = phys_to_virt(bd.addr); 415 void *src = phys_to_virt(bd.addr);
@@ -641,7 +641,7 @@ static int ethoc_mdio_probe(struct net_device *dev)
641 return -ENXIO; 641 return -ENXIO;
642 } 642 }
643 643
644 phy = phy_connect(dev, dev_name(&phy->dev), &ethoc_mdio_poll, 0, 644 phy = phy_connect(dev, dev_name(&phy->dev), ethoc_mdio_poll, 0,
645 PHY_INTERFACE_MODE_GMII); 645 PHY_INTERFACE_MODE_GMII);
646 if (IS_ERR(phy)) { 646 if (IS_ERR(phy)) {
647 dev_err(&dev->dev, "could not attach to PHY\n"); 647 dev_err(&dev->dev, "could not attach to PHY\n");
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 18d5fbb9673e..e173515790c0 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -839,7 +839,7 @@ static int netdev_open(struct net_device *dev)
839 839
840 iowrite32(0x00000001, ioaddr + BCR); /* Reset */ 840 iowrite32(0x00000001, ioaddr + BCR); /* Reset */
841 841
842 if (request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev)) 842 if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev))
843 return -EAGAIN; 843 return -EAGAIN;
844 844
845 for (i = 0; i < 3; i++) 845 for (i = 0; i < 3; i++)
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 66dace6d324f..6407672b28e9 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -226,17 +226,17 @@ static int mpc52xx_fec_open(struct net_device *dev)
226 phy_start(priv->phydev); 226 phy_start(priv->phydev);
227 } 227 }
228 228
229 if (request_irq(dev->irq, &mpc52xx_fec_interrupt, IRQF_SHARED, 229 if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
230 DRIVER_NAME "_ctrl", dev)) { 230 DRIVER_NAME "_ctrl", dev)) {
231 dev_err(&dev->dev, "ctrl interrupt request failed\n"); 231 dev_err(&dev->dev, "ctrl interrupt request failed\n");
232 goto free_phy; 232 goto free_phy;
233 } 233 }
234 if (request_irq(priv->r_irq, &mpc52xx_fec_rx_interrupt, 0, 234 if (request_irq(priv->r_irq, mpc52xx_fec_rx_interrupt, 0,
235 DRIVER_NAME "_rx", dev)) { 235 DRIVER_NAME "_rx", dev)) {
236 dev_err(&dev->dev, "rx interrupt request failed\n"); 236 dev_err(&dev->dev, "rx interrupt request failed\n");
237 goto free_ctrl_irq; 237 goto free_ctrl_irq;
238 } 238 }
239 if (request_irq(priv->t_irq, &mpc52xx_fec_tx_interrupt, 0, 239 if (request_irq(priv->t_irq, mpc52xx_fec_tx_interrupt, 0,
240 DRIVER_NAME "_tx", dev)) { 240 DRIVER_NAME "_tx", dev)) {
241 dev_err(&dev->dev, "tx interrupt request failed\n"); 241 dev_err(&dev->dev, "tx interrupt request failed\n");
242 goto free_2irqs; 242 goto free_2irqs;
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3116601dbfea..3c340489804a 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -4004,7 +4004,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
4004 /* Request irq for rx handling */ 4004 /* Request irq for rx handling */
4005 sprintf(np->name_rx, "%s-rx", dev->name); 4005 sprintf(np->name_rx, "%s-rx", dev->name);
4006 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 4006 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
4007 &nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 4007 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
4008 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 4008 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
4009 pci_disable_msix(np->pci_dev); 4009 pci_disable_msix(np->pci_dev);
4010 np->msi_flags &= ~NV_MSI_X_ENABLED; 4010 np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -4013,7 +4013,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
4013 /* Request irq for tx handling */ 4013 /* Request irq for tx handling */
4014 sprintf(np->name_tx, "%s-tx", dev->name); 4014 sprintf(np->name_tx, "%s-tx", dev->name);
4015 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 4015 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
4016 &nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 4016 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
4017 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 4017 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
4018 pci_disable_msix(np->pci_dev); 4018 pci_disable_msix(np->pci_dev);
4019 np->msi_flags &= ~NV_MSI_X_ENABLED; 4019 np->msi_flags &= ~NV_MSI_X_ENABLED;
@@ -4022,7 +4022,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
4022 /* Request irq for link and timer handling */ 4022 /* Request irq for link and timer handling */
4023 sprintf(np->name_other, "%s-other", dev->name); 4023 sprintf(np->name_other, "%s-other", dev->name);
4024 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 4024 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
4025 &nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 4025 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
4026 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 4026 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
4027 pci_disable_msix(np->pci_dev); 4027 pci_disable_msix(np->pci_dev);
4028 np->msi_flags &= ~NV_MSI_X_ENABLED; 4028 np->msi_flags &= ~NV_MSI_X_ENABLED;
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index efbf67689eca..25fabb3eedc5 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -3,8 +3,9 @@
3 * Provides Bus interface for MIIM regs 3 * Provides Bus interface for MIIM regs
4 * 4 *
5 * Author: Andy Fleming <afleming@freescale.com> 5 * Author: Andy Fleming <afleming@freescale.com>
6 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
6 * 7 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc. 8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
8 * 9 *
9 * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips) 10 * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
10 * 11 *
@@ -102,13 +103,18 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
102 return value; 103 return value;
103} 104}
104 105
106static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
107{
108 return (void __iomem __force *)bus->priv;
109}
110
105/* 111/*
106 * Write value to the PHY at mii_id at register regnum, 112 * Write value to the PHY at mii_id at register regnum,
107 * on the bus, waiting until the write is done before returning. 113 * on the bus, waiting until the write is done before returning.
108 */ 114 */
109int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) 115int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
110{ 116{
111 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv; 117 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
112 118
113 /* Write to the local MII regs */ 119 /* Write to the local MII regs */
114 return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value)); 120 return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value));
@@ -120,7 +126,7 @@ int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
120 */ 126 */
121int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 127int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
122{ 128{
123 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv; 129 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
124 130
125 /* Read the local MII regs */ 131 /* Read the local MII regs */
126 return(fsl_pq_local_mdio_read(regs, mii_id, regnum)); 132 return(fsl_pq_local_mdio_read(regs, mii_id, regnum));
@@ -129,7 +135,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
129/* Reset the MIIM registers, and wait for the bus to free */ 135/* Reset the MIIM registers, and wait for the bus to free */
130static int fsl_pq_mdio_reset(struct mii_bus *bus) 136static int fsl_pq_mdio_reset(struct mii_bus *bus)
131{ 137{
132 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv; 138 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
133 int timeout = PHY_INIT_TIMEOUT; 139 int timeout = PHY_INIT_TIMEOUT;
134 140
135 mutex_lock(&bus->mdio_lock); 141 mutex_lock(&bus->mdio_lock);
@@ -189,19 +195,29 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
189 195
190 196
191#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 197#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
192static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) 198static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
193{ 199{
194 struct gfar __iomem *enet_regs; 200 struct gfar __iomem *enet_regs;
201 u32 __iomem *ioremap_tbipa;
202 u64 addr, size;
195 203
196 /* 204 /*
197 * This is mildly evil, but so is our hardware for doing this. 205 * This is mildly evil, but so is our hardware for doing this.
198 * Also, we have to cast back to struct gfar because of 206 * Also, we have to cast back to struct gfar because of
199 * definition weirdness done in gianfar.h. 207 * definition weirdness done in gianfar.h.
200 */ 208 */
201 enet_regs = (struct gfar __iomem *) 209 if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
202 ((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs)); 210 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
203 211 of_device_is_compatible(np, "gianfar")) {
204 return &enet_regs->tbipa; 212 enet_regs = (struct gfar __iomem *)regs;
213 return &enet_regs->tbipa;
214 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
215 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
216 addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
217 ioremap_tbipa = ioremap(addr, size);
218 return ioremap_tbipa;
219 } else
220 return NULL;
205} 221}
206#endif 222#endif
207 223
@@ -250,11 +266,12 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
250{ 266{
251 struct device_node *np = ofdev->node; 267 struct device_node *np = ofdev->node;
252 struct device_node *tbi; 268 struct device_node *tbi;
253 struct fsl_pq_mdio __iomem *regs; 269 struct fsl_pq_mdio __iomem *regs = NULL;
270 void __iomem *map;
254 u32 __iomem *tbipa; 271 u32 __iomem *tbipa;
255 struct mii_bus *new_bus; 272 struct mii_bus *new_bus;
256 int tbiaddr = -1; 273 int tbiaddr = -1;
257 u64 addr, size; 274 u64 addr = 0, size = 0;
258 int err = 0; 275 int err = 0;
259 276
260 new_bus = mdiobus_alloc(); 277 new_bus = mdiobus_alloc();
@@ -269,13 +286,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
269 286
270 /* Set the PHY base address */ 287 /* Set the PHY base address */
271 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 288 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
272 regs = ioremap(addr, size); 289 map = ioremap(addr, size);
273 290 if (!map) {
274 if (NULL == regs) {
275 err = -ENOMEM; 291 err = -ENOMEM;
276 goto err_free_bus; 292 goto err_free_bus;
277 } 293 }
278 294
295 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
296 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
297 of_device_is_compatible(np, "fsl,ucc-mdio") ||
298 of_device_is_compatible(np, "ucc_geth_phy"))
299 map -= offsetof(struct fsl_pq_mdio, miimcfg);
300 regs = map;
301
279 new_bus->priv = (void __force *)regs; 302 new_bus->priv = (void __force *)regs;
280 303
281 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); 304 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
@@ -290,9 +313,15 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
290 313
291 if (of_device_is_compatible(np, "fsl,gianfar-mdio") || 314 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
292 of_device_is_compatible(np, "fsl,gianfar-tbi") || 315 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
316 of_device_is_compatible(np, "fsl,etsec2-mdio") ||
317 of_device_is_compatible(np, "fsl,etsec2-tbi") ||
293 of_device_is_compatible(np, "gianfar")) { 318 of_device_is_compatible(np, "gianfar")) {
294#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 319#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
295 tbipa = get_gfar_tbipa(regs); 320 tbipa = get_gfar_tbipa(regs, np);
321 if (!tbipa) {
322 err = -EINVAL;
323 goto err_free_irqs;
324 }
296#else 325#else
297 err = -ENODEV; 326 err = -ENODEV;
298 goto err_free_irqs; 327 goto err_free_irqs;
@@ -380,7 +409,7 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
380 409
381 dev_set_drvdata(device, NULL); 410 dev_set_drvdata(device, NULL);
382 411
383 iounmap((void __iomem *)bus->priv); 412 iounmap(fsl_pq_mdio_get_regs(bus));
384 bus->priv = NULL; 413 bus->priv = NULL;
385 mdiobus_free(bus); 414 mdiobus_free(bus);
386 415
@@ -405,6 +434,12 @@ static struct of_device_id fsl_pq_mdio_match[] = {
405 { 434 {
406 .compatible = "fsl,gianfar-mdio", 435 .compatible = "fsl,gianfar-mdio",
407 }, 436 },
437 {
438 .compatible = "fsl,etsec2-tbi",
439 },
440 {
441 .compatible = "fsl,etsec2-mdio",
442 },
408 {}, 443 {},
409}; 444};
410MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); 445MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 36dad527410b..1f7d865cedb6 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -3,8 +3,9 @@
3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors 3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors
4 * 4 *
5 * Author: Andy Fleming 5 * Author: Andy Fleming
6 * Modifier: Sandeep Gopalpet
6 * 7 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc. 8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 11 * under the terms of the GNU General Public License as published by the
@@ -23,6 +24,12 @@
23#define MII_READ_COMMAND 0x00000001 24#define MII_READ_COMMAND 0x00000001
24 25
25struct fsl_pq_mdio { 26struct fsl_pq_mdio {
27 u8 res1[16];
28 u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
29 u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
30 u8 res2[4];
31 u32 emapm; /* MDIO Event mapping register (for etsec2)*/
32 u8 res3[1280];
26 u32 miimcfg; /* MII management configuration reg */ 33 u32 miimcfg; /* MII management configuration reg */
27 u32 miimcom; /* MII management command reg */ 34 u32 miimcom; /* MII management command reg */
28 u32 miimadd; /* MII management address reg */ 35 u32 miimadd; /* MII management address reg */
@@ -31,9 +38,9 @@ struct fsl_pq_mdio {
31 u32 miimind; /* MII management indication reg */ 38 u32 miimind; /* MII management indication reg */
32 u8 reserved[28]; /* Space holder */ 39 u8 reserved[28]; /* Space holder */
33 u32 utbipar; /* TBI phy address reg (only on UCC) */ 40 u32 utbipar; /* TBI phy address reg (only on UCC) */
41 u8 res4[2728];
34} __attribute__ ((packed)); 42} __attribute__ ((packed));
35 43
36
37int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); 44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
38int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
39int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, 46int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5bf31f1509c9..16def131c390 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala 10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * Copyright (c) 2007 MontaVista Software, Inc. 14 * Copyright 2007 MontaVista Software, Inc.
14 * 15 *
15 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
109static void gfar_timeout(struct net_device *dev); 110static void gfar_timeout(struct net_device *dev);
110static int gfar_close(struct net_device *dev); 111static int gfar_close(struct net_device *dev);
111struct sk_buff *gfar_new_skb(struct net_device *dev); 112struct sk_buff *gfar_new_skb(struct net_device *dev);
112static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 113static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
113 struct sk_buff *skb); 114 struct sk_buff *skb);
114static int gfar_set_mac_address(struct net_device *dev); 115static int gfar_set_mac_address(struct net_device *dev);
115static int gfar_change_mtu(struct net_device *dev, int new_mtu); 116static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
130#ifdef CONFIG_NET_POLL_CONTROLLER 131#ifdef CONFIG_NET_POLL_CONTROLLER
131static void gfar_netpoll(struct net_device *dev); 132static void gfar_netpoll(struct net_device *dev);
132#endif 133#endif
133int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
134static int gfar_clean_tx_ring(struct net_device *dev); 135static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
135static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 136static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
136 int amount_pull); 137 int amount_pull);
137static void gfar_vlan_rx_register(struct net_device *netdev, 138static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -142,11 +143,277 @@ void gfar_start(struct net_device *dev);
142static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
143static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
144static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
145 147
146MODULE_AUTHOR("Freescale Semiconductor, Inc"); 148MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL"); 150MODULE_LICENSE("GPL");
149 151
152static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
153 dma_addr_t buf)
154{
155 u32 lstatus;
156
157 bdp->bufPtr = buf;
158
159 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
160 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
161 lstatus |= BD_LFLAG(RXBD_WRAP);
162
163 eieio();
164
165 bdp->lstatus = lstatus;
166}
167
168static int gfar_init_bds(struct net_device *ndev)
169{
170 struct gfar_private *priv = netdev_priv(ndev);
171 struct gfar_priv_tx_q *tx_queue = NULL;
172 struct gfar_priv_rx_q *rx_queue = NULL;
173 struct txbd8 *txbdp;
174 struct rxbd8 *rxbdp;
175 int i, j;
176
177 for (i = 0; i < priv->num_tx_queues; i++) {
178 tx_queue = priv->tx_queue[i];
179 /* Initialize some variables in our dev structure */
180 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
181 tx_queue->dirty_tx = tx_queue->tx_bd_base;
182 tx_queue->cur_tx = tx_queue->tx_bd_base;
183 tx_queue->skb_curtx = 0;
184 tx_queue->skb_dirtytx = 0;
185
186 /* Initialize Transmit Descriptor Ring */
187 txbdp = tx_queue->tx_bd_base;
188 for (j = 0; j < tx_queue->tx_ring_size; j++) {
189 txbdp->lstatus = 0;
190 txbdp->bufPtr = 0;
191 txbdp++;
192 }
193
194 /* Set the last descriptor in the ring to indicate wrap */
195 txbdp--;
196 txbdp->status |= TXBD_WRAP;
197 }
198
199 for (i = 0; i < priv->num_rx_queues; i++) {
200 rx_queue = priv->rx_queue[i];
201 rx_queue->cur_rx = rx_queue->rx_bd_base;
202 rx_queue->skb_currx = 0;
203 rxbdp = rx_queue->rx_bd_base;
204
205 for (j = 0; j < rx_queue->rx_ring_size; j++) {
206 struct sk_buff *skb = rx_queue->rx_skbuff[j];
207
208 if (skb) {
209 gfar_init_rxbdp(rx_queue, rxbdp,
210 rxbdp->bufPtr);
211 } else {
212 skb = gfar_new_skb(ndev);
213 if (!skb) {
214 pr_err("%s: Can't allocate RX buffers\n",
215 ndev->name);
216 goto err_rxalloc_fail;
217 }
218 rx_queue->rx_skbuff[j] = skb;
219
220 gfar_new_rxbdp(rx_queue, rxbdp, skb);
221 }
222
223 rxbdp++;
224 }
225
226 }
227
228 return 0;
229
230err_rxalloc_fail:
231 free_skb_resources(priv);
232 return -ENOMEM;
233}
234
235static int gfar_alloc_skb_resources(struct net_device *ndev)
236{
237 void *vaddr;
238 dma_addr_t addr;
239 int i, j, k;
240 struct gfar_private *priv = netdev_priv(ndev);
241 struct device *dev = &priv->ofdev->dev;
242 struct gfar_priv_tx_q *tx_queue = NULL;
243 struct gfar_priv_rx_q *rx_queue = NULL;
244
245 priv->total_tx_ring_size = 0;
246 for (i = 0; i < priv->num_tx_queues; i++)
247 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248
249 priv->total_rx_ring_size = 0;
250 for (i = 0; i < priv->num_rx_queues; i++)
251 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
252
253 /* Allocate memory for the buffer descriptors */
254 vaddr = dma_alloc_coherent(dev,
255 sizeof(struct txbd8) * priv->total_tx_ring_size +
256 sizeof(struct rxbd8) * priv->total_rx_ring_size,
257 &addr, GFP_KERNEL);
258 if (!vaddr) {
259 if (netif_msg_ifup(priv))
260 pr_err("%s: Could not allocate buffer descriptors!\n",
261 ndev->name);
262 return -ENOMEM;
263 }
264
265 for (i = 0; i < priv->num_tx_queues; i++) {
266 tx_queue = priv->tx_queue[i];
267 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
268 tx_queue->tx_bd_dma_base = addr;
269 tx_queue->dev = ndev;
270 /* enet DMA only understands physical addresses */
271 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
272 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
273 }
274
275 /* Start the rx descriptor ring where the tx ring leaves off */
276 for (i = 0; i < priv->num_rx_queues; i++) {
277 rx_queue = priv->rx_queue[i];
278 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
279 rx_queue->rx_bd_dma_base = addr;
280 rx_queue->dev = ndev;
281 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
282 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
283 }
284
285 /* Setup the skbuff rings */
286 for (i = 0; i < priv->num_tx_queues; i++) {
287 tx_queue = priv->tx_queue[i];
288 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
289 tx_queue->tx_ring_size, GFP_KERNEL);
290 if (!tx_queue->tx_skbuff) {
291 if (netif_msg_ifup(priv))
292 pr_err("%s: Could not allocate tx_skbuff\n",
293 ndev->name);
294 goto cleanup;
295 }
296
297 for (k = 0; k < tx_queue->tx_ring_size; k++)
298 tx_queue->tx_skbuff[k] = NULL;
299 }
300
301 for (i = 0; i < priv->num_rx_queues; i++) {
302 rx_queue = priv->rx_queue[i];
303 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
304 rx_queue->rx_ring_size, GFP_KERNEL);
305
306 if (!rx_queue->rx_skbuff) {
307 if (netif_msg_ifup(priv))
308 pr_err("%s: Could not allocate rx_skbuff\n",
309 ndev->name);
310 goto cleanup;
311 }
312
313 for (j = 0; j < rx_queue->rx_ring_size; j++)
314 rx_queue->rx_skbuff[j] = NULL;
315 }
316
317 if (gfar_init_bds(ndev))
318 goto cleanup;
319
320 return 0;
321
322cleanup:
323 free_skb_resources(priv);
324 return -ENOMEM;
325}
326
327static void gfar_init_tx_rx_base(struct gfar_private *priv)
328{
329 struct gfar __iomem *regs = priv->gfargrp[0].regs;
330 u32 __iomem *baddr;
331 int i;
332
333 baddr = &regs->tbase0;
334 for(i = 0; i < priv->num_tx_queues; i++) {
335 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
336 baddr += 2;
337 }
338
339 baddr = &regs->rbase0;
340 for(i = 0; i < priv->num_rx_queues; i++) {
341 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
342 baddr += 2;
343 }
344}
345
346static void gfar_init_mac(struct net_device *ndev)
347{
348 struct gfar_private *priv = netdev_priv(ndev);
349 struct gfar __iomem *regs = priv->gfargrp[0].regs;
350 u32 rctrl = 0;
351 u32 tctrl = 0;
352 u32 attrs = 0;
353
354 /* write the tx/rx base registers */
355 gfar_init_tx_rx_base(priv);
356
357 /* Configure the coalescing support */
358 gfar_configure_coalescing(priv, 0xFF, 0xFF);
359
360 if (priv->rx_filer_enable)
361 rctrl |= RCTRL_FILREN;
362
363 if (priv->rx_csum_enable)
364 rctrl |= RCTRL_CHECKSUMMING;
365
366 if (priv->extended_hash) {
367 rctrl |= RCTRL_EXTHASH;
368
369 gfar_clear_exact_match(ndev);
370 rctrl |= RCTRL_EMEN;
371 }
372
373 if (priv->padding) {
374 rctrl &= ~RCTRL_PAL_MASK;
375 rctrl |= RCTRL_PADDING(priv->padding);
376 }
377
378 /* keep vlan related bits if it's enabled */
379 if (priv->vlgrp) {
380 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
381 tctrl |= TCTRL_VLINS;
382 }
383
384 /* Init rctrl based on our settings */
385 gfar_write(&regs->rctrl, rctrl);
386
387 if (ndev->features & NETIF_F_IP_CSUM)
388 tctrl |= TCTRL_INIT_CSUM;
389
390 tctrl |= TCTRL_TXSCHED_PRIO;
391
392 gfar_write(&regs->tctrl, tctrl);
393
394 /* Set the extraction length and index */
395 attrs = ATTRELI_EL(priv->rx_stash_size) |
396 ATTRELI_EI(priv->rx_stash_index);
397
398 gfar_write(&regs->attreli, attrs);
399
400 /* Start with defaults, and add stashing or locking
401 * depending on the approprate variables */
402 attrs = ATTR_INIT_SETTINGS;
403
404 if (priv->bd_stash_en)
405 attrs |= ATTR_BDSTASH;
406
407 if (priv->rx_stash_size != 0)
408 attrs |= ATTR_BUFSTASH;
409
410 gfar_write(&regs->attr, attrs);
411
412 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
413 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
414 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
415}
416
150static const struct net_device_ops gfar_netdev_ops = { 417static const struct net_device_ops gfar_netdev_ops = {
151 .ndo_open = gfar_enet_open, 418 .ndo_open = gfar_enet_open,
152 .ndo_start_xmit = gfar_start_xmit, 419 .ndo_start_xmit = gfar_start_xmit,
@@ -155,6 +422,7 @@ static const struct net_device_ops gfar_netdev_ops = {
155 .ndo_set_multicast_list = gfar_set_multi, 422 .ndo_set_multicast_list = gfar_set_multi,
156 .ndo_tx_timeout = gfar_timeout, 423 .ndo_tx_timeout = gfar_timeout,
157 .ndo_do_ioctl = gfar_ioctl, 424 .ndo_do_ioctl = gfar_ioctl,
425 .ndo_select_queue = gfar_select_queue,
158 .ndo_vlan_rx_register = gfar_vlan_rx_register, 426 .ndo_vlan_rx_register = gfar_vlan_rx_register,
159 .ndo_set_mac_address = eth_mac_addr, 427 .ndo_set_mac_address = eth_mac_addr,
160 .ndo_validate_addr = eth_validate_addr, 428 .ndo_validate_addr = eth_validate_addr,
@@ -163,56 +431,252 @@ static const struct net_device_ops gfar_netdev_ops = {
163#endif 431#endif
164}; 432};
165 433
434unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
435unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
436
437void lock_rx_qs(struct gfar_private *priv)
438{
439 int i = 0x0;
440
441 for (i = 0; i < priv->num_rx_queues; i++)
442 spin_lock(&priv->rx_queue[i]->rxlock);
443}
444
445void lock_tx_qs(struct gfar_private *priv)
446{
447 int i = 0x0;
448
449 for (i = 0; i < priv->num_tx_queues; i++)
450 spin_lock(&priv->tx_queue[i]->txlock);
451}
452
453void unlock_rx_qs(struct gfar_private *priv)
454{
455 int i = 0x0;
456
457 for (i = 0; i < priv->num_rx_queues; i++)
458 spin_unlock(&priv->rx_queue[i]->rxlock);
459}
460
461void unlock_tx_qs(struct gfar_private *priv)
462{
463 int i = 0x0;
464
465 for (i = 0; i < priv->num_tx_queues; i++)
466 spin_unlock(&priv->tx_queue[i]->txlock);
467}
468
166/* Returns 1 if incoming frames use an FCB */ 469/* Returns 1 if incoming frames use an FCB */
167static inline int gfar_uses_fcb(struct gfar_private *priv) 470static inline int gfar_uses_fcb(struct gfar_private *priv)
168{ 471{
169 return priv->vlgrp || priv->rx_csum_enable; 472 return priv->vlgrp || priv->rx_csum_enable;
170} 473}
171 474
172static int gfar_of_init(struct net_device *dev) 475u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
476{
477 return skb_get_queue_mapping(skb);
478}
479static void free_tx_pointers(struct gfar_private *priv)
480{
481 int i = 0;
482
483 for (i = 0; i < priv->num_tx_queues; i++)
484 kfree(priv->tx_queue[i]);
485}
486
487static void free_rx_pointers(struct gfar_private *priv)
488{
489 int i = 0;
490
491 for (i = 0; i < priv->num_rx_queues; i++)
492 kfree(priv->rx_queue[i]);
493}
494
495static void unmap_group_regs(struct gfar_private *priv)
496{
497 int i = 0;
498
499 for (i = 0; i < MAXGROUPS; i++)
500 if (priv->gfargrp[i].regs)
501 iounmap(priv->gfargrp[i].regs);
502}
503
504static void disable_napi(struct gfar_private *priv)
505{
506 int i = 0;
507
508 for (i = 0; i < priv->num_grps; i++)
509 napi_disable(&priv->gfargrp[i].napi);
510}
511
512static void enable_napi(struct gfar_private *priv)
513{
514 int i = 0;
515
516 for (i = 0; i < priv->num_grps; i++)
517 napi_enable(&priv->gfargrp[i].napi);
518}
519
520static int gfar_parse_group(struct device_node *np,
521 struct gfar_private *priv, const char *model)
522{
523 u32 *queue_mask;
524 u64 addr, size;
525
526 addr = of_translate_address(np,
527 of_get_address(np, 0, &size, NULL));
528 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
529
530 if (!priv->gfargrp[priv->num_grps].regs)
531 return -ENOMEM;
532
533 priv->gfargrp[priv->num_grps].interruptTransmit =
534 irq_of_parse_and_map(np, 0);
535
536 /* If we aren't the FEC we have multiple interrupts */
537 if (model && strcasecmp(model, "FEC")) {
538 priv->gfargrp[priv->num_grps].interruptReceive =
539 irq_of_parse_and_map(np, 1);
540 priv->gfargrp[priv->num_grps].interruptError =
541 irq_of_parse_and_map(np,2);
542 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
543 priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
544 priv->gfargrp[priv->num_grps].interruptError < 0) {
545 return -EINVAL;
546 }
547 }
548
549 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
550 priv->gfargrp[priv->num_grps].priv = priv;
551 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
552 if(priv->mode == MQ_MG_MODE) {
553 queue_mask = (u32 *)of_get_property(np,
554 "fsl,rx-bit-map", NULL);
555 priv->gfargrp[priv->num_grps].rx_bit_map =
556 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
557 queue_mask = (u32 *)of_get_property(np,
558 "fsl,tx-bit-map", NULL);
559 priv->gfargrp[priv->num_grps].tx_bit_map =
560 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
561 } else {
562 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
563 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
564 }
565 priv->num_grps++;
566
567 return 0;
568}
569
570static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
173{ 571{
174 const char *model; 572 const char *model;
175 const char *ctype; 573 const char *ctype;
176 const void *mac_addr; 574 const void *mac_addr;
177 u64 addr, size; 575 int err = 0, i;
178 int err = 0; 576 struct net_device *dev = NULL;
179 struct gfar_private *priv = netdev_priv(dev); 577 struct gfar_private *priv = NULL;
180 struct device_node *np = priv->node; 578 struct device_node *np = ofdev->node;
579 struct device_node *child = NULL;
181 const u32 *stash; 580 const u32 *stash;
182 const u32 *stash_len; 581 const u32 *stash_len;
183 const u32 *stash_idx; 582 const u32 *stash_idx;
583 unsigned int num_tx_qs, num_rx_qs;
584 u32 *tx_queues, *rx_queues;
184 585
185 if (!np || !of_device_is_available(np)) 586 if (!np || !of_device_is_available(np))
186 return -ENODEV; 587 return -ENODEV;
187 588
188 /* get a pointer to the register memory */ 589 /* parse the num of tx and rx queues */
189 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 590 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
190 priv->regs = ioremap(addr, size); 591 num_tx_qs = tx_queues ? *tx_queues : 1;
592
593 if (num_tx_qs > MAX_TX_QS) {
594 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
595 num_tx_qs, MAX_TX_QS);
596 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
597 return -EINVAL;
598 }
599
600 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
601 num_rx_qs = rx_queues ? *rx_queues : 1;
191 602
192 if (priv->regs == NULL) 603 if (num_rx_qs > MAX_RX_QS) {
604 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
605 num_tx_qs, MAX_TX_QS);
606 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
607 return -EINVAL;
608 }
609
610 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
611 dev = *pdev;
612 if (NULL == dev)
193 return -ENOMEM; 613 return -ENOMEM;
194 614
195 priv->interruptTransmit = irq_of_parse_and_map(np, 0); 615 priv = netdev_priv(dev);
616 priv->node = ofdev->node;
617 priv->ndev = dev;
618
619 dev->num_tx_queues = num_tx_qs;
620 dev->real_num_tx_queues = num_tx_qs;
621 priv->num_tx_queues = num_tx_qs;
622 priv->num_rx_queues = num_rx_qs;
623 priv->num_grps = 0x0;
196 624
197 model = of_get_property(np, "model", NULL); 625 model = of_get_property(np, "model", NULL);
198 626
199 /* If we aren't the FEC we have multiple interrupts */ 627 for (i = 0; i < MAXGROUPS; i++)
200 if (model && strcasecmp(model, "FEC")) { 628 priv->gfargrp[i].regs = NULL;
201 priv->interruptReceive = irq_of_parse_and_map(np, 1); 629
630 /* Parse and initialize group specific information */
631 if (of_device_is_compatible(np, "fsl,etsec2")) {
632 priv->mode = MQ_MG_MODE;
633 for_each_child_of_node(np, child) {
634 err = gfar_parse_group(child, priv, model);
635 if (err)
636 goto err_grp_init;
637 }
638 } else {
639 priv->mode = SQ_SG_MODE;
640 err = gfar_parse_group(np, priv, model);
641 if(err)
642 goto err_grp_init;
643 }
202 644
203 priv->interruptError = irq_of_parse_and_map(np, 2); 645 for (i = 0; i < priv->num_tx_queues; i++)
646 priv->tx_queue[i] = NULL;
647 for (i = 0; i < priv->num_rx_queues; i++)
648 priv->rx_queue[i] = NULL;
649
650 for (i = 0; i < priv->num_tx_queues; i++) {
651 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
652 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
653 if (!priv->tx_queue[i]) {
654 err = -ENOMEM;
655 goto tx_alloc_failed;
656 }
657 priv->tx_queue[i]->tx_skbuff = NULL;
658 priv->tx_queue[i]->qindex = i;
659 priv->tx_queue[i]->dev = dev;
660 spin_lock_init(&(priv->tx_queue[i]->txlock));
661 }
204 662
205 if (priv->interruptTransmit < 0 || 663 for (i = 0; i < priv->num_rx_queues; i++) {
206 priv->interruptReceive < 0 || 664 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
207 priv->interruptError < 0) { 665 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
208 err = -EINVAL; 666 if (!priv->rx_queue[i]) {
209 goto err_out; 667 err = -ENOMEM;
668 goto rx_alloc_failed;
210 } 669 }
670 priv->rx_queue[i]->rx_skbuff = NULL;
671 priv->rx_queue[i]->qindex = i;
672 priv->rx_queue[i]->dev = dev;
673 spin_lock_init(&(priv->rx_queue[i]->rxlock));
211 } 674 }
212 675
676
213 stash = of_get_property(np, "bd-stash", NULL); 677 stash = of_get_property(np, "bd-stash", NULL);
214 678
215 if(stash) { 679 if (stash) {
216 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 680 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
217 priv->bd_stash_en = 1; 681 priv->bd_stash_en = 1;
218 } 682 }
@@ -270,8 +734,13 @@ static int gfar_of_init(struct net_device *dev)
270 734
271 return 0; 735 return 0;
272 736
273err_out: 737rx_alloc_failed:
274 iounmap(priv->regs); 738 free_rx_pointers(priv);
739tx_alloc_failed:
740 free_tx_pointers(priv);
741err_grp_init:
742 unmap_group_regs(priv);
743 free_netdev(dev);
275 return err; 744 return err;
276} 745}
277 746
@@ -289,6 +758,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
289 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 758 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
290} 759}
291 760
761static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
762{
763 unsigned int new_bit_map = 0x0;
764 int mask = 0x1 << (max_qs - 1), i;
765 for (i = 0; i < max_qs; i++) {
766 if (bit_map & mask)
767 new_bit_map = new_bit_map + (1 << i);
768 mask = mask >> 0x1;
769 }
770 return new_bit_map;
771}
772
773static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
774 u32 class)
775{
776 u32 rqfpr = FPR_FILER_MASK;
777 u32 rqfcr = 0x0;
778
779 rqfar--;
780 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
781 ftp_rqfpr[rqfar] = rqfpr;
782 ftp_rqfcr[rqfar] = rqfcr;
783 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
784
785 rqfar--;
786 rqfcr = RQFCR_CMP_NOMATCH;
787 ftp_rqfpr[rqfar] = rqfpr;
788 ftp_rqfcr[rqfar] = rqfcr;
789 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
790
791 rqfar--;
792 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
793 rqfpr = class;
794 ftp_rqfcr[rqfar] = rqfcr;
795 ftp_rqfpr[rqfar] = rqfpr;
796 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
797
798 rqfar--;
799 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
800 rqfpr = class;
801 ftp_rqfcr[rqfar] = rqfcr;
802 ftp_rqfpr[rqfar] = rqfpr;
803 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
804
805 return rqfar;
806}
807
808static void gfar_init_filer_table(struct gfar_private *priv)
809{
810 int i = 0x0;
811 u32 rqfar = MAX_FILER_IDX;
812 u32 rqfcr = 0x0;
813 u32 rqfpr = FPR_FILER_MASK;
814
815 /* Default rule */
816 rqfcr = RQFCR_CMP_MATCH;
817 ftp_rqfcr[rqfar] = rqfcr;
818 ftp_rqfpr[rqfar] = rqfpr;
819 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
820
821 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
822 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
823 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
824 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
825 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
826 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
827
828 /* cur_filer_idx indicated the fisrt non-masked rule */
829 priv->cur_filer_idx = rqfar;
830
831 /* Rest are masked rules */
832 rqfcr = RQFCR_CMP_NOMATCH;
833 for (i = 0; i < rqfar; i++) {
834 ftp_rqfcr[i] = rqfcr;
835 ftp_rqfpr[i] = rqfpr;
836 gfar_write_filer(priv, i, rqfcr, rqfpr);
837 }
838}
839
292/* Set up the ethernet device structure, private data, 840/* Set up the ethernet device structure, private data,
293 * and anything else we need before we start */ 841 * and anything else we need before we start */
294static int gfar_probe(struct of_device *ofdev, 842static int gfar_probe(struct of_device *ofdev,
@@ -297,14 +845,17 @@ static int gfar_probe(struct of_device *ofdev,
297 u32 tempval; 845 u32 tempval;
298 struct net_device *dev = NULL; 846 struct net_device *dev = NULL;
299 struct gfar_private *priv = NULL; 847 struct gfar_private *priv = NULL;
300 int err = 0; 848 struct gfar __iomem *regs = NULL;
849 int err = 0, i, grp_idx = 0;
301 int len_devname; 850 int len_devname;
851 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
852 u32 isrg = 0;
853 u32 __iomem *baddr;
302 854
303 /* Create an ethernet device instance */ 855 err = gfar_of_init(ofdev, &dev);
304 dev = alloc_etherdev(sizeof (*priv));
305 856
306 if (NULL == dev) 857 if (err)
307 return -ENOMEM; 858 return err;
308 859
309 priv = netdev_priv(dev); 860 priv = netdev_priv(dev);
310 priv->ndev = dev; 861 priv->ndev = dev;
@@ -312,50 +863,46 @@ static int gfar_probe(struct of_device *ofdev,
312 priv->node = ofdev->node; 863 priv->node = ofdev->node;
313 SET_NETDEV_DEV(dev, &ofdev->dev); 864 SET_NETDEV_DEV(dev, &ofdev->dev);
314 865
315 err = gfar_of_init(dev);
316
317 if (err)
318 goto regs_fail;
319
320 spin_lock_init(&priv->txlock);
321 spin_lock_init(&priv->rxlock);
322 spin_lock_init(&priv->bflock); 866 spin_lock_init(&priv->bflock);
323 INIT_WORK(&priv->reset_task, gfar_reset_task); 867 INIT_WORK(&priv->reset_task, gfar_reset_task);
324 868
325 dev_set_drvdata(&ofdev->dev, priv); 869 dev_set_drvdata(&ofdev->dev, priv);
870 regs = priv->gfargrp[0].regs;
326 871
327 /* Stop the DMA engine now, in case it was running before */ 872 /* Stop the DMA engine now, in case it was running before */
328 /* (The firmware could have used it, and left it running). */ 873 /* (The firmware could have used it, and left it running). */
329 gfar_halt(dev); 874 gfar_halt(dev);
330 875
331 /* Reset MAC layer */ 876 /* Reset MAC layer */
332 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 877 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
333 878
334 /* We need to delay at least 3 TX clocks */ 879 /* We need to delay at least 3 TX clocks */
335 udelay(2); 880 udelay(2);
336 881
337 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 882 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
338 gfar_write(&priv->regs->maccfg1, tempval); 883 gfar_write(&regs->maccfg1, tempval);
339 884
340 /* Initialize MACCFG2. */ 885 /* Initialize MACCFG2. */
341 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); 886 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
342 887
343 /* Initialize ECNTRL */ 888 /* Initialize ECNTRL */
344 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 889 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
345 890
346 /* Set the dev->base_addr to the gfar reg region */ 891 /* Set the dev->base_addr to the gfar reg region */
347 dev->base_addr = (unsigned long) (priv->regs); 892 dev->base_addr = (unsigned long) regs;
348 893
349 SET_NETDEV_DEV(dev, &ofdev->dev); 894 SET_NETDEV_DEV(dev, &ofdev->dev);
350 895
351 /* Fill in the dev structure */ 896 /* Fill in the dev structure */
352 dev->watchdog_timeo = TX_TIMEOUT; 897 dev->watchdog_timeo = TX_TIMEOUT;
353 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
354 dev->mtu = 1500; 898 dev->mtu = 1500;
355
356 dev->netdev_ops = &gfar_netdev_ops; 899 dev->netdev_ops = &gfar_netdev_ops;
357 dev->ethtool_ops = &gfar_ethtool_ops; 900 dev->ethtool_ops = &gfar_ethtool_ops;
358 901
902 /* Register for napi ...We are registering NAPI for each grp */
903 for (i = 0; i < priv->num_grps; i++)
904 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
905
359 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 906 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
360 priv->rx_csum_enable = 1; 907 priv->rx_csum_enable = 1;
361 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 908 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -371,35 +918,35 @@ static int gfar_probe(struct of_device *ofdev,
371 priv->extended_hash = 1; 918 priv->extended_hash = 1;
372 priv->hash_width = 9; 919 priv->hash_width = 9;
373 920
374 priv->hash_regs[0] = &priv->regs->igaddr0; 921 priv->hash_regs[0] = &regs->igaddr0;
375 priv->hash_regs[1] = &priv->regs->igaddr1; 922 priv->hash_regs[1] = &regs->igaddr1;
376 priv->hash_regs[2] = &priv->regs->igaddr2; 923 priv->hash_regs[2] = &regs->igaddr2;
377 priv->hash_regs[3] = &priv->regs->igaddr3; 924 priv->hash_regs[3] = &regs->igaddr3;
378 priv->hash_regs[4] = &priv->regs->igaddr4; 925 priv->hash_regs[4] = &regs->igaddr4;
379 priv->hash_regs[5] = &priv->regs->igaddr5; 926 priv->hash_regs[5] = &regs->igaddr5;
380 priv->hash_regs[6] = &priv->regs->igaddr6; 927 priv->hash_regs[6] = &regs->igaddr6;
381 priv->hash_regs[7] = &priv->regs->igaddr7; 928 priv->hash_regs[7] = &regs->igaddr7;
382 priv->hash_regs[8] = &priv->regs->gaddr0; 929 priv->hash_regs[8] = &regs->gaddr0;
383 priv->hash_regs[9] = &priv->regs->gaddr1; 930 priv->hash_regs[9] = &regs->gaddr1;
384 priv->hash_regs[10] = &priv->regs->gaddr2; 931 priv->hash_regs[10] = &regs->gaddr2;
385 priv->hash_regs[11] = &priv->regs->gaddr3; 932 priv->hash_regs[11] = &regs->gaddr3;
386 priv->hash_regs[12] = &priv->regs->gaddr4; 933 priv->hash_regs[12] = &regs->gaddr4;
387 priv->hash_regs[13] = &priv->regs->gaddr5; 934 priv->hash_regs[13] = &regs->gaddr5;
388 priv->hash_regs[14] = &priv->regs->gaddr6; 935 priv->hash_regs[14] = &regs->gaddr6;
389 priv->hash_regs[15] = &priv->regs->gaddr7; 936 priv->hash_regs[15] = &regs->gaddr7;
390 937
391 } else { 938 } else {
392 priv->extended_hash = 0; 939 priv->extended_hash = 0;
393 priv->hash_width = 8; 940 priv->hash_width = 8;
394 941
395 priv->hash_regs[0] = &priv->regs->gaddr0; 942 priv->hash_regs[0] = &regs->gaddr0;
396 priv->hash_regs[1] = &priv->regs->gaddr1; 943 priv->hash_regs[1] = &regs->gaddr1;
397 priv->hash_regs[2] = &priv->regs->gaddr2; 944 priv->hash_regs[2] = &regs->gaddr2;
398 priv->hash_regs[3] = &priv->regs->gaddr3; 945 priv->hash_regs[3] = &regs->gaddr3;
399 priv->hash_regs[4] = &priv->regs->gaddr4; 946 priv->hash_regs[4] = &regs->gaddr4;
400 priv->hash_regs[5] = &priv->regs->gaddr5; 947 priv->hash_regs[5] = &regs->gaddr5;
401 priv->hash_regs[6] = &priv->regs->gaddr6; 948 priv->hash_regs[6] = &regs->gaddr6;
402 priv->hash_regs[7] = &priv->regs->gaddr7; 949 priv->hash_regs[7] = &regs->gaddr7;
403 } 950 }
404 951
405 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 952 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -410,15 +957,70 @@ static int gfar_probe(struct of_device *ofdev,
410 if (dev->features & NETIF_F_IP_CSUM) 957 if (dev->features & NETIF_F_IP_CSUM)
411 dev->hard_header_len += GMAC_FCB_LEN; 958 dev->hard_header_len += GMAC_FCB_LEN;
412 959
960 /* Program the isrg regs only if number of grps > 1 */
961 if (priv->num_grps > 1) {
962 baddr = &regs->isrg0;
963 for (i = 0; i < priv->num_grps; i++) {
964 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
965 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
966 gfar_write(baddr, isrg);
967 baddr++;
968 isrg = 0x0;
969 }
970 }
971
972 /* Need to reverse the bit maps as bit_map's MSB is q0
973 * but, for_each_bit parses from right to left, which
974 * basically reverses the queue numbers */
975 for (i = 0; i< priv->num_grps; i++) {
976 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
977 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
978 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
979 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
980 }
981
982 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
983 * also assign queues to groups */
984 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
985 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
986 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
987 priv->num_rx_queues) {
988 priv->gfargrp[grp_idx].num_rx_queues++;
989 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
990 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
991 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
992 }
993 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
994 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
995 priv->num_tx_queues) {
996 priv->gfargrp[grp_idx].num_tx_queues++;
997 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
998 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
999 tqueue = tqueue | (TQUEUE_EN0 >> i);
1000 }
1001 priv->gfargrp[grp_idx].rstat = rstat;
1002 priv->gfargrp[grp_idx].tstat = tstat;
1003 rstat = tstat =0;
1004 }
1005
1006 gfar_write(&regs->rqueue, rqueue);
1007 gfar_write(&regs->tqueue, tqueue);
1008
413 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1009 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
414 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
415 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
416 priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
417 1010
418 priv->txcoalescing = DEFAULT_TX_COALESCE; 1011 /* Initializing some of the rx/tx queue level parameters */
419 priv->txic = DEFAULT_TXIC; 1012 for (i = 0; i < priv->num_tx_queues; i++) {
420 priv->rxcoalescing = DEFAULT_RX_COALESCE; 1013 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
421 priv->rxic = DEFAULT_RXIC; 1014 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1015 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1016 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1017 }
1018
1019 for (i = 0; i < priv->num_rx_queues; i++) {
1020 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1021 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1022 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1023 }
422 1024
423 /* Enable most messages by default */ 1025 /* Enable most messages by default */
424 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1026 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -439,20 +1041,43 @@ static int gfar_probe(struct of_device *ofdev,
439 1041
440 /* fill out IRQ number and name fields */ 1042 /* fill out IRQ number and name fields */
441 len_devname = strlen(dev->name); 1043 len_devname = strlen(dev->name);
442 strncpy(&priv->int_name_tx[0], dev->name, len_devname); 1044 for (i = 0; i < priv->num_grps; i++) {
443 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1045 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
444 strncpy(&priv->int_name_tx[len_devname], 1046 len_devname);
445 "_tx", sizeof("_tx") + 1); 1047 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
446 1048 strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
447 strncpy(&priv->int_name_rx[0], dev->name, len_devname); 1049 "_g", sizeof("_g"));
448 strncpy(&priv->int_name_rx[len_devname], 1050 priv->gfargrp[i].int_name_tx[
449 "_rx", sizeof("_rx") + 1); 1051 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
1052 strncpy(&priv->gfargrp[i].int_name_tx[strlen(
1053 priv->gfargrp[i].int_name_tx)],
1054 "_tx", sizeof("_tx") + 1);
1055
1056 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
1057 len_devname);
1058 strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
1059 "_g", sizeof("_g"));
1060 priv->gfargrp[i].int_name_rx[
1061 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
1062 strncpy(&priv->gfargrp[i].int_name_rx[strlen(
1063 priv->gfargrp[i].int_name_rx)],
1064 "_rx", sizeof("_rx") + 1);
1065
1066 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
1067 len_devname);
1068 strncpy(&priv->gfargrp[i].int_name_er[len_devname],
1069 "_g", sizeof("_g"));
1070 priv->gfargrp[i].int_name_er[strlen(
1071 priv->gfargrp[i].int_name_er)] = i+48;
1072 strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1073 priv->gfargrp[i].int_name_er)],
1074 "_er", sizeof("_er") + 1);
1075 } else
1076 priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1077 }
450 1078
451 strncpy(&priv->int_name_er[0], dev->name, len_devname); 1079 /* Initialize the filer table */
452 strncpy(&priv->int_name_er[len_devname], 1080 gfar_init_filer_table(priv);
453 "_er", sizeof("_er") + 1);
454 } else
455 priv->int_name_tx[len_devname] = '\0';
456 1081
457 /* Create all the sysfs files */ 1082 /* Create all the sysfs files */
458 gfar_init_sysfs(dev); 1083 gfar_init_sysfs(dev);
@@ -463,14 +1088,19 @@ static int gfar_probe(struct of_device *ofdev,
463 /* Even more device info helps when determining which kernel */ 1088 /* Even more device info helps when determining which kernel */
464 /* provided which set of benchmarks. */ 1089 /* provided which set of benchmarks. */
465 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1090 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
466 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 1091 for (i = 0; i < priv->num_rx_queues; i++)
467 dev->name, priv->rx_ring_size, priv->tx_ring_size); 1092 printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
1093 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1094 for(i = 0; i < priv->num_tx_queues; i++)
1095 printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
1096 dev->name, i, priv->tx_queue[i]->tx_ring_size);
468 1097
469 return 0; 1098 return 0;
470 1099
471register_fail: 1100register_fail:
472 iounmap(priv->regs); 1101 unmap_group_regs(priv);
473regs_fail: 1102 free_tx_pointers(priv);
1103 free_rx_pointers(priv);
474 if (priv->phy_node) 1104 if (priv->phy_node)
475 of_node_put(priv->phy_node); 1105 of_node_put(priv->phy_node);
476 if (priv->tbi_node) 1106 if (priv->tbi_node)
@@ -491,54 +1121,59 @@ static int gfar_remove(struct of_device *ofdev)
491 dev_set_drvdata(&ofdev->dev, NULL); 1121 dev_set_drvdata(&ofdev->dev, NULL);
492 1122
493 unregister_netdev(priv->ndev); 1123 unregister_netdev(priv->ndev);
494 iounmap(priv->regs); 1124 unmap_group_regs(priv);
495 free_netdev(priv->ndev); 1125 free_netdev(priv->ndev);
496 1126
497 return 0; 1127 return 0;
498} 1128}
499 1129
500#ifdef CONFIG_PM 1130#ifdef CONFIG_PM
501static int gfar_suspend(struct of_device *ofdev, pm_message_t state) 1131
1132static int gfar_suspend(struct device *dev)
502{ 1133{
503 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1134 struct gfar_private *priv = dev_get_drvdata(dev);
504 struct net_device *dev = priv->ndev; 1135 struct net_device *ndev = priv->ndev;
1136 struct gfar __iomem *regs = priv->gfargrp[0].regs;
505 unsigned long flags; 1137 unsigned long flags;
506 u32 tempval; 1138 u32 tempval;
507 1139
508 int magic_packet = priv->wol_en && 1140 int magic_packet = priv->wol_en &&
509 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1141 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
510 1142
511 netif_device_detach(dev); 1143 netif_device_detach(ndev);
512 1144
513 if (netif_running(dev)) { 1145 if (netif_running(ndev)) {
514 spin_lock_irqsave(&priv->txlock, flags);
515 spin_lock(&priv->rxlock);
516 1146
517 gfar_halt_nodisable(dev); 1147 local_irq_save(flags);
1148 lock_tx_qs(priv);
1149 lock_rx_qs(priv);
1150
1151 gfar_halt_nodisable(ndev);
518 1152
519 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1153 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
520 tempval = gfar_read(&priv->regs->maccfg1); 1154 tempval = gfar_read(&regs->maccfg1);
521 1155
522 tempval &= ~MACCFG1_TX_EN; 1156 tempval &= ~MACCFG1_TX_EN;
523 1157
524 if (!magic_packet) 1158 if (!magic_packet)
525 tempval &= ~MACCFG1_RX_EN; 1159 tempval &= ~MACCFG1_RX_EN;
526 1160
527 gfar_write(&priv->regs->maccfg1, tempval); 1161 gfar_write(&regs->maccfg1, tempval);
528 1162
529 spin_unlock(&priv->rxlock); 1163 unlock_rx_qs(priv);
530 spin_unlock_irqrestore(&priv->txlock, flags); 1164 unlock_tx_qs(priv);
1165 local_irq_restore(flags);
531 1166
532 napi_disable(&priv->napi); 1167 disable_napi(priv);
533 1168
534 if (magic_packet) { 1169 if (magic_packet) {
535 /* Enable interrupt on Magic Packet */ 1170 /* Enable interrupt on Magic Packet */
536 gfar_write(&priv->regs->imask, IMASK_MAG); 1171 gfar_write(&regs->imask, IMASK_MAG);
537 1172
538 /* Enable Magic Packet mode */ 1173 /* Enable Magic Packet mode */
539 tempval = gfar_read(&priv->regs->maccfg2); 1174 tempval = gfar_read(&regs->maccfg2);
540 tempval |= MACCFG2_MPEN; 1175 tempval |= MACCFG2_MPEN;
541 gfar_write(&priv->regs->maccfg2, tempval); 1176 gfar_write(&regs->maccfg2, tempval);
542 } else { 1177 } else {
543 phy_stop(priv->phydev); 1178 phy_stop(priv->phydev);
544 } 1179 }
@@ -547,17 +1182,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
547 return 0; 1182 return 0;
548} 1183}
549 1184
550static int gfar_resume(struct of_device *ofdev) 1185static int gfar_resume(struct device *dev)
551{ 1186{
552 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1187 struct gfar_private *priv = dev_get_drvdata(dev);
553 struct net_device *dev = priv->ndev; 1188 struct net_device *ndev = priv->ndev;
1189 struct gfar __iomem *regs = priv->gfargrp[0].regs;
554 unsigned long flags; 1190 unsigned long flags;
555 u32 tempval; 1191 u32 tempval;
556 int magic_packet = priv->wol_en && 1192 int magic_packet = priv->wol_en &&
557 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1193 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
558 1194
559 if (!netif_running(dev)) { 1195 if (!netif_running(ndev)) {
560 netif_device_attach(dev); 1196 netif_device_attach(ndev);
561 return 0; 1197 return 0;
562 } 1198 }
563 1199
@@ -567,28 +1203,80 @@ static int gfar_resume(struct of_device *ofdev)
567 /* Disable Magic Packet mode, in case something 1203 /* Disable Magic Packet mode, in case something
568 * else woke us up. 1204 * else woke us up.
569 */ 1205 */
1206 local_irq_save(flags);
1207 lock_tx_qs(priv);
1208 lock_rx_qs(priv);
570 1209
571 spin_lock_irqsave(&priv->txlock, flags); 1210 tempval = gfar_read(&regs->maccfg2);
572 spin_lock(&priv->rxlock);
573
574 tempval = gfar_read(&priv->regs->maccfg2);
575 tempval &= ~MACCFG2_MPEN; 1211 tempval &= ~MACCFG2_MPEN;
576 gfar_write(&priv->regs->maccfg2, tempval); 1212 gfar_write(&regs->maccfg2, tempval);
577 1213
578 gfar_start(dev); 1214 gfar_start(ndev);
579 1215
580 spin_unlock(&priv->rxlock); 1216 unlock_rx_qs(priv);
581 spin_unlock_irqrestore(&priv->txlock, flags); 1217 unlock_tx_qs(priv);
1218 local_irq_restore(flags);
582 1219
583 netif_device_attach(dev); 1220 netif_device_attach(ndev);
584 1221
585 napi_enable(&priv->napi); 1222 enable_napi(priv);
586 1223
587 return 0; 1224 return 0;
588} 1225}
1226
1227static int gfar_restore(struct device *dev)
1228{
1229 struct gfar_private *priv = dev_get_drvdata(dev);
1230 struct net_device *ndev = priv->ndev;
1231
1232 if (!netif_running(ndev))
1233 return 0;
1234
1235 gfar_init_bds(ndev);
1236 init_registers(ndev);
1237 gfar_set_mac_address(ndev);
1238 gfar_init_mac(ndev);
1239 gfar_start(ndev);
1240
1241 priv->oldlink = 0;
1242 priv->oldspeed = 0;
1243 priv->oldduplex = -1;
1244
1245 if (priv->phydev)
1246 phy_start(priv->phydev);
1247
1248 netif_device_attach(ndev);
1249 enable_napi(priv);
1250
1251 return 0;
1252}
1253
1254static struct dev_pm_ops gfar_pm_ops = {
1255 .suspend = gfar_suspend,
1256 .resume = gfar_resume,
1257 .freeze = gfar_suspend,
1258 .thaw = gfar_resume,
1259 .restore = gfar_restore,
1260};
1261
1262#define GFAR_PM_OPS (&gfar_pm_ops)
1263
1264static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
1265{
1266 return gfar_suspend(&ofdev->dev);
1267}
1268
1269static int gfar_legacy_resume(struct of_device *ofdev)
1270{
1271 return gfar_resume(&ofdev->dev);
1272}
1273
589#else 1274#else
590#define gfar_suspend NULL 1275
591#define gfar_resume NULL 1276#define GFAR_PM_OPS NULL
1277#define gfar_legacy_suspend NULL
1278#define gfar_legacy_resume NULL
1279
592#endif 1280#endif
593 1281
594/* Reads the controller's registers to determine what interface 1282/* Reads the controller's registers to determine what interface
@@ -597,7 +1285,10 @@ static int gfar_resume(struct of_device *ofdev)
597static phy_interface_t gfar_get_interface(struct net_device *dev) 1285static phy_interface_t gfar_get_interface(struct net_device *dev)
598{ 1286{
599 struct gfar_private *priv = netdev_priv(dev); 1287 struct gfar_private *priv = netdev_priv(dev);
600 u32 ecntrl = gfar_read(&priv->regs->ecntrl); 1288 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1289 u32 ecntrl;
1290
1291 ecntrl = gfar_read(&regs->ecntrl);
601 1292
602 if (ecntrl & ECNTRL_SGMII_MODE) 1293 if (ecntrl & ECNTRL_SGMII_MODE)
603 return PHY_INTERFACE_MODE_SGMII; 1294 return PHY_INTERFACE_MODE_SGMII;
@@ -719,46 +1410,52 @@ static void gfar_configure_serdes(struct net_device *dev)
719static void init_registers(struct net_device *dev) 1410static void init_registers(struct net_device *dev)
720{ 1411{
721 struct gfar_private *priv = netdev_priv(dev); 1412 struct gfar_private *priv = netdev_priv(dev);
1413 struct gfar __iomem *regs = NULL;
1414 int i = 0;
722 1415
723 /* Clear IEVENT */ 1416 for (i = 0; i < priv->num_grps; i++) {
724 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 1417 regs = priv->gfargrp[i].regs;
1418 /* Clear IEVENT */
1419 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
725 1420
726 /* Initialize IMASK */ 1421 /* Initialize IMASK */
727 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 1422 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1423 }
728 1424
1425 regs = priv->gfargrp[0].regs;
729 /* Init hash registers to zero */ 1426 /* Init hash registers to zero */
730 gfar_write(&priv->regs->igaddr0, 0); 1427 gfar_write(&regs->igaddr0, 0);
731 gfar_write(&priv->regs->igaddr1, 0); 1428 gfar_write(&regs->igaddr1, 0);
732 gfar_write(&priv->regs->igaddr2, 0); 1429 gfar_write(&regs->igaddr2, 0);
733 gfar_write(&priv->regs->igaddr3, 0); 1430 gfar_write(&regs->igaddr3, 0);
734 gfar_write(&priv->regs->igaddr4, 0); 1431 gfar_write(&regs->igaddr4, 0);
735 gfar_write(&priv->regs->igaddr5, 0); 1432 gfar_write(&regs->igaddr5, 0);
736 gfar_write(&priv->regs->igaddr6, 0); 1433 gfar_write(&regs->igaddr6, 0);
737 gfar_write(&priv->regs->igaddr7, 0); 1434 gfar_write(&regs->igaddr7, 0);
738 1435
739 gfar_write(&priv->regs->gaddr0, 0); 1436 gfar_write(&regs->gaddr0, 0);
740 gfar_write(&priv->regs->gaddr1, 0); 1437 gfar_write(&regs->gaddr1, 0);
741 gfar_write(&priv->regs->gaddr2, 0); 1438 gfar_write(&regs->gaddr2, 0);
742 gfar_write(&priv->regs->gaddr3, 0); 1439 gfar_write(&regs->gaddr3, 0);
743 gfar_write(&priv->regs->gaddr4, 0); 1440 gfar_write(&regs->gaddr4, 0);
744 gfar_write(&priv->regs->gaddr5, 0); 1441 gfar_write(&regs->gaddr5, 0);
745 gfar_write(&priv->regs->gaddr6, 0); 1442 gfar_write(&regs->gaddr6, 0);
746 gfar_write(&priv->regs->gaddr7, 0); 1443 gfar_write(&regs->gaddr7, 0);
747 1444
748 /* Zero out the rmon mib registers if it has them */ 1445 /* Zero out the rmon mib registers if it has them */
749 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1446 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
750 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 1447 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
751 1448
752 /* Mask off the CAM interrupts */ 1449 /* Mask off the CAM interrupts */
753 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 1450 gfar_write(&regs->rmon.cam1, 0xffffffff);
754 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 1451 gfar_write(&regs->rmon.cam2, 0xffffffff);
755 } 1452 }
756 1453
757 /* Initialize the max receive buffer length */ 1454 /* Initialize the max receive buffer length */
758 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 1455 gfar_write(&regs->mrblr, priv->rx_buffer_size);
759 1456
760 /* Initialize the Minimum Frame Length Register */ 1457 /* Initialize the Minimum Frame Length Register */
761 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 1458 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
762} 1459}
763 1460
764 1461
@@ -766,23 +1463,28 @@ static void init_registers(struct net_device *dev)
766static void gfar_halt_nodisable(struct net_device *dev) 1463static void gfar_halt_nodisable(struct net_device *dev)
767{ 1464{
768 struct gfar_private *priv = netdev_priv(dev); 1465 struct gfar_private *priv = netdev_priv(dev);
769 struct gfar __iomem *regs = priv->regs; 1466 struct gfar __iomem *regs = NULL;
770 u32 tempval; 1467 u32 tempval;
1468 int i = 0;
771 1469
772 /* Mask all interrupts */ 1470 for (i = 0; i < priv->num_grps; i++) {
773 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1471 regs = priv->gfargrp[i].regs;
1472 /* Mask all interrupts */
1473 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
774 1474
775 /* Clear all interrupts */ 1475 /* Clear all interrupts */
776 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1476 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1477 }
777 1478
1479 regs = priv->gfargrp[0].regs;
778 /* Stop the DMA, and wait for it to stop */ 1480 /* Stop the DMA, and wait for it to stop */
779 tempval = gfar_read(&priv->regs->dmactrl); 1481 tempval = gfar_read(&regs->dmactrl);
780 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1482 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
781 != (DMACTRL_GRS | DMACTRL_GTS)) { 1483 != (DMACTRL_GRS | DMACTRL_GTS)) {
782 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1484 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
783 gfar_write(&priv->regs->dmactrl, tempval); 1485 gfar_write(&regs->dmactrl, tempval);
784 1486
785 while (!(gfar_read(&priv->regs->ievent) & 1487 while (!(gfar_read(&regs->ievent) &
786 (IEVENT_GRSC | IEVENT_GTSC))) 1488 (IEVENT_GRSC | IEVENT_GTSC)))
787 cpu_relax(); 1489 cpu_relax();
788 } 1490 }
@@ -792,7 +1494,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
792void gfar_halt(struct net_device *dev) 1494void gfar_halt(struct net_device *dev)
793{ 1495{
794 struct gfar_private *priv = netdev_priv(dev); 1496 struct gfar_private *priv = netdev_priv(dev);
795 struct gfar __iomem *regs = priv->regs; 1497 struct gfar __iomem *regs = priv->gfargrp[0].regs;
796 u32 tempval; 1498 u32 tempval;
797 1499
798 gfar_halt_nodisable(dev); 1500 gfar_halt_nodisable(dev);
@@ -803,101 +1505,131 @@ void gfar_halt(struct net_device *dev)
803 gfar_write(&regs->maccfg1, tempval); 1505 gfar_write(&regs->maccfg1, tempval);
804} 1506}
805 1507
1508static void free_grp_irqs(struct gfar_priv_grp *grp)
1509{
1510 free_irq(grp->interruptError, grp);
1511 free_irq(grp->interruptTransmit, grp);
1512 free_irq(grp->interruptReceive, grp);
1513}
1514
806void stop_gfar(struct net_device *dev) 1515void stop_gfar(struct net_device *dev)
807{ 1516{
808 struct gfar_private *priv = netdev_priv(dev); 1517 struct gfar_private *priv = netdev_priv(dev);
809 struct gfar __iomem *regs = priv->regs;
810 unsigned long flags; 1518 unsigned long flags;
1519 int i;
811 1520
812 phy_stop(priv->phydev); 1521 phy_stop(priv->phydev);
813 1522
1523
814 /* Lock it down */ 1524 /* Lock it down */
815 spin_lock_irqsave(&priv->txlock, flags); 1525 local_irq_save(flags);
816 spin_lock(&priv->rxlock); 1526 lock_tx_qs(priv);
1527 lock_rx_qs(priv);
817 1528
818 gfar_halt(dev); 1529 gfar_halt(dev);
819 1530
820 spin_unlock(&priv->rxlock); 1531 unlock_rx_qs(priv);
821 spin_unlock_irqrestore(&priv->txlock, flags); 1532 unlock_tx_qs(priv);
1533 local_irq_restore(flags);
822 1534
823 /* Free the IRQs */ 1535 /* Free the IRQs */
824 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1536 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
825 free_irq(priv->interruptError, dev); 1537 for (i = 0; i < priv->num_grps; i++)
826 free_irq(priv->interruptTransmit, dev); 1538 free_grp_irqs(&priv->gfargrp[i]);
827 free_irq(priv->interruptReceive, dev);
828 } else { 1539 } else {
829 free_irq(priv->interruptTransmit, dev); 1540 for (i = 0; i < priv->num_grps; i++)
1541 free_irq(priv->gfargrp[i].interruptTransmit,
1542 &priv->gfargrp[i]);
830 } 1543 }
831 1544
832 free_skb_resources(priv); 1545 free_skb_resources(priv);
833
834 dma_free_coherent(&priv->ofdev->dev,
835 sizeof(struct txbd8)*priv->tx_ring_size
836 + sizeof(struct rxbd8)*priv->rx_ring_size,
837 priv->tx_bd_base,
838 gfar_read(&regs->tbase0));
839} 1546}
840 1547
841/* If there are any tx skbs or rx skbs still around, free them. 1548static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
842 * Then free tx_skbuff and rx_skbuff */
843static void free_skb_resources(struct gfar_private *priv)
844{ 1549{
845 struct rxbd8 *rxbdp;
846 struct txbd8 *txbdp; 1550 struct txbd8 *txbdp;
1551 struct gfar_private *priv = netdev_priv(tx_queue->dev);
847 int i, j; 1552 int i, j;
848 1553
849 /* Go through all the buffer descriptors and free their data buffers */ 1554 txbdp = tx_queue->tx_bd_base;
850 txbdp = priv->tx_bd_base;
851 1555
852 for (i = 0; i < priv->tx_ring_size; i++) { 1556 for (i = 0; i < tx_queue->tx_ring_size; i++) {
853 if (!priv->tx_skbuff[i]) 1557 if (!tx_queue->tx_skbuff[i])
854 continue; 1558 continue;
855 1559
856 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1560 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
857 txbdp->length, DMA_TO_DEVICE); 1561 txbdp->length, DMA_TO_DEVICE);
858 txbdp->lstatus = 0; 1562 txbdp->lstatus = 0;
859 for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { 1563 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1564 j++) {
860 txbdp++; 1565 txbdp++;
861 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1566 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
862 txbdp->length, DMA_TO_DEVICE); 1567 txbdp->length, DMA_TO_DEVICE);
863 } 1568 }
864 txbdp++; 1569 txbdp++;
865 dev_kfree_skb_any(priv->tx_skbuff[i]); 1570 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
866 priv->tx_skbuff[i] = NULL; 1571 tx_queue->tx_skbuff[i] = NULL;
867 } 1572 }
1573 kfree(tx_queue->tx_skbuff);
1574}
868 1575
869 kfree(priv->tx_skbuff); 1576static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
870 1577{
871 rxbdp = priv->rx_bd_base; 1578 struct rxbd8 *rxbdp;
1579 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1580 int i;
872 1581
873 /* rx_skbuff is not guaranteed to be allocated, so only 1582 rxbdp = rx_queue->rx_bd_base;
874 * free it and its contents if it is allocated */
875 if(priv->rx_skbuff != NULL) {
876 for (i = 0; i < priv->rx_ring_size; i++) {
877 if (priv->rx_skbuff[i]) {
878 dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
879 priv->rx_buffer_size,
880 DMA_FROM_DEVICE);
881 1583
882 dev_kfree_skb_any(priv->rx_skbuff[i]); 1584 for (i = 0; i < rx_queue->rx_ring_size; i++) {
883 priv->rx_skbuff[i] = NULL; 1585 if (rx_queue->rx_skbuff[i]) {
884 } 1586 dma_unmap_single(&priv->ofdev->dev,
1587 rxbdp->bufPtr, priv->rx_buffer_size,
1588 DMA_FROM_DEVICE);
1589 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1590 rx_queue->rx_skbuff[i] = NULL;
1591 }
1592 rxbdp->lstatus = 0;
1593 rxbdp->bufPtr = 0;
1594 rxbdp++;
1595 }
1596 kfree(rx_queue->rx_skbuff);
1597}
885 1598
886 rxbdp->lstatus = 0; 1599/* If there are any tx skbs or rx skbs still around, free them.
887 rxbdp->bufPtr = 0; 1600 * Then free tx_skbuff and rx_skbuff */
1601static void free_skb_resources(struct gfar_private *priv)
1602{
1603 struct gfar_priv_tx_q *tx_queue = NULL;
1604 struct gfar_priv_rx_q *rx_queue = NULL;
1605 int i;
888 1606
889 rxbdp++; 1607 /* Go through all the buffer descriptors and free their data buffers */
890 } 1608 for (i = 0; i < priv->num_tx_queues; i++) {
1609 tx_queue = priv->tx_queue[i];
1610 if(!tx_queue->tx_skbuff)
1611 free_skb_tx_queue(tx_queue);
1612 }
891 1613
892 kfree(priv->rx_skbuff); 1614 for (i = 0; i < priv->num_rx_queues; i++) {
1615 rx_queue = priv->rx_queue[i];
1616 if(!rx_queue->rx_skbuff)
1617 free_skb_rx_queue(rx_queue);
893 } 1618 }
1619
1620 dma_free_coherent(&priv->ofdev->dev,
1621 sizeof(struct txbd8) * priv->total_tx_ring_size +
1622 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1623 priv->tx_queue[0]->tx_bd_base,
1624 priv->tx_queue[0]->tx_bd_dma_base);
894} 1625}
895 1626
896void gfar_start(struct net_device *dev) 1627void gfar_start(struct net_device *dev)
897{ 1628{
898 struct gfar_private *priv = netdev_priv(dev); 1629 struct gfar_private *priv = netdev_priv(dev);
899 struct gfar __iomem *regs = priv->regs; 1630 struct gfar __iomem *regs = priv->gfargrp[0].regs;
900 u32 tempval; 1631 u32 tempval;
1632 int i = 0;
901 1633
902 /* Enable Rx and Tx in MACCFG1 */ 1634 /* Enable Rx and Tx in MACCFG1 */
903 tempval = gfar_read(&regs->maccfg1); 1635 tempval = gfar_read(&regs->maccfg1);
@@ -905,269 +1637,159 @@ void gfar_start(struct net_device *dev)
905 gfar_write(&regs->maccfg1, tempval); 1637 gfar_write(&regs->maccfg1, tempval);
906 1638
907 /* Initialize DMACTRL to have WWR and WOP */ 1639 /* Initialize DMACTRL to have WWR and WOP */
908 tempval = gfar_read(&priv->regs->dmactrl); 1640 tempval = gfar_read(&regs->dmactrl);
909 tempval |= DMACTRL_INIT_SETTINGS; 1641 tempval |= DMACTRL_INIT_SETTINGS;
910 gfar_write(&priv->regs->dmactrl, tempval); 1642 gfar_write(&regs->dmactrl, tempval);
911 1643
912 /* Make sure we aren't stopped */ 1644 /* Make sure we aren't stopped */
913 tempval = gfar_read(&priv->regs->dmactrl); 1645 tempval = gfar_read(&regs->dmactrl);
914 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1646 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
915 gfar_write(&priv->regs->dmactrl, tempval); 1647 gfar_write(&regs->dmactrl, tempval);
916 1648
917 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1649 for (i = 0; i < priv->num_grps; i++) {
918 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 1650 regs = priv->gfargrp[i].regs;
919 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 1651 /* Clear THLT/RHLT, so that the DMA starts polling now */
920 1652 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
921 /* Unmask the interrupts we look for */ 1653 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
922 gfar_write(&regs->imask, IMASK_DEFAULT); 1654 /* Unmask the interrupts we look for */
1655 gfar_write(&regs->imask, IMASK_DEFAULT);
1656 }
923 1657
924 dev->trans_start = jiffies; 1658 dev->trans_start = jiffies;
925} 1659}
926 1660
927/* Bring the controller up and running */ 1661void gfar_configure_coalescing(struct gfar_private *priv,
928int startup_gfar(struct net_device *dev) 1662 unsigned long tx_mask, unsigned long rx_mask)
929{ 1663{
930 struct txbd8 *txbdp; 1664 struct gfar __iomem *regs = priv->gfargrp[0].regs;
931 struct rxbd8 *rxbdp; 1665 u32 __iomem *baddr;
932 dma_addr_t addr = 0; 1666 int i = 0;
933 unsigned long vaddr;
934 int i;
935 struct gfar_private *priv = netdev_priv(dev);
936 struct gfar __iomem *regs = priv->regs;
937 int err = 0;
938 u32 rctrl = 0;
939 u32 tctrl = 0;
940 u32 attrs = 0;
941
942 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
943 1667
944 /* Allocate memory for the buffer descriptors */ 1668 /* Backward compatible case ---- even if we enable
945 vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev, 1669 * multiple queues, there's only single reg to program
946 sizeof (struct txbd8) * priv->tx_ring_size + 1670 */
947 sizeof (struct rxbd8) * priv->rx_ring_size, 1671 gfar_write(&regs->txic, 0);
948 &addr, GFP_KERNEL); 1672 if(likely(priv->tx_queue[0]->txcoalescing))
949 1673 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
950 if (vaddr == 0) {
951 if (netif_msg_ifup(priv))
952 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
953 dev->name);
954 return -ENOMEM;
955 }
956
957 priv->tx_bd_base = (struct txbd8 *) vaddr;
958
959 /* enet DMA only understands physical addresses */
960 gfar_write(&regs->tbase0, addr);
961
962 /* Start the rx descriptor ring where the tx ring leaves off */
963 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
964 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
965 priv->rx_bd_base = (struct rxbd8 *) vaddr;
966 gfar_write(&regs->rbase0, addr);
967
968 /* Setup the skbuff rings */
969 priv->tx_skbuff =
970 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
971 priv->tx_ring_size, GFP_KERNEL);
972
973 if (NULL == priv->tx_skbuff) {
974 if (netif_msg_ifup(priv))
975 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
976 dev->name);
977 err = -ENOMEM;
978 goto tx_skb_fail;
979 }
980
981 for (i = 0; i < priv->tx_ring_size; i++)
982 priv->tx_skbuff[i] = NULL;
983
984 priv->rx_skbuff =
985 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
986 priv->rx_ring_size, GFP_KERNEL);
987
988 if (NULL == priv->rx_skbuff) {
989 if (netif_msg_ifup(priv))
990 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
991 dev->name);
992 err = -ENOMEM;
993 goto rx_skb_fail;
994 }
995
996 for (i = 0; i < priv->rx_ring_size; i++)
997 priv->rx_skbuff[i] = NULL;
998
999 /* Initialize some variables in our dev structure */
1000 priv->num_txbdfree = priv->tx_ring_size;
1001 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
1002 priv->cur_rx = priv->rx_bd_base;
1003 priv->skb_curtx = priv->skb_dirtytx = 0;
1004 priv->skb_currx = 0;
1005
1006 /* Initialize Transmit Descriptor Ring */
1007 txbdp = priv->tx_bd_base;
1008 for (i = 0; i < priv->tx_ring_size; i++) {
1009 txbdp->lstatus = 0;
1010 txbdp->bufPtr = 0;
1011 txbdp++;
1012 }
1013
1014 /* Set the last descriptor in the ring to indicate wrap */
1015 txbdp--;
1016 txbdp->status |= TXBD_WRAP;
1017
1018 rxbdp = priv->rx_bd_base;
1019 for (i = 0; i < priv->rx_ring_size; i++) {
1020 struct sk_buff *skb;
1021
1022 skb = gfar_new_skb(dev);
1023
1024 if (!skb) {
1025 printk(KERN_ERR "%s: Can't allocate RX buffers\n",
1026 dev->name);
1027 1674
1028 goto err_rxalloc_fail; 1675 gfar_write(&regs->rxic, 0);
1676 if(unlikely(priv->rx_queue[0]->rxcoalescing))
1677 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1678
1679 if (priv->mode == MQ_MG_MODE) {
1680 baddr = &regs->txic0;
1681 for_each_bit (i, &tx_mask, priv->num_tx_queues) {
1682 if (likely(priv->tx_queue[i]->txcoalescing)) {
1683 gfar_write(baddr + i, 0);
1684 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1685 }
1029 } 1686 }
1030 1687
1031 priv->rx_skbuff[i] = skb; 1688 baddr = &regs->rxic0;
1032 1689 for_each_bit (i, &rx_mask, priv->num_rx_queues) {
1033 gfar_new_rxbdp(dev, rxbdp, skb); 1690 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1034 1691 gfar_write(baddr + i, 0);
1035 rxbdp++; 1692 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1693 }
1694 }
1036 } 1695 }
1696}
1037 1697
1038 /* Set the last descriptor in the ring to wrap */ 1698static int register_grp_irqs(struct gfar_priv_grp *grp)
1039 rxbdp--; 1699{
1040 rxbdp->status |= RXBD_WRAP; 1700 struct gfar_private *priv = grp->priv;
1701 struct net_device *dev = priv->ndev;
1702 int err;
1041 1703
1042 /* If the device has multiple interrupts, register for 1704 /* If the device has multiple interrupts, register for
1043 * them. Otherwise, only register for the one */ 1705 * them. Otherwise, only register for the one */
1044 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1706 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1045 /* Install our interrupt handlers for Error, 1707 /* Install our interrupt handlers for Error,
1046 * Transmit, and Receive */ 1708 * Transmit, and Receive */
1047 if (request_irq(priv->interruptError, gfar_error, 1709 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1048 0, priv->int_name_er, dev) < 0) { 1710 grp->int_name_er,grp)) < 0) {
1049 if (netif_msg_intr(priv)) 1711 if (netif_msg_intr(priv))
1050 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1712 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1051 dev->name, priv->interruptError); 1713 dev->name, grp->interruptError);
1052 1714
1053 err = -1; 1715 goto err_irq_fail;
1054 goto err_irq_fail;
1055 } 1716 }
1056 1717
1057 if (request_irq(priv->interruptTransmit, gfar_transmit, 1718 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1058 0, priv->int_name_tx, dev) < 0) { 1719 0, grp->int_name_tx, grp)) < 0) {
1059 if (netif_msg_intr(priv)) 1720 if (netif_msg_intr(priv))
1060 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1721 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1061 dev->name, priv->interruptTransmit); 1722 dev->name, grp->interruptTransmit);
1062
1063 err = -1;
1064
1065 goto tx_irq_fail; 1723 goto tx_irq_fail;
1066 } 1724 }
1067 1725
1068 if (request_irq(priv->interruptReceive, gfar_receive, 1726 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1069 0, priv->int_name_rx, dev) < 0) { 1727 grp->int_name_rx, grp)) < 0) {
1070 if (netif_msg_intr(priv)) 1728 if (netif_msg_intr(priv))
1071 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 1729 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1072 dev->name, priv->interruptReceive); 1730 dev->name, grp->interruptReceive);
1073
1074 err = -1;
1075 goto rx_irq_fail; 1731 goto rx_irq_fail;
1076 } 1732 }
1077 } else { 1733 } else {
1078 if (request_irq(priv->interruptTransmit, gfar_interrupt, 1734 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1079 0, priv->int_name_tx, dev) < 0) { 1735 grp->int_name_tx, grp)) < 0) {
1080 if (netif_msg_intr(priv)) 1736 if (netif_msg_intr(priv))
1081 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1737 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1082 dev->name, priv->interruptTransmit); 1738 dev->name, grp->interruptTransmit);
1083
1084 err = -1;
1085 goto err_irq_fail; 1739 goto err_irq_fail;
1086 } 1740 }
1087 } 1741 }
1088 1742
1089 phy_start(priv->phydev); 1743 return 0;
1090
1091 /* Configure the coalescing support */
1092 gfar_write(&regs->txic, 0);
1093 if (priv->txcoalescing)
1094 gfar_write(&regs->txic, priv->txic);
1095
1096 gfar_write(&regs->rxic, 0);
1097 if (priv->rxcoalescing)
1098 gfar_write(&regs->rxic, priv->rxic);
1099
1100 if (priv->rx_csum_enable)
1101 rctrl |= RCTRL_CHECKSUMMING;
1102 1744
1103 if (priv->extended_hash) { 1745rx_irq_fail:
1104 rctrl |= RCTRL_EXTHASH; 1746 free_irq(grp->interruptTransmit, grp);
1747tx_irq_fail:
1748 free_irq(grp->interruptError, grp);
1749err_irq_fail:
1750 return err;
1105 1751
1106 gfar_clear_exact_match(dev); 1752}
1107 rctrl |= RCTRL_EMEN;
1108 }
1109 1753
1110 if (priv->padding) { 1754/* Bring the controller up and running */
1111 rctrl &= ~RCTRL_PAL_MASK; 1755int startup_gfar(struct net_device *ndev)
1112 rctrl |= RCTRL_PADDING(priv->padding); 1756{
1113 } 1757 struct gfar_private *priv = netdev_priv(ndev);
1758 struct gfar __iomem *regs = NULL;
1759 int err, i, j;
1114 1760
1115 /* keep vlan related bits if it's enabled */ 1761 for (i = 0; i < priv->num_grps; i++) {
1116 if (priv->vlgrp) { 1762 regs= priv->gfargrp[i].regs;
1117 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 1763 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1118 tctrl |= TCTRL_VLINS;
1119 } 1764 }
1120 1765
1121 /* Init rctrl based on our settings */ 1766 regs= priv->gfargrp[0].regs;
1122 gfar_write(&priv->regs->rctrl, rctrl); 1767 err = gfar_alloc_skb_resources(ndev);
1123 1768 if (err)
1124 if (dev->features & NETIF_F_IP_CSUM) 1769 return err;
1125 tctrl |= TCTRL_INIT_CSUM;
1126
1127 gfar_write(&priv->regs->tctrl, tctrl);
1128
1129 /* Set the extraction length and index */
1130 attrs = ATTRELI_EL(priv->rx_stash_size) |
1131 ATTRELI_EI(priv->rx_stash_index);
1132
1133 gfar_write(&priv->regs->attreli, attrs);
1134
1135 /* Start with defaults, and add stashing or locking
1136 * depending on the approprate variables */
1137 attrs = ATTR_INIT_SETTINGS;
1138 1770
1139 if (priv->bd_stash_en) 1771 gfar_init_mac(ndev);
1140 attrs |= ATTR_BDSTASH;
1141 1772
1142 if (priv->rx_stash_size != 0) 1773 for (i = 0; i < priv->num_grps; i++) {
1143 attrs |= ATTR_BUFSTASH; 1774 err = register_grp_irqs(&priv->gfargrp[i]);
1775 if (err) {
1776 for (j = 0; j < i; j++)
1777 free_grp_irqs(&priv->gfargrp[j]);
1778 goto irq_fail;
1779 }
1780 }
1144 1781
1145 gfar_write(&priv->regs->attr, attrs); 1782 /* Start the controller */
1783 gfar_start(ndev);
1146 1784
1147 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); 1785 phy_start(priv->phydev);
1148 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
1149 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
1150 1786
1151 /* Start the controller */ 1787 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1152 gfar_start(dev);
1153 1788
1154 return 0; 1789 return 0;
1155 1790
1156rx_irq_fail: 1791irq_fail:
1157 free_irq(priv->interruptTransmit, dev);
1158tx_irq_fail:
1159 free_irq(priv->interruptError, dev);
1160err_irq_fail:
1161err_rxalloc_fail:
1162rx_skb_fail:
1163 free_skb_resources(priv); 1792 free_skb_resources(priv);
1164tx_skb_fail:
1165 dma_free_coherent(&priv->ofdev->dev,
1166 sizeof(struct txbd8)*priv->tx_ring_size
1167 + sizeof(struct rxbd8)*priv->rx_ring_size,
1168 priv->tx_bd_base,
1169 gfar_read(&regs->tbase0));
1170
1171 return err; 1793 return err;
1172} 1794}
1173 1795
@@ -1178,7 +1800,7 @@ static int gfar_enet_open(struct net_device *dev)
1178 struct gfar_private *priv = netdev_priv(dev); 1800 struct gfar_private *priv = netdev_priv(dev);
1179 int err; 1801 int err;
1180 1802
1181 napi_enable(&priv->napi); 1803 enable_napi(priv);
1182 1804
1183 skb_queue_head_init(&priv->rx_recycle); 1805 skb_queue_head_init(&priv->rx_recycle);
1184 1806
@@ -1189,18 +1811,18 @@ static int gfar_enet_open(struct net_device *dev)
1189 1811
1190 err = init_phy(dev); 1812 err = init_phy(dev);
1191 1813
1192 if(err) { 1814 if (err) {
1193 napi_disable(&priv->napi); 1815 disable_napi(priv);
1194 return err; 1816 return err;
1195 } 1817 }
1196 1818
1197 err = startup_gfar(dev); 1819 err = startup_gfar(dev);
1198 if (err) { 1820 if (err) {
1199 napi_disable(&priv->napi); 1821 disable_napi(priv);
1200 return err; 1822 return err;
1201 } 1823 }
1202 1824
1203 netif_start_queue(dev); 1825 netif_tx_start_all_queues(dev);
1204 1826
1205 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1827 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1206 1828
@@ -1269,15 +1891,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1269static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1891static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1270{ 1892{
1271 struct gfar_private *priv = netdev_priv(dev); 1893 struct gfar_private *priv = netdev_priv(dev);
1894 struct gfar_priv_tx_q *tx_queue = NULL;
1895 struct netdev_queue *txq;
1896 struct gfar __iomem *regs = NULL;
1272 struct txfcb *fcb = NULL; 1897 struct txfcb *fcb = NULL;
1273 struct txbd8 *txbdp, *txbdp_start, *base; 1898 struct txbd8 *txbdp, *txbdp_start, *base;
1274 u32 lstatus; 1899 u32 lstatus;
1275 int i; 1900 int i, rq = 0;
1276 u32 bufaddr; 1901 u32 bufaddr;
1277 unsigned long flags; 1902 unsigned long flags;
1278 unsigned int nr_frags, length; 1903 unsigned int nr_frags, length;
1279 1904
1280 base = priv->tx_bd_base; 1905
1906 rq = skb->queue_mapping;
1907 tx_queue = priv->tx_queue[rq];
1908 txq = netdev_get_tx_queue(dev, rq);
1909 base = tx_queue->tx_bd_base;
1910 regs = tx_queue->grp->regs;
1281 1911
1282 /* make space for additional header when fcb is needed */ 1912 /* make space for additional header when fcb is needed */
1283 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1913 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1298,21 +1928,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1298 /* total number of fragments in the SKB */ 1928 /* total number of fragments in the SKB */
1299 nr_frags = skb_shinfo(skb)->nr_frags; 1929 nr_frags = skb_shinfo(skb)->nr_frags;
1300 1930
1301 spin_lock_irqsave(&priv->txlock, flags);
1302
1303 /* check if there is space to queue this packet */ 1931 /* check if there is space to queue this packet */
1304 if ((nr_frags+1) > priv->num_txbdfree) { 1932 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1305 /* no space, stop the queue */ 1933 /* no space, stop the queue */
1306 netif_stop_queue(dev); 1934 netif_tx_stop_queue(txq);
1307 dev->stats.tx_fifo_errors++; 1935 dev->stats.tx_fifo_errors++;
1308 spin_unlock_irqrestore(&priv->txlock, flags);
1309 return NETDEV_TX_BUSY; 1936 return NETDEV_TX_BUSY;
1310 } 1937 }
1311 1938
1312 /* Update transmit stats */ 1939 /* Update transmit stats */
1313 dev->stats.tx_bytes += skb->len; 1940 dev->stats.tx_bytes += skb->len;
1314 1941
1315 txbdp = txbdp_start = priv->cur_tx; 1942 txbdp = txbdp_start = tx_queue->cur_tx;
1316 1943
1317 if (nr_frags == 0) { 1944 if (nr_frags == 0) {
1318 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1945 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1320,7 +1947,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1320 /* Place the fragment addresses and lengths into the TxBDs */ 1947 /* Place the fragment addresses and lengths into the TxBDs */
1321 for (i = 0; i < nr_frags; i++) { 1948 for (i = 0; i < nr_frags; i++) {
1322 /* Point at the next BD, wrapping as needed */ 1949 /* Point at the next BD, wrapping as needed */
1323 txbdp = next_txbd(txbdp, base, priv->tx_ring_size); 1950 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1324 1951
1325 length = skb_shinfo(skb)->frags[i].size; 1952 length = skb_shinfo(skb)->frags[i].size;
1326 1953
@@ -1362,13 +1989,27 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1362 } 1989 }
1363 1990
1364 /* setup the TxBD length and buffer pointer for the first BD */ 1991 /* setup the TxBD length and buffer pointer for the first BD */
1365 priv->tx_skbuff[priv->skb_curtx] = skb; 1992 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1366 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 1993 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1367 skb_headlen(skb), DMA_TO_DEVICE); 1994 skb_headlen(skb), DMA_TO_DEVICE);
1368 1995
1369 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 1996 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1370 1997
1371 /* 1998 /*
1999 * We can work in parallel with gfar_clean_tx_ring(), except
2000 * when modifying num_txbdfree. Note that we didn't grab the lock
2001 * when we were reading the num_txbdfree and checking for available
2002 * space, that's because outside of this function it can only grow,
2003 * and once we've got needed space, it cannot suddenly disappear.
2004 *
2005 * The lock also protects us from gfar_error(), which can modify
2006 * regs->tstat and thus retrigger the transfers, which is why we
2007 * also must grab the lock before setting ready bit for the first
2008 * to be transmitted BD.
2009 */
2010 spin_lock_irqsave(&tx_queue->txlock, flags);
2011
2012 /*
1372 * The powerpc-specific eieio() is used, as wmb() has too strong 2013 * The powerpc-specific eieio() is used, as wmb() has too strong
1373 * semantics (it requires synchronization between cacheable and 2014 * semantics (it requires synchronization between cacheable and
1374 * uncacheable mappings, which eieio doesn't provide and which we 2015 * uncacheable mappings, which eieio doesn't provide and which we
@@ -1382,29 +2023,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1382 2023
1383 /* Update the current skb pointer to the next entry we will use 2024 /* Update the current skb pointer to the next entry we will use
1384 * (wrapping if necessary) */ 2025 * (wrapping if necessary) */
1385 priv->skb_curtx = (priv->skb_curtx + 1) & 2026 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1386 TX_RING_MOD_MASK(priv->tx_ring_size); 2027 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1387 2028
1388 priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size); 2029 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1389 2030
1390 /* reduce TxBD free count */ 2031 /* reduce TxBD free count */
1391 priv->num_txbdfree -= (nr_frags + 1); 2032 tx_queue->num_txbdfree -= (nr_frags + 1);
1392 2033
1393 dev->trans_start = jiffies; 2034 dev->trans_start = jiffies;
1394 2035
1395 /* If the next BD still needs to be cleaned up, then the bds 2036 /* If the next BD still needs to be cleaned up, then the bds
1396 are full. We need to tell the kernel to stop sending us stuff. */ 2037 are full. We need to tell the kernel to stop sending us stuff. */
1397 if (!priv->num_txbdfree) { 2038 if (!tx_queue->num_txbdfree) {
1398 netif_stop_queue(dev); 2039 netif_tx_stop_queue(txq);
1399 2040
1400 dev->stats.tx_fifo_errors++; 2041 dev->stats.tx_fifo_errors++;
1401 } 2042 }
1402 2043
1403 /* Tell the DMA to go go go */ 2044 /* Tell the DMA to go go go */
1404 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2045 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1405 2046
1406 /* Unlock priv */ 2047 /* Unlock priv */
1407 spin_unlock_irqrestore(&priv->txlock, flags); 2048 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1408 2049
1409 return NETDEV_TX_OK; 2050 return NETDEV_TX_OK;
1410} 2051}
@@ -1414,7 +2055,7 @@ static int gfar_close(struct net_device *dev)
1414{ 2055{
1415 struct gfar_private *priv = netdev_priv(dev); 2056 struct gfar_private *priv = netdev_priv(dev);
1416 2057
1417 napi_disable(&priv->napi); 2058 disable_napi(priv);
1418 2059
1419 skb_queue_purge(&priv->rx_recycle); 2060 skb_queue_purge(&priv->rx_recycle);
1420 cancel_work_sync(&priv->reset_task); 2061 cancel_work_sync(&priv->reset_task);
@@ -1424,7 +2065,7 @@ static int gfar_close(struct net_device *dev)
1424 phy_disconnect(priv->phydev); 2065 phy_disconnect(priv->phydev);
1425 priv->phydev = NULL; 2066 priv->phydev = NULL;
1426 2067
1427 netif_stop_queue(dev); 2068 netif_tx_stop_all_queues(dev);
1428 2069
1429 return 0; 2070 return 0;
1430} 2071}
@@ -1443,50 +2084,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1443 struct vlan_group *grp) 2084 struct vlan_group *grp)
1444{ 2085{
1445 struct gfar_private *priv = netdev_priv(dev); 2086 struct gfar_private *priv = netdev_priv(dev);
2087 struct gfar __iomem *regs = NULL;
1446 unsigned long flags; 2088 unsigned long flags;
1447 u32 tempval; 2089 u32 tempval;
1448 2090
1449 spin_lock_irqsave(&priv->rxlock, flags); 2091 regs = priv->gfargrp[0].regs;
2092 local_irq_save(flags);
2093 lock_rx_qs(priv);
1450 2094
1451 priv->vlgrp = grp; 2095 priv->vlgrp = grp;
1452 2096
1453 if (grp) { 2097 if (grp) {
1454 /* Enable VLAN tag insertion */ 2098 /* Enable VLAN tag insertion */
1455 tempval = gfar_read(&priv->regs->tctrl); 2099 tempval = gfar_read(&regs->tctrl);
1456 tempval |= TCTRL_VLINS; 2100 tempval |= TCTRL_VLINS;
1457 2101
1458 gfar_write(&priv->regs->tctrl, tempval); 2102 gfar_write(&regs->tctrl, tempval);
1459 2103
1460 /* Enable VLAN tag extraction */ 2104 /* Enable VLAN tag extraction */
1461 tempval = gfar_read(&priv->regs->rctrl); 2105 tempval = gfar_read(&regs->rctrl);
1462 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2106 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1463 gfar_write(&priv->regs->rctrl, tempval); 2107 gfar_write(&regs->rctrl, tempval);
1464 } else { 2108 } else {
1465 /* Disable VLAN tag insertion */ 2109 /* Disable VLAN tag insertion */
1466 tempval = gfar_read(&priv->regs->tctrl); 2110 tempval = gfar_read(&regs->tctrl);
1467 tempval &= ~TCTRL_VLINS; 2111 tempval &= ~TCTRL_VLINS;
1468 gfar_write(&priv->regs->tctrl, tempval); 2112 gfar_write(&regs->tctrl, tempval);
1469 2113
1470 /* Disable VLAN tag extraction */ 2114 /* Disable VLAN tag extraction */
1471 tempval = gfar_read(&priv->regs->rctrl); 2115 tempval = gfar_read(&regs->rctrl);
1472 tempval &= ~RCTRL_VLEX; 2116 tempval &= ~RCTRL_VLEX;
1473 /* If parse is no longer required, then disable parser */ 2117 /* If parse is no longer required, then disable parser */
1474 if (tempval & RCTRL_REQ_PARSER) 2118 if (tempval & RCTRL_REQ_PARSER)
1475 tempval |= RCTRL_PRSDEP_INIT; 2119 tempval |= RCTRL_PRSDEP_INIT;
1476 else 2120 else
1477 tempval &= ~RCTRL_PRSDEP_INIT; 2121 tempval &= ~RCTRL_PRSDEP_INIT;
1478 gfar_write(&priv->regs->rctrl, tempval); 2122 gfar_write(&regs->rctrl, tempval);
1479 } 2123 }
1480 2124
1481 gfar_change_mtu(dev, dev->mtu); 2125 gfar_change_mtu(dev, dev->mtu);
1482 2126
1483 spin_unlock_irqrestore(&priv->rxlock, flags); 2127 unlock_rx_qs(priv);
2128 local_irq_restore(flags);
1484} 2129}
1485 2130
1486static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2131static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1487{ 2132{
1488 int tempsize, tempval; 2133 int tempsize, tempval;
1489 struct gfar_private *priv = netdev_priv(dev); 2134 struct gfar_private *priv = netdev_priv(dev);
2135 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1490 int oldsize = priv->rx_buffer_size; 2136 int oldsize = priv->rx_buffer_size;
1491 int frame_size = new_mtu + ETH_HLEN; 2137 int frame_size = new_mtu + ETH_HLEN;
1492 2138
@@ -1518,20 +2164,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1518 2164
1519 dev->mtu = new_mtu; 2165 dev->mtu = new_mtu;
1520 2166
1521 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 2167 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1522 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); 2168 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1523 2169
1524 /* If the mtu is larger than the max size for standard 2170 /* If the mtu is larger than the max size for standard
1525 * ethernet frames (ie, a jumbo frame), then set maccfg2 2171 * ethernet frames (ie, a jumbo frame), then set maccfg2
1526 * to allow huge frames, and to check the length */ 2172 * to allow huge frames, and to check the length */
1527 tempval = gfar_read(&priv->regs->maccfg2); 2173 tempval = gfar_read(&regs->maccfg2);
1528 2174
1529 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 2175 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1530 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2176 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1531 else 2177 else
1532 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2178 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1533 2179
1534 gfar_write(&priv->regs->maccfg2, tempval); 2180 gfar_write(&regs->maccfg2, tempval);
1535 2181
1536 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2182 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1537 startup_gfar(dev); 2183 startup_gfar(dev);
@@ -1551,10 +2197,10 @@ static void gfar_reset_task(struct work_struct *work)
1551 struct net_device *dev = priv->ndev; 2197 struct net_device *dev = priv->ndev;
1552 2198
1553 if (dev->flags & IFF_UP) { 2199 if (dev->flags & IFF_UP) {
1554 netif_stop_queue(dev); 2200 netif_tx_stop_all_queues(dev);
1555 stop_gfar(dev); 2201 stop_gfar(dev);
1556 startup_gfar(dev); 2202 startup_gfar(dev);
1557 netif_start_queue(dev); 2203 netif_tx_start_all_queues(dev);
1558 } 2204 }
1559 2205
1560 netif_tx_schedule_all(dev); 2206 netif_tx_schedule_all(dev);
@@ -1569,24 +2215,29 @@ static void gfar_timeout(struct net_device *dev)
1569} 2215}
1570 2216
1571/* Interrupt Handler for Transmit complete */ 2217/* Interrupt Handler for Transmit complete */
1572static int gfar_clean_tx_ring(struct net_device *dev) 2218static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1573{ 2219{
2220 struct net_device *dev = tx_queue->dev;
1574 struct gfar_private *priv = netdev_priv(dev); 2221 struct gfar_private *priv = netdev_priv(dev);
2222 struct gfar_priv_rx_q *rx_queue = NULL;
1575 struct txbd8 *bdp; 2223 struct txbd8 *bdp;
1576 struct txbd8 *lbdp = NULL; 2224 struct txbd8 *lbdp = NULL;
1577 struct txbd8 *base = priv->tx_bd_base; 2225 struct txbd8 *base = tx_queue->tx_bd_base;
1578 struct sk_buff *skb; 2226 struct sk_buff *skb;
1579 int skb_dirtytx; 2227 int skb_dirtytx;
1580 int tx_ring_size = priv->tx_ring_size; 2228 int tx_ring_size = tx_queue->tx_ring_size;
1581 int frags = 0; 2229 int frags = 0;
1582 int i; 2230 int i;
1583 int howmany = 0; 2231 int howmany = 0;
1584 u32 lstatus; 2232 u32 lstatus;
1585 2233
1586 bdp = priv->dirty_tx; 2234 rx_queue = priv->rx_queue[tx_queue->qindex];
1587 skb_dirtytx = priv->skb_dirtytx; 2235 bdp = tx_queue->dirty_tx;
2236 skb_dirtytx = tx_queue->skb_dirtytx;
2237
2238 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2239 unsigned long flags;
1588 2240
1589 while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1590 frags = skb_shinfo(skb)->nr_frags; 2241 frags = skb_shinfo(skb)->nr_frags;
1591 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2242 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1592 2243
@@ -1618,82 +2269,73 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1618 * If there's room in the queue (limit it to rx_buffer_size) 2269 * If there's room in the queue (limit it to rx_buffer_size)
1619 * we add this skb back into the pool, if it's the right size 2270 * we add this skb back into the pool, if it's the right size
1620 */ 2271 */
1621 if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size && 2272 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
1622 skb_recycle_check(skb, priv->rx_buffer_size + 2273 skb_recycle_check(skb, priv->rx_buffer_size +
1623 RXBUF_ALIGNMENT)) 2274 RXBUF_ALIGNMENT))
1624 __skb_queue_head(&priv->rx_recycle, skb); 2275 __skb_queue_head(&priv->rx_recycle, skb);
1625 else 2276 else
1626 dev_kfree_skb_any(skb); 2277 dev_kfree_skb_any(skb);
1627 2278
1628 priv->tx_skbuff[skb_dirtytx] = NULL; 2279 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
1629 2280
1630 skb_dirtytx = (skb_dirtytx + 1) & 2281 skb_dirtytx = (skb_dirtytx + 1) &
1631 TX_RING_MOD_MASK(tx_ring_size); 2282 TX_RING_MOD_MASK(tx_ring_size);
1632 2283
1633 howmany++; 2284 howmany++;
1634 priv->num_txbdfree += frags + 1; 2285 spin_lock_irqsave(&tx_queue->txlock, flags);
2286 tx_queue->num_txbdfree += frags + 1;
2287 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1635 } 2288 }
1636 2289
1637 /* If we freed a buffer, we can restart transmission, if necessary */ 2290 /* If we freed a buffer, we can restart transmission, if necessary */
1638 if (netif_queue_stopped(dev) && priv->num_txbdfree) 2291 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
1639 netif_wake_queue(dev); 2292 netif_wake_subqueue(dev, tx_queue->qindex);
1640 2293
1641 /* Update dirty indicators */ 2294 /* Update dirty indicators */
1642 priv->skb_dirtytx = skb_dirtytx; 2295 tx_queue->skb_dirtytx = skb_dirtytx;
1643 priv->dirty_tx = bdp; 2296 tx_queue->dirty_tx = bdp;
1644 2297
1645 dev->stats.tx_packets += howmany; 2298 dev->stats.tx_packets += howmany;
1646 2299
1647 return howmany; 2300 return howmany;
1648} 2301}
1649 2302
1650static void gfar_schedule_cleanup(struct net_device *dev) 2303static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
1651{ 2304{
1652 struct gfar_private *priv = netdev_priv(dev);
1653 unsigned long flags; 2305 unsigned long flags;
1654 2306
1655 spin_lock_irqsave(&priv->txlock, flags); 2307 spin_lock_irqsave(&gfargrp->grplock, flags);
1656 spin_lock(&priv->rxlock); 2308 if (napi_schedule_prep(&gfargrp->napi)) {
1657 2309 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
1658 if (napi_schedule_prep(&priv->napi)) { 2310 __napi_schedule(&gfargrp->napi);
1659 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1660 __napi_schedule(&priv->napi);
1661 } else { 2311 } else {
1662 /* 2312 /*
1663 * Clear IEVENT, so interrupts aren't called again 2313 * Clear IEVENT, so interrupts aren't called again
1664 * because of the packets that have already arrived. 2314 * because of the packets that have already arrived.
1665 */ 2315 */
1666 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2316 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
1667 } 2317 }
2318 spin_unlock_irqrestore(&gfargrp->grplock, flags);
1668 2319
1669 spin_unlock(&priv->rxlock);
1670 spin_unlock_irqrestore(&priv->txlock, flags);
1671} 2320}
1672 2321
1673/* Interrupt Handler for Transmit complete */ 2322/* Interrupt Handler for Transmit complete */
1674static irqreturn_t gfar_transmit(int irq, void *dev_id) 2323static irqreturn_t gfar_transmit(int irq, void *grp_id)
1675{ 2324{
1676 gfar_schedule_cleanup((struct net_device *)dev_id); 2325 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1677 return IRQ_HANDLED; 2326 return IRQ_HANDLED;
1678} 2327}
1679 2328
1680static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 2329static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
1681 struct sk_buff *skb) 2330 struct sk_buff *skb)
1682{ 2331{
2332 struct net_device *dev = rx_queue->dev;
1683 struct gfar_private *priv = netdev_priv(dev); 2333 struct gfar_private *priv = netdev_priv(dev);
1684 u32 lstatus; 2334 dma_addr_t buf;
1685
1686 bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1687 priv->rx_buffer_size, DMA_FROM_DEVICE);
1688
1689 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
1690
1691 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1692 lstatus |= BD_LFLAG(RXBD_WRAP);
1693
1694 eieio();
1695 2335
1696 bdp->lstatus = lstatus; 2336 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2337 priv->rx_buffer_size, DMA_FROM_DEVICE);
2338 gfar_init_rxbdp(rx_queue, bdp, buf);
1697} 2339}
1698 2340
1699 2341
@@ -1760,9 +2402,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
1760 } 2402 }
1761} 2403}
1762 2404
1763irqreturn_t gfar_receive(int irq, void *dev_id) 2405irqreturn_t gfar_receive(int irq, void *grp_id)
1764{ 2406{
1765 gfar_schedule_cleanup((struct net_device *)dev_id); 2407 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1766 return IRQ_HANDLED; 2408 return IRQ_HANDLED;
1767} 2409}
1768 2410
@@ -1792,6 +2434,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1792 fcb = (struct rxfcb *)skb->data; 2434 fcb = (struct rxfcb *)skb->data;
1793 2435
1794 /* Remove the FCB from the skb */ 2436 /* Remove the FCB from the skb */
2437 skb_set_queue_mapping(skb, fcb->rq);
1795 /* Remove the padded bytes, if there are any */ 2438 /* Remove the padded bytes, if there are any */
1796 if (amount_pull) 2439 if (amount_pull)
1797 skb_pull(skb, amount_pull); 2440 skb_pull(skb, amount_pull);
@@ -1818,8 +2461,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1818 * until the budget/quota has been reached. Returns the number 2461 * until the budget/quota has been reached. Returns the number
1819 * of frames handled 2462 * of frames handled
1820 */ 2463 */
1821int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 2464int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
1822{ 2465{
2466 struct net_device *dev = rx_queue->dev;
1823 struct rxbd8 *bdp, *base; 2467 struct rxbd8 *bdp, *base;
1824 struct sk_buff *skb; 2468 struct sk_buff *skb;
1825 int pkt_len; 2469 int pkt_len;
@@ -1828,8 +2472,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1828 struct gfar_private *priv = netdev_priv(dev); 2472 struct gfar_private *priv = netdev_priv(dev);
1829 2473
1830 /* Get the first full descriptor */ 2474 /* Get the first full descriptor */
1831 bdp = priv->cur_rx; 2475 bdp = rx_queue->cur_rx;
1832 base = priv->rx_bd_base; 2476 base = rx_queue->rx_bd_base;
1833 2477
1834 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + 2478 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1835 priv->padding; 2479 priv->padding;
@@ -1841,7 +2485,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1841 /* Add another skb for the future */ 2485 /* Add another skb for the future */
1842 newskb = gfar_new_skb(dev); 2486 newskb = gfar_new_skb(dev);
1843 2487
1844 skb = priv->rx_skbuff[priv->skb_currx]; 2488 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
1845 2489
1846 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2490 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
1847 priv->rx_buffer_size, DMA_FROM_DEVICE); 2491 priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1875,8 +2519,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1875 skb_put(skb, pkt_len); 2519 skb_put(skb, pkt_len);
1876 dev->stats.rx_bytes += pkt_len; 2520 dev->stats.rx_bytes += pkt_len;
1877 2521
1878 if (in_irq() || irqs_disabled())
1879 printk("Interrupt problem!\n");
1880 gfar_process_frame(dev, skb, amount_pull); 2522 gfar_process_frame(dev, skb, amount_pull);
1881 2523
1882 } else { 2524 } else {
@@ -1889,46 +2531,70 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1889 2531
1890 } 2532 }
1891 2533
1892 priv->rx_skbuff[priv->skb_currx] = newskb; 2534 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
1893 2535
1894 /* Setup the new bdp */ 2536 /* Setup the new bdp */
1895 gfar_new_rxbdp(dev, bdp, newskb); 2537 gfar_new_rxbdp(rx_queue, bdp, newskb);
1896 2538
1897 /* Update to the next pointer */ 2539 /* Update to the next pointer */
1898 bdp = next_bd(bdp, base, priv->rx_ring_size); 2540 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
1899 2541
1900 /* update to point at the next skb */ 2542 /* update to point at the next skb */
1901 priv->skb_currx = 2543 rx_queue->skb_currx =
1902 (priv->skb_currx + 1) & 2544 (rx_queue->skb_currx + 1) &
1903 RX_RING_MOD_MASK(priv->rx_ring_size); 2545 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
1904 } 2546 }
1905 2547
1906 /* Update the current rxbd pointer to be the next one */ 2548 /* Update the current rxbd pointer to be the next one */
1907 priv->cur_rx = bdp; 2549 rx_queue->cur_rx = bdp;
1908 2550
1909 return howmany; 2551 return howmany;
1910} 2552}
1911 2553
1912static int gfar_poll(struct napi_struct *napi, int budget) 2554static int gfar_poll(struct napi_struct *napi, int budget)
1913{ 2555{
1914 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 2556 struct gfar_priv_grp *gfargrp = container_of(napi,
1915 struct net_device *dev = priv->ndev; 2557 struct gfar_priv_grp, napi);
1916 int tx_cleaned = 0; 2558 struct gfar_private *priv = gfargrp->priv;
1917 int rx_cleaned = 0; 2559 struct gfar __iomem *regs = gfargrp->regs;
1918 unsigned long flags; 2560 struct gfar_priv_tx_q *tx_queue = NULL;
2561 struct gfar_priv_rx_q *rx_queue = NULL;
2562 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2563 int tx_cleaned = 0, i, left_over_budget = budget;
2564 unsigned long serviced_queues = 0;
2565 int num_queues = 0;
2566
2567 num_queues = gfargrp->num_rx_queues;
2568 budget_per_queue = budget/num_queues;
1919 2569
1920 /* Clear IEVENT, so interrupts aren't called again 2570 /* Clear IEVENT, so interrupts aren't called again
1921 * because of the packets that have already arrived */ 2571 * because of the packets that have already arrived */
1922 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2572 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
1923 2573
1924 /* If we fail to get the lock, don't bother with the TX BDs */ 2574 while (num_queues && left_over_budget) {
1925 if (spin_trylock_irqsave(&priv->txlock, flags)) { 2575
1926 tx_cleaned = gfar_clean_tx_ring(dev); 2576 budget_per_queue = left_over_budget/num_queues;
1927 spin_unlock_irqrestore(&priv->txlock, flags); 2577 left_over_budget = 0;
2578
2579 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2580 if (test_bit(i, &serviced_queues))
2581 continue;
2582 rx_queue = priv->rx_queue[i];
2583 tx_queue = priv->tx_queue[rx_queue->qindex];
2584
2585 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2586 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2587 budget_per_queue);
2588 rx_cleaned += rx_cleaned_per_queue;
2589 if(rx_cleaned_per_queue < budget_per_queue) {
2590 left_over_budget = left_over_budget +
2591 (budget_per_queue - rx_cleaned_per_queue);
2592 set_bit(i, &serviced_queues);
2593 num_queues--;
2594 }
2595 }
1928 } 2596 }
1929 2597
1930 rx_cleaned = gfar_clean_rx_ring(dev, budget);
1931
1932 if (tx_cleaned) 2598 if (tx_cleaned)
1933 return budget; 2599 return budget;
1934 2600
@@ -1936,20 +2602,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1936 napi_complete(napi); 2602 napi_complete(napi);
1937 2603
1938 /* Clear the halt bit in RSTAT */ 2604 /* Clear the halt bit in RSTAT */
1939 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 2605 gfar_write(&regs->rstat, gfargrp->rstat);
1940 2606
1941 gfar_write(&priv->regs->imask, IMASK_DEFAULT); 2607 gfar_write(&regs->imask, IMASK_DEFAULT);
1942 2608
1943 /* If we are coalescing interrupts, update the timer */ 2609 /* If we are coalescing interrupts, update the timer */
1944 /* Otherwise, clear it */ 2610 /* Otherwise, clear it */
1945 if (likely(priv->rxcoalescing)) { 2611 gfar_configure_coalescing(priv,
1946 gfar_write(&priv->regs->rxic, 0); 2612 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
1947 gfar_write(&priv->regs->rxic, priv->rxic);
1948 }
1949 if (likely(priv->txcoalescing)) {
1950 gfar_write(&priv->regs->txic, 0);
1951 gfar_write(&priv->regs->txic, priv->txic);
1952 }
1953 } 2613 }
1954 2614
1955 return rx_cleaned; 2615 return rx_cleaned;
@@ -1964,44 +2624,49 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1964static void gfar_netpoll(struct net_device *dev) 2624static void gfar_netpoll(struct net_device *dev)
1965{ 2625{
1966 struct gfar_private *priv = netdev_priv(dev); 2626 struct gfar_private *priv = netdev_priv(dev);
2627 int i = 0;
1967 2628
1968 /* If the device has multiple interrupts, run tx/rx */ 2629 /* If the device has multiple interrupts, run tx/rx */
1969 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2630 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1970 disable_irq(priv->interruptTransmit); 2631 for (i = 0; i < priv->num_grps; i++) {
1971 disable_irq(priv->interruptReceive); 2632 disable_irq(priv->gfargrp[i].interruptTransmit);
1972 disable_irq(priv->interruptError); 2633 disable_irq(priv->gfargrp[i].interruptReceive);
1973 gfar_interrupt(priv->interruptTransmit, dev); 2634 disable_irq(priv->gfargrp[i].interruptError);
1974 enable_irq(priv->interruptError); 2635 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
1975 enable_irq(priv->interruptReceive); 2636 &priv->gfargrp[i]);
1976 enable_irq(priv->interruptTransmit); 2637 enable_irq(priv->gfargrp[i].interruptError);
2638 enable_irq(priv->gfargrp[i].interruptReceive);
2639 enable_irq(priv->gfargrp[i].interruptTransmit);
2640 }
1977 } else { 2641 } else {
1978 disable_irq(priv->interruptTransmit); 2642 for (i = 0; i < priv->num_grps; i++) {
1979 gfar_interrupt(priv->interruptTransmit, dev); 2643 disable_irq(priv->gfargrp[i].interruptTransmit);
1980 enable_irq(priv->interruptTransmit); 2644 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2645 &priv->gfargrp[i]);
2646 enable_irq(priv->gfargrp[i].interruptTransmit);
1981 } 2647 }
1982} 2648}
1983#endif 2649#endif
1984 2650
1985/* The interrupt handler for devices with one interrupt */ 2651/* The interrupt handler for devices with one interrupt */
1986static irqreturn_t gfar_interrupt(int irq, void *dev_id) 2652static irqreturn_t gfar_interrupt(int irq, void *grp_id)
1987{ 2653{
1988 struct net_device *dev = dev_id; 2654 struct gfar_priv_grp *gfargrp = grp_id;
1989 struct gfar_private *priv = netdev_priv(dev);
1990 2655
1991 /* Save ievent for future reference */ 2656 /* Save ievent for future reference */
1992 u32 events = gfar_read(&priv->regs->ievent); 2657 u32 events = gfar_read(&gfargrp->regs->ievent);
1993 2658
1994 /* Check for reception */ 2659 /* Check for reception */
1995 if (events & IEVENT_RX_MASK) 2660 if (events & IEVENT_RX_MASK)
1996 gfar_receive(irq, dev_id); 2661 gfar_receive(irq, grp_id);
1997 2662
1998 /* Check for transmit completion */ 2663 /* Check for transmit completion */
1999 if (events & IEVENT_TX_MASK) 2664 if (events & IEVENT_TX_MASK)
2000 gfar_transmit(irq, dev_id); 2665 gfar_transmit(irq, grp_id);
2001 2666
2002 /* Check for errors */ 2667 /* Check for errors */
2003 if (events & IEVENT_ERR_MASK) 2668 if (events & IEVENT_ERR_MASK)
2004 gfar_error(irq, dev_id); 2669 gfar_error(irq, grp_id);
2005 2670
2006 return IRQ_HANDLED; 2671 return IRQ_HANDLED;
2007} 2672}
@@ -2015,12 +2680,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
2015static void adjust_link(struct net_device *dev) 2680static void adjust_link(struct net_device *dev)
2016{ 2681{
2017 struct gfar_private *priv = netdev_priv(dev); 2682 struct gfar_private *priv = netdev_priv(dev);
2018 struct gfar __iomem *regs = priv->regs; 2683 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2019 unsigned long flags; 2684 unsigned long flags;
2020 struct phy_device *phydev = priv->phydev; 2685 struct phy_device *phydev = priv->phydev;
2021 int new_state = 0; 2686 int new_state = 0;
2022 2687
2023 spin_lock_irqsave(&priv->txlock, flags); 2688 local_irq_save(flags);
2689 lock_tx_qs(priv);
2690
2024 if (phydev->link) { 2691 if (phydev->link) {
2025 u32 tempval = gfar_read(&regs->maccfg2); 2692 u32 tempval = gfar_read(&regs->maccfg2);
2026 u32 ecntrl = gfar_read(&regs->ecntrl); 2693 u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2085,8 +2752,8 @@ static void adjust_link(struct net_device *dev)
2085 2752
2086 if (new_state && netif_msg_link(priv)) 2753 if (new_state && netif_msg_link(priv))
2087 phy_print_status(phydev); 2754 phy_print_status(phydev);
2088 2755 unlock_tx_qs(priv);
2089 spin_unlock_irqrestore(&priv->txlock, flags); 2756 local_irq_restore(flags);
2090} 2757}
2091 2758
2092/* Update the hash table based on the current list of multicast 2759/* Update the hash table based on the current list of multicast
@@ -2097,10 +2764,10 @@ static void gfar_set_multi(struct net_device *dev)
2097{ 2764{
2098 struct dev_mc_list *mc_ptr; 2765 struct dev_mc_list *mc_ptr;
2099 struct gfar_private *priv = netdev_priv(dev); 2766 struct gfar_private *priv = netdev_priv(dev);
2100 struct gfar __iomem *regs = priv->regs; 2767 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2101 u32 tempval; 2768 u32 tempval;
2102 2769
2103 if(dev->flags & IFF_PROMISC) { 2770 if (dev->flags & IFF_PROMISC) {
2104 /* Set RCTRL to PROM */ 2771 /* Set RCTRL to PROM */
2105 tempval = gfar_read(&regs->rctrl); 2772 tempval = gfar_read(&regs->rctrl);
2106 tempval |= RCTRL_PROM; 2773 tempval |= RCTRL_PROM;
@@ -2112,7 +2779,7 @@ static void gfar_set_multi(struct net_device *dev)
2112 gfar_write(&regs->rctrl, tempval); 2779 gfar_write(&regs->rctrl, tempval);
2113 } 2780 }
2114 2781
2115 if(dev->flags & IFF_ALLMULTI) { 2782 if (dev->flags & IFF_ALLMULTI) {
2116 /* Set the hash to rx all multicast frames */ 2783 /* Set the hash to rx all multicast frames */
2117 gfar_write(&regs->igaddr0, 0xffffffff); 2784 gfar_write(&regs->igaddr0, 0xffffffff);
2118 gfar_write(&regs->igaddr1, 0xffffffff); 2785 gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2164,7 +2831,7 @@ static void gfar_set_multi(struct net_device *dev)
2164 em_num = 0; 2831 em_num = 0;
2165 } 2832 }
2166 2833
2167 if(dev->mc_count == 0) 2834 if (dev->mc_count == 0)
2168 return; 2835 return;
2169 2836
2170 /* Parse the list, and set the appropriate bits */ 2837 /* Parse the list, and set the appropriate bits */
@@ -2230,10 +2897,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2230static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 2897static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2231{ 2898{
2232 struct gfar_private *priv = netdev_priv(dev); 2899 struct gfar_private *priv = netdev_priv(dev);
2900 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2233 int idx; 2901 int idx;
2234 char tmpbuf[MAC_ADDR_LEN]; 2902 char tmpbuf[MAC_ADDR_LEN];
2235 u32 tempval; 2903 u32 tempval;
2236 u32 __iomem *macptr = &priv->regs->macstnaddr1; 2904 u32 __iomem *macptr = &regs->macstnaddr1;
2237 2905
2238 macptr += num*2; 2906 macptr += num*2;
2239 2907
@@ -2250,16 +2918,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2250} 2918}
2251 2919
2252/* GFAR error interrupt handler */ 2920/* GFAR error interrupt handler */
2253static irqreturn_t gfar_error(int irq, void *dev_id) 2921static irqreturn_t gfar_error(int irq, void *grp_id)
2254{ 2922{
2255 struct net_device *dev = dev_id; 2923 struct gfar_priv_grp *gfargrp = grp_id;
2256 struct gfar_private *priv = netdev_priv(dev); 2924 struct gfar __iomem *regs = gfargrp->regs;
2925 struct gfar_private *priv= gfargrp->priv;
2926 struct net_device *dev = priv->ndev;
2257 2927
2258 /* Save ievent for future reference */ 2928 /* Save ievent for future reference */
2259 u32 events = gfar_read(&priv->regs->ievent); 2929 u32 events = gfar_read(&regs->ievent);
2260 2930
2261 /* Clear IEVENT */ 2931 /* Clear IEVENT */
2262 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK); 2932 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2263 2933
2264 /* Magic Packet is not an error. */ 2934 /* Magic Packet is not an error. */
2265 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2935 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2269,7 +2939,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2269 /* Hmm... */ 2939 /* Hmm... */
2270 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 2940 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2271 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 2941 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2272 dev->name, events, gfar_read(&priv->regs->imask)); 2942 dev->name, events, gfar_read(&regs->imask));
2273 2943
2274 /* Update the error counters */ 2944 /* Update the error counters */
2275 if (events & IEVENT_TXE) { 2945 if (events & IEVENT_TXE) {
@@ -2280,14 +2950,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2280 if (events & IEVENT_CRL) 2950 if (events & IEVENT_CRL)
2281 dev->stats.tx_aborted_errors++; 2951 dev->stats.tx_aborted_errors++;
2282 if (events & IEVENT_XFUN) { 2952 if (events & IEVENT_XFUN) {
2953 unsigned long flags;
2954
2283 if (netif_msg_tx_err(priv)) 2955 if (netif_msg_tx_err(priv))
2284 printk(KERN_DEBUG "%s: TX FIFO underrun, " 2956 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2285 "packet dropped.\n", dev->name); 2957 "packet dropped.\n", dev->name);
2286 dev->stats.tx_dropped++; 2958 dev->stats.tx_dropped++;
2287 priv->extra_stats.tx_underrun++; 2959 priv->extra_stats.tx_underrun++;
2288 2960
2961 local_irq_save(flags);
2962 lock_tx_qs(priv);
2963
2289 /* Reactivate the Tx Queues */ 2964 /* Reactivate the Tx Queues */
2290 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2965 gfar_write(&regs->tstat, gfargrp->tstat);
2966
2967 unlock_tx_qs(priv);
2968 local_irq_restore(flags);
2291 } 2969 }
2292 if (netif_msg_tx_err(priv)) 2970 if (netif_msg_tx_err(priv))
2293 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 2971 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2296,11 +2974,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2296 dev->stats.rx_errors++; 2974 dev->stats.rx_errors++;
2297 priv->extra_stats.rx_bsy++; 2975 priv->extra_stats.rx_bsy++;
2298 2976
2299 gfar_receive(irq, dev_id); 2977 gfar_receive(irq, grp_id);
2300 2978
2301 if (netif_msg_rx_err(priv)) 2979 if (netif_msg_rx_err(priv))
2302 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 2980 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2303 dev->name, gfar_read(&priv->regs->rstat)); 2981 dev->name, gfar_read(&regs->rstat));
2304 } 2982 }
2305 if (events & IEVENT_BABR) { 2983 if (events & IEVENT_BABR) {
2306 dev->stats.rx_errors++; 2984 dev->stats.rx_errors++;
@@ -2331,6 +3009,9 @@ static struct of_device_id gfar_match[] =
2331 .type = "network", 3009 .type = "network",
2332 .compatible = "gianfar", 3010 .compatible = "gianfar",
2333 }, 3011 },
3012 {
3013 .compatible = "fsl,etsec2",
3014 },
2334 {}, 3015 {},
2335}; 3016};
2336MODULE_DEVICE_TABLE(of, gfar_match); 3017MODULE_DEVICE_TABLE(of, gfar_match);
@@ -2342,8 +3023,9 @@ static struct of_platform_driver gfar_driver = {
2342 3023
2343 .probe = gfar_probe, 3024 .probe = gfar_probe,
2344 .remove = gfar_remove, 3025 .remove = gfar_remove,
2345 .suspend = gfar_suspend, 3026 .suspend = gfar_legacy_suspend,
2346 .resume = gfar_resume, 3027 .resume = gfar_legacy_resume,
3028 .driver.pm = GFAR_PM_OPS,
2347}; 3029};
2348 3030
2349static int __init gfar_init(void) 3031static int __init gfar_init(void)
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 2cd94338b5d3..cbb451011cb5 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -7,8 +7,9 @@
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
10 * 11 *
11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009 Freescale Semiconductor, Inc.
12 * 13 *
13 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
@@ -74,6 +75,13 @@
74extern const char gfar_driver_name[]; 75extern const char gfar_driver_name[];
75extern const char gfar_driver_version[]; 76extern const char gfar_driver_version[];
76 77
78/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
79#define MAX_TX_QS 0x8
80#define MAX_RX_QS 0x8
81
82/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
83#define MAXGROUPS 0x2
84
77/* These need to be powers of 2 for this driver */ 85/* These need to be powers of 2 for this driver */
78#define DEFAULT_TX_RING_SIZE 256 86#define DEFAULT_TX_RING_SIZE 256
79#define DEFAULT_RX_RING_SIZE 256 87#define DEFAULT_RX_RING_SIZE 256
@@ -171,12 +179,63 @@ extern const char gfar_driver_version[];
171 179
172#define MINFLR_INIT_SETTINGS 0x00000040 180#define MINFLR_INIT_SETTINGS 0x00000040
173 181
182/* Tqueue control */
183#define TQUEUE_EN0 0x00008000
184#define TQUEUE_EN1 0x00004000
185#define TQUEUE_EN2 0x00002000
186#define TQUEUE_EN3 0x00001000
187#define TQUEUE_EN4 0x00000800
188#define TQUEUE_EN5 0x00000400
189#define TQUEUE_EN6 0x00000200
190#define TQUEUE_EN7 0x00000100
191#define TQUEUE_EN_ALL 0x0000FF00
192
193#define TR03WT_WT0_MASK 0xFF000000
194#define TR03WT_WT1_MASK 0x00FF0000
195#define TR03WT_WT2_MASK 0x0000FF00
196#define TR03WT_WT3_MASK 0x000000FF
197
198#define TR47WT_WT4_MASK 0xFF000000
199#define TR47WT_WT5_MASK 0x00FF0000
200#define TR47WT_WT6_MASK 0x0000FF00
201#define TR47WT_WT7_MASK 0x000000FF
202
203/* Rqueue control */
204#define RQUEUE_EX0 0x00800000
205#define RQUEUE_EX1 0x00400000
206#define RQUEUE_EX2 0x00200000
207#define RQUEUE_EX3 0x00100000
208#define RQUEUE_EX4 0x00080000
209#define RQUEUE_EX5 0x00040000
210#define RQUEUE_EX6 0x00020000
211#define RQUEUE_EX7 0x00010000
212#define RQUEUE_EX_ALL 0x00FF0000
213
214#define RQUEUE_EN0 0x00000080
215#define RQUEUE_EN1 0x00000040
216#define RQUEUE_EN2 0x00000020
217#define RQUEUE_EN3 0x00000010
218#define RQUEUE_EN4 0x00000008
219#define RQUEUE_EN5 0x00000004
220#define RQUEUE_EN6 0x00000002
221#define RQUEUE_EN7 0x00000001
222#define RQUEUE_EN_ALL 0x000000FF
223
174/* Init to do tx snooping for buffers and descriptors */ 224/* Init to do tx snooping for buffers and descriptors */
175#define DMACTRL_INIT_SETTINGS 0x000000c3 225#define DMACTRL_INIT_SETTINGS 0x000000c3
176#define DMACTRL_GRS 0x00000010 226#define DMACTRL_GRS 0x00000010
177#define DMACTRL_GTS 0x00000008 227#define DMACTRL_GTS 0x00000008
178 228
179#define TSTAT_CLEAR_THALT 0x80000000 229#define TSTAT_CLEAR_THALT_ALL 0xFF000000
230#define TSTAT_CLEAR_THALT 0x80000000
231#define TSTAT_CLEAR_THALT0 0x80000000
232#define TSTAT_CLEAR_THALT1 0x40000000
233#define TSTAT_CLEAR_THALT2 0x20000000
234#define TSTAT_CLEAR_THALT3 0x10000000
235#define TSTAT_CLEAR_THALT4 0x08000000
236#define TSTAT_CLEAR_THALT5 0x04000000
237#define TSTAT_CLEAR_THALT6 0x02000000
238#define TSTAT_CLEAR_THALT7 0x01000000
180 239
181/* Interrupt coalescing macros */ 240/* Interrupt coalescing macros */
182#define IC_ICEN 0x80000000 241#define IC_ICEN 0x80000000
@@ -227,6 +286,13 @@ extern const char gfar_driver_version[];
227#define TCTRL_IPCSEN 0x00004000 286#define TCTRL_IPCSEN 0x00004000
228#define TCTRL_TUCSEN 0x00002000 287#define TCTRL_TUCSEN 0x00002000
229#define TCTRL_VLINS 0x00001000 288#define TCTRL_VLINS 0x00001000
289#define TCTRL_THDF 0x00000800
290#define TCTRL_RFCPAUSE 0x00000010
291#define TCTRL_TFCPAUSE 0x00000008
292#define TCTRL_TXSCHED_MASK 0x00000006
293#define TCTRL_TXSCHED_INIT 0x00000000
294#define TCTRL_TXSCHED_PRIO 0x00000002
295#define TCTRL_TXSCHED_WRRS 0x00000004
230#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) 296#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
231 297
232#define IEVENT_INIT_CLEAR 0xffffffff 298#define IEVENT_INIT_CLEAR 0xffffffff
@@ -315,6 +381,84 @@ extern const char gfar_driver_version[];
315#define BD_LFLAG(flags) ((flags) << 16) 381#define BD_LFLAG(flags) ((flags) << 16)
316#define BD_LENGTH_MASK 0x0000ffff 382#define BD_LENGTH_MASK 0x0000ffff
317 383
384#define CLASS_CODE_UNRECOG 0x00
385#define CLASS_CODE_DUMMY1 0x01
386#define CLASS_CODE_ETHERTYPE1 0x02
387#define CLASS_CODE_ETHERTYPE2 0x03
388#define CLASS_CODE_USER_PROG1 0x04
389#define CLASS_CODE_USER_PROG2 0x05
390#define CLASS_CODE_USER_PROG3 0x06
391#define CLASS_CODE_USER_PROG4 0x07
392#define CLASS_CODE_TCP_IPV4 0x08
393#define CLASS_CODE_UDP_IPV4 0x09
394#define CLASS_CODE_AH_ESP_IPV4 0x0a
395#define CLASS_CODE_SCTP_IPV4 0x0b
396#define CLASS_CODE_TCP_IPV6 0x0c
397#define CLASS_CODE_UDP_IPV6 0x0d
398#define CLASS_CODE_AH_ESP_IPV6 0x0e
399#define CLASS_CODE_SCTP_IPV6 0x0f
400
401#define FPR_FILER_MASK 0xFFFFFFFF
402#define MAX_FILER_IDX 0xFF
403
404/* RQFCR register bits */
405#define RQFCR_GPI 0x80000000
406#define RQFCR_HASHTBL_Q 0x00000000
407#define RQFCR_HASHTBL_0 0x00020000
408#define RQFCR_HASHTBL_1 0x00040000
409#define RQFCR_HASHTBL_2 0x00060000
410#define RQFCR_HASHTBL_3 0x00080000
411#define RQFCR_HASH 0x00010000
412#define RQFCR_CLE 0x00000200
413#define RQFCR_RJE 0x00000100
414#define RQFCR_AND 0x00000080
415#define RQFCR_CMP_EXACT 0x00000000
416#define RQFCR_CMP_MATCH 0x00000020
417#define RQFCR_CMP_NOEXACT 0x00000040
418#define RQFCR_CMP_NOMATCH 0x00000060
419
420/* RQFCR PID values */
421#define RQFCR_PID_MASK 0x00000000
422#define RQFCR_PID_PARSE 0x00000001
423#define RQFCR_PID_ARB 0x00000002
424#define RQFCR_PID_DAH 0x00000003
425#define RQFCR_PID_DAL 0x00000004
426#define RQFCR_PID_SAH 0x00000005
427#define RQFCR_PID_SAL 0x00000006
428#define RQFCR_PID_ETY 0x00000007
429#define RQFCR_PID_VID 0x00000008
430#define RQFCR_PID_PRI 0x00000009
431#define RQFCR_PID_TOS 0x0000000A
432#define RQFCR_PID_L4P 0x0000000B
433#define RQFCR_PID_DIA 0x0000000C
434#define RQFCR_PID_SIA 0x0000000D
435#define RQFCR_PID_DPT 0x0000000E
436#define RQFCR_PID_SPT 0x0000000F
437
438/* RQFPR when PID is 0x0001 */
439#define RQFPR_HDR_GE_512 0x00200000
440#define RQFPR_LERR 0x00100000
441#define RQFPR_RAR 0x00080000
442#define RQFPR_RARQ 0x00040000
443#define RQFPR_AR 0x00020000
444#define RQFPR_ARQ 0x00010000
445#define RQFPR_EBC 0x00008000
446#define RQFPR_VLN 0x00004000
447#define RQFPR_CFI 0x00002000
448#define RQFPR_JUM 0x00001000
449#define RQFPR_IPF 0x00000800
450#define RQFPR_FIF 0x00000400
451#define RQFPR_IPV4 0x00000200
452#define RQFPR_IPV6 0x00000100
453#define RQFPR_ICC 0x00000080
454#define RQFPR_ICV 0x00000040
455#define RQFPR_TCP 0x00000020
456#define RQFPR_UDP 0x00000010
457#define RQFPR_TUC 0x00000008
458#define RQFPR_TUV 0x00000004
459#define RQFPR_PER 0x00000002
460#define RQFPR_EER 0x00000001
461
318/* TxBD status field bits */ 462/* TxBD status field bits */
319#define TXBD_READY 0x8000 463#define TXBD_READY 0x8000
320#define TXBD_PADCRC 0x4000 464#define TXBD_PADCRC 0x4000
@@ -503,25 +647,32 @@ struct gfar_stats {
503 647
504struct gfar { 648struct gfar {
505 u32 tsec_id; /* 0x.000 - Controller ID register */ 649 u32 tsec_id; /* 0x.000 - Controller ID register */
506 u8 res1[12]; 650 u32 tsec_id2; /* 0x.004 - Controller ID2 register */
651 u8 res1[8];
507 u32 ievent; /* 0x.010 - Interrupt Event Register */ 652 u32 ievent; /* 0x.010 - Interrupt Event Register */
508 u32 imask; /* 0x.014 - Interrupt Mask Register */ 653 u32 imask; /* 0x.014 - Interrupt Mask Register */
509 u32 edis; /* 0x.018 - Error Disabled Register */ 654 u32 edis; /* 0x.018 - Error Disabled Register */
510 u8 res2[4]; 655 u32 emapg; /* 0x.01c - Group Error mapping register */
511 u32 ecntrl; /* 0x.020 - Ethernet Control Register */ 656 u32 ecntrl; /* 0x.020 - Ethernet Control Register */
512 u32 minflr; /* 0x.024 - Minimum Frame Length Register */ 657 u32 minflr; /* 0x.024 - Minimum Frame Length Register */
513 u32 ptv; /* 0x.028 - Pause Time Value Register */ 658 u32 ptv; /* 0x.028 - Pause Time Value Register */
514 u32 dmactrl; /* 0x.02c - DMA Control Register */ 659 u32 dmactrl; /* 0x.02c - DMA Control Register */
515 u32 tbipa; /* 0x.030 - TBI PHY Address Register */ 660 u32 tbipa; /* 0x.030 - TBI PHY Address Register */
516 u8 res3[88]; 661 u8 res2[28];
662 u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold
663 register */
664 u32 fifo_rx_pause_shutoff; /* x.054 - FIFO receive starve shutoff
665 register */
666 u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold
667 register */
668 u32 fifo_rx_alarm_shutoff; /*0x.05c - FIFO receive alarm starve
669 shutoff register */
670 u8 res3[44];
517 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ 671 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
518 u8 res4[8]; 672 u8 res4[8];
519 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ 673 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
520 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ 674 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
521 u8 res5[4]; 675 u8 res5[96];
522 u32 fifo_rx_pause; /* 0x.0a4 - FIFO receive pause threshold register */
523 u32 fifo_rx_alarm; /* 0x.0a8 - FIFO receive alarm threshold register */
524 u8 res6[84];
525 u32 tctrl; /* 0x.100 - Transmit Control Register */ 676 u32 tctrl; /* 0x.100 - Transmit Control Register */
526 u32 tstat; /* 0x.104 - Transmit Status Register */ 677 u32 tstat; /* 0x.104 - Transmit Status Register */
527 u32 dfvlan; /* 0x.108 - Default VLAN Control word */ 678 u32 dfvlan; /* 0x.108 - Default VLAN Control word */
@@ -572,7 +723,11 @@ struct gfar {
572 u8 res12[8]; 723 u8 res12[8];
573 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ 724 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
574 u32 rqueue; /* 0x.314 - Receive queue control register */ 725 u32 rqueue; /* 0x.314 - Receive queue control register */
575 u8 res13[24]; 726 u32 rir0; /* 0x.318 - Ring mapping register 0 */
727 u32 rir1; /* 0x.31c - Ring mapping register 1 */
728 u32 rir2; /* 0x.320 - Ring mapping register 2 */
729 u32 rir3; /* 0x.324 - Ring mapping register 3 */
730 u8 res13[8];
576 u32 rbifx; /* 0x.330 - Receive bit field extract control register */ 731 u32 rbifx; /* 0x.330 - Receive bit field extract control register */
577 u32 rqfar; /* 0x.334 - Receive queue filing table address register */ 732 u32 rqfar; /* 0x.334 - Receive queue filing table address register */
578 u32 rqfcr; /* 0x.338 - Receive queue filing table control register */ 733 u32 rqfcr; /* 0x.338 - Receive queue filing table control register */
@@ -621,7 +776,7 @@ struct gfar {
621 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ 776 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
622 u8 res18[12]; 777 u8 res18[12];
623 u8 gfar_mii_regs[24]; /* See gianfar_phy.h */ 778 u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
624 u8 res19[4]; 779 u32 ifctrl; /* 0x.538 - Interface control register */
625 u32 ifstat; /* 0x.53c - Interface Status Register */ 780 u32 ifstat; /* 0x.53c - Interface Status Register */
626 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ 781 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
627 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ 782 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
@@ -682,8 +837,30 @@ struct gfar {
682 u8 res23c[248]; 837 u8 res23c[248];
683 u32 attr; /* 0x.bf8 - Attributes Register */ 838 u32 attr; /* 0x.bf8 - Attributes Register */
684 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ 839 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
685 u8 res24[1024]; 840 u8 res24[688];
686 841 u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
842 u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
843 u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
844 u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */
845 u8 res25[16];
846 u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */
847 u32 rxic1; /* 0x.ed4 - Ring 1 Rx interrupt coalescing */
848 u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */
849 u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */
850 u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */
851 u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */
852 u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */
853 u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */
854 u8 res26[32];
855 u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */
856 u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */
857 u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */
858 u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */
859 u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */
860 u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */
861 u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */
862 u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */
863 u8 res27[208];
687}; 864};
688 865
689/* Flags related to gianfar device features */ 866/* Flags related to gianfar device features */
@@ -699,6 +876,133 @@ struct gfar {
699#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 876#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
700#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 877#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
701 878
879#if (MAXGROUPS == 2)
880#define DEFAULT_MAPPING 0xAA
881#else
882#define DEFAULT_MAPPING 0xFF
883#endif
884
885#define ISRG_SHIFT_TX 0x10
886#define ISRG_SHIFT_RX 0x18
887
888/* The same driver can operate in two modes */
889/* SQ_SG_MODE: Single Queue Single Group Mode
890 * (Backward compatible mode)
891 * MQ_MG_MODE: Multi Queue Multi Group mode
892 */
893enum {
894 SQ_SG_MODE = 0,
895 MQ_MG_MODE
896};
897
898/**
899 * struct gfar_priv_tx_q - per tx queue structure
900 * @txlock: per queue tx spin lock
901 * @tx_skbuff:skb pointers
902 * @skb_curtx: to be used skb pointer
903 * @skb_dirtytx:the last used skb pointer
904 * @qindex: index of this queue
905 * @dev: back pointer to the dev structure
906 * @grp: back pointer to the group to which this queue belongs
907 * @tx_bd_base: First tx buffer descriptor
908 * @cur_tx: Next free ring entry
909 * @dirty_tx: First buffer in line to be transmitted
910 * @tx_ring_size: Tx ring size
911 * @num_txbdfree: number of free TxBds
912 * @txcoalescing: enable/disable tx coalescing
913 * @txic: transmit interrupt coalescing value
914 * @txcount: coalescing value if based on tx frame count
915 * @txtime: coalescing value if based on time
916 */
917struct gfar_priv_tx_q {
918 spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
919 struct sk_buff ** tx_skbuff;
920 /* Buffer descriptor pointers */
921 dma_addr_t tx_bd_dma_base;
922 struct txbd8 *tx_bd_base;
923 struct txbd8 *cur_tx;
924 struct txbd8 *dirty_tx;
925 struct net_device *dev;
926 struct gfar_priv_grp *grp;
927 u16 skb_curtx;
928 u16 skb_dirtytx;
929 u16 qindex;
930 unsigned int tx_ring_size;
931 unsigned int num_txbdfree;
932 /* Configuration info for the coalescing features */
933 unsigned char txcoalescing;
934 unsigned long txic;
935 unsigned short txcount;
936 unsigned short txtime;
937};
938
939/**
940 * struct gfar_priv_rx_q - per rx queue structure
941 * @rxlock: per queue rx spin lock
942 * @rx_skbuff: skb pointers
943 * @skb_currx: currently use skb pointer
944 * @rx_bd_base: First rx buffer descriptor
945 * @cur_rx: Next free rx ring entry
946 * @qindex: index of this queue
947 * @dev: back pointer to the dev structure
948 * @rx_ring_size: Rx ring size
949 * @rxcoalescing: enable/disable rx-coalescing
950 * @rxic: receive interrupt coalescing vlaue
951 */
952
953struct gfar_priv_rx_q {
954 spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
955 struct sk_buff ** rx_skbuff;
956 dma_addr_t rx_bd_dma_base;
957 struct rxbd8 *rx_bd_base;
958 struct rxbd8 *cur_rx;
959 struct net_device *dev;
960 struct gfar_priv_grp *grp;
961 u16 skb_currx;
962 u16 qindex;
963 unsigned int rx_ring_size;
964 /* RX Coalescing values */
965 unsigned char rxcoalescing;
966 unsigned long rxic;
967};
968
969/**
970 * struct gfar_priv_grp - per group structure
971 * @napi: the napi poll function
972 * @priv: back pointer to the priv structure
973 * @regs: the ioremapped register space for this group
974 * @grp_id: group id for this group
975 * @interruptTransmit: The TX interrupt number for this group
976 * @interruptReceive: The RX interrupt number for this group
977 * @interruptError: The ERROR interrupt number for this group
978 * @int_name_tx: tx interrupt name for this group
979 * @int_name_rx: rx interrupt name for this group
980 * @int_name_er: er interrupt name for this group
981 */
982
983struct gfar_priv_grp {
984 spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
985 struct napi_struct napi;
986 struct gfar_private *priv;
987 struct gfar __iomem *regs;
988 unsigned int grp_id;
989 unsigned long rx_bit_map;
990 unsigned long tx_bit_map;
991 unsigned long num_tx_queues;
992 unsigned long num_rx_queues;
993 unsigned int rstat;
994 unsigned int tstat;
995 unsigned int imask;
996 unsigned int ievent;
997 unsigned int interruptTransmit;
998 unsigned int interruptReceive;
999 unsigned int interruptError;
1000
1001 char int_name_tx[GFAR_INT_NAME_MAX];
1002 char int_name_rx[GFAR_INT_NAME_MAX];
1003 char int_name_er[GFAR_INT_NAME_MAX];
1004};
1005
702/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1006/* Struct stolen almost completely (and shamelessly) from the FCC enet source
703 * (Ok, that's not so true anymore, but there is a family resemblence) 1007 * (Ok, that's not so true anymore, but there is a family resemblence)
704 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base 1008 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -709,62 +1013,36 @@ struct gfar {
709 * the buffer descriptor determines the actual condition. 1013 * the buffer descriptor determines the actual condition.
710 */ 1014 */
711struct gfar_private { 1015struct gfar_private {
712 /* Fields controlled by TX lock */
713 spinlock_t txlock;
714 1016
715 /* Pointer to the array of skbuffs */ 1017 /* Indicates how many tx, rx queues are enabled */
716 struct sk_buff ** tx_skbuff; 1018 unsigned int num_tx_queues;
1019 unsigned int num_rx_queues;
1020 unsigned int num_grps;
1021 unsigned int mode;
717 1022
718 /* next free skb in the array */ 1023 /* The total tx and rx ring size for the enabled queues */
719 u16 skb_curtx; 1024 unsigned int total_tx_ring_size;
720 1025 unsigned int total_rx_ring_size;
721 /* First skb in line to be transmitted */
722 u16 skb_dirtytx;
723
724 /* Configuration info for the coalescing features */
725 unsigned char txcoalescing;
726 unsigned long txic;
727
728 /* Buffer descriptor pointers */
729 struct txbd8 *tx_bd_base; /* First tx buffer descriptor */
730 struct txbd8 *cur_tx; /* Next free ring entry */
731 struct txbd8 *dirty_tx; /* First buffer in line
732 to be transmitted */
733 unsigned int tx_ring_size;
734 unsigned int num_txbdfree; /* number of TxBDs free */
735
736 /* RX Locked fields */
737 spinlock_t rxlock;
738 1026
739 struct device_node *node; 1027 struct device_node *node;
740 struct net_device *ndev; 1028 struct net_device *ndev;
741 struct of_device *ofdev; 1029 struct of_device *ofdev;
742 struct napi_struct napi;
743
744 /* skb array and index */
745 struct sk_buff ** rx_skbuff;
746 u16 skb_currx;
747
748 /* RX Coalescing values */
749 unsigned char rxcoalescing;
750 unsigned long rxic;
751 1030
752 struct rxbd8 *rx_bd_base; /* First Rx buffers */ 1031 struct gfar_priv_grp gfargrp[MAXGROUPS];
753 struct rxbd8 *cur_rx; /* Next free rx ring entry */ 1032 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
1033 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
754 1034
755 /* RX parameters */ 1035 /* RX per device parameters */
756 unsigned int rx_ring_size;
757 unsigned int rx_buffer_size; 1036 unsigned int rx_buffer_size;
758 unsigned int rx_stash_size; 1037 unsigned int rx_stash_size;
759 unsigned int rx_stash_index; 1038 unsigned int rx_stash_index;
760 1039
1040 u32 cur_filer_idx;
1041
761 struct sk_buff_head rx_recycle; 1042 struct sk_buff_head rx_recycle;
762 1043
763 struct vlan_group *vlgrp; 1044 struct vlan_group *vlgrp;
764 1045
765 /* Unprotected fields */
766 /* Pointer to the GFAR memory mapped Registers */
767 struct gfar __iomem *regs;
768 1046
769 /* Hash registers and their width */ 1047 /* Hash registers and their width */
770 u32 __iomem *hash_regs[16]; 1048 u32 __iomem *hash_regs[16];
@@ -785,13 +1063,10 @@ struct gfar_private {
785 unsigned char rx_csum_enable:1, 1063 unsigned char rx_csum_enable:1,
786 extended_hash:1, 1064 extended_hash:1,
787 bd_stash_en:1, 1065 bd_stash_en:1,
1066 rx_filer_enable:1,
788 wol_en:1; /* Wake-on-LAN enabled */ 1067 wol_en:1; /* Wake-on-LAN enabled */
789 unsigned short padding; 1068 unsigned short padding;
790 1069
791 unsigned int interruptTransmit;
792 unsigned int interruptReceive;
793 unsigned int interruptError;
794
795 /* PHY stuff */ 1070 /* PHY stuff */
796 struct phy_device *phydev; 1071 struct phy_device *phydev;
797 struct mii_bus *mii_bus; 1072 struct mii_bus *mii_bus;
@@ -803,14 +1078,13 @@ struct gfar_private {
803 1078
804 struct work_struct reset_task; 1079 struct work_struct reset_task;
805 1080
806 char int_name_tx[GFAR_INT_NAME_MAX];
807 char int_name_rx[GFAR_INT_NAME_MAX];
808 char int_name_er[GFAR_INT_NAME_MAX];
809
810 /* Network Statistics */ 1081 /* Network Statistics */
811 struct gfar_extra_stats extra_stats; 1082 struct gfar_extra_stats extra_stats;
812}; 1083};
813 1084
1085extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1086extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1087
814static inline u32 gfar_read(volatile unsigned __iomem *addr) 1088static inline u32 gfar_read(volatile unsigned __iomem *addr)
815{ 1089{
816 u32 val; 1090 u32 val;
@@ -823,12 +1097,28 @@ static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
823 out_be32(addr, val); 1097 out_be32(addr, val);
824} 1098}
825 1099
1100static inline void gfar_write_filer(struct gfar_private *priv,
1101 unsigned int far, unsigned int fcr, unsigned int fpr)
1102{
1103 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1104
1105 gfar_write(&regs->rqfar, far);
1106 gfar_write(&regs->rqfcr, fcr);
1107 gfar_write(&regs->rqfpr, fpr);
1108}
1109
1110extern void lock_rx_qs(struct gfar_private *priv);
1111extern void lock_tx_qs(struct gfar_private *priv);
1112extern void unlock_rx_qs(struct gfar_private *priv);
1113extern void unlock_tx_qs(struct gfar_private *priv);
826extern irqreturn_t gfar_receive(int irq, void *dev_id); 1114extern irqreturn_t gfar_receive(int irq, void *dev_id);
827extern int startup_gfar(struct net_device *dev); 1115extern int startup_gfar(struct net_device *dev);
828extern void stop_gfar(struct net_device *dev); 1116extern void stop_gfar(struct net_device *dev);
829extern void gfar_halt(struct net_device *dev); 1117extern void gfar_halt(struct net_device *dev);
830extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 1118extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
831 int enable, u32 regnum, u32 read); 1119 int enable, u32 regnum, u32 read);
1120extern void gfar_configure_coalescing(struct gfar_private *priv,
1121 unsigned long tx_mask, unsigned long rx_mask);
832void gfar_init_sysfs(struct net_device *dev); 1122void gfar_init_sysfs(struct net_device *dev);
833 1123
834extern const struct ethtool_ops gfar_ethtool_ops; 1124extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b525b47..1010367695e4 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
10 * 11 *
11 * Copyright (c) 2003,2004 Freescale Semiconductor, Inc. 12 * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
12 * 13 *
13 * This software may be used and distributed according to 14 * This software may be used and distributed according to
14 * the terms of the GNU Public License, Version 2, incorporated herein 15 * the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
41#include "gianfar.h" 42#include "gianfar.h"
42 43
43extern void gfar_start(struct net_device *dev); 44extern void gfar_start(struct net_device *dev);
44extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 45extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
45 46
46#define GFAR_MAX_COAL_USECS 0xffff 47#define GFAR_MAX_COAL_USECS 0xffff
47#define GFAR_MAX_COAL_FRAMES 0xff 48#define GFAR_MAX_COAL_FRAMES 0xff
@@ -136,10 +137,11 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
136{ 137{
137 int i; 138 int i;
138 struct gfar_private *priv = netdev_priv(dev); 139 struct gfar_private *priv = netdev_priv(dev);
140 struct gfar __iomem *regs = priv->gfargrp[0].regs;
139 u64 *extra = (u64 *) & priv->extra_stats; 141 u64 *extra = (u64 *) & priv->extra_stats;
140 142
141 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 143 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
142 u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon; 144 u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
143 struct gfar_stats *stats = (struct gfar_stats *) buf; 145 struct gfar_stats *stats = (struct gfar_stats *) buf;
144 146
145 for (i = 0; i < GFAR_RMON_LEN; i++) 147 for (i = 0; i < GFAR_RMON_LEN; i++)
@@ -197,12 +199,18 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
197{ 199{
198 struct gfar_private *priv = netdev_priv(dev); 200 struct gfar_private *priv = netdev_priv(dev);
199 struct phy_device *phydev = priv->phydev; 201 struct phy_device *phydev = priv->phydev;
202 struct gfar_priv_rx_q *rx_queue = NULL;
203 struct gfar_priv_tx_q *tx_queue = NULL;
200 204
201 if (NULL == phydev) 205 if (NULL == phydev)
202 return -ENODEV; 206 return -ENODEV;
207 tx_queue = priv->tx_queue[0];
208 rx_queue = priv->rx_queue[0];
203 209
204 cmd->maxtxpkt = get_icft_value(priv->txic); 210 /* etsec-1.7 and older versions have only one txic
205 cmd->maxrxpkt = get_icft_value(priv->rxic); 211 * and rxic regs although they support multiple queues */
212 cmd->maxtxpkt = get_icft_value(tx_queue->txic);
213 cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
206 214
207 return phy_ethtool_gset(phydev, cmd); 215 return phy_ethtool_gset(phydev, cmd);
208} 216}
@@ -218,7 +226,7 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
218{ 226{
219 int i; 227 int i;
220 struct gfar_private *priv = netdev_priv(dev); 228 struct gfar_private *priv = netdev_priv(dev);
221 u32 __iomem *theregs = (u32 __iomem *) priv->regs; 229 u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
222 u32 *buf = (u32 *) regbuf; 230 u32 *buf = (u32 *) regbuf;
223 231
224 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) 232 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -279,6 +287,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
279static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 287static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
280{ 288{
281 struct gfar_private *priv = netdev_priv(dev); 289 struct gfar_private *priv = netdev_priv(dev);
290 struct gfar_priv_rx_q *rx_queue = NULL;
291 struct gfar_priv_tx_q *tx_queue = NULL;
282 unsigned long rxtime; 292 unsigned long rxtime;
283 unsigned long rxcount; 293 unsigned long rxcount;
284 unsigned long txtime; 294 unsigned long txtime;
@@ -290,10 +300,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
290 if (NULL == priv->phydev) 300 if (NULL == priv->phydev)
291 return -ENODEV; 301 return -ENODEV;
292 302
293 rxtime = get_ictt_value(priv->rxic); 303 rx_queue = priv->rx_queue[0];
294 rxcount = get_icft_value(priv->rxic); 304 tx_queue = priv->tx_queue[0];
295 txtime = get_ictt_value(priv->txic); 305
296 txcount = get_icft_value(priv->txic); 306 rxtime = get_ictt_value(rx_queue->rxic);
307 rxcount = get_icft_value(rx_queue->rxic);
308 txtime = get_ictt_value(tx_queue->txic);
309 txcount = get_icft_value(tx_queue->txic);
297 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime); 310 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
298 cvals->rx_max_coalesced_frames = rxcount; 311 cvals->rx_max_coalesced_frames = rxcount;
299 312
@@ -339,16 +352,23 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
339static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 352static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
340{ 353{
341 struct gfar_private *priv = netdev_priv(dev); 354 struct gfar_private *priv = netdev_priv(dev);
355 int i = 0;
342 356
343 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 357 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
344 return -EOPNOTSUPP; 358 return -EOPNOTSUPP;
345 359
346 /* Set up rx coalescing */ 360 /* Set up rx coalescing */
361 /* As of now, we will enable/disable coalescing for all
362 * queues together in case of eTSEC2, this will be modified
363 * along with the ethtool interface */
347 if ((cvals->rx_coalesce_usecs == 0) || 364 if ((cvals->rx_coalesce_usecs == 0) ||
348 (cvals->rx_max_coalesced_frames == 0)) 365 (cvals->rx_max_coalesced_frames == 0)) {
349 priv->rxcoalescing = 0; 366 for (i = 0; i < priv->num_rx_queues; i++)
350 else 367 priv->rx_queue[i]->rxcoalescing = 0;
351 priv->rxcoalescing = 1; 368 } else {
369 for (i = 0; i < priv->num_rx_queues; i++)
370 priv->rx_queue[i]->rxcoalescing = 1;
371 }
352 372
353 if (NULL == priv->phydev) 373 if (NULL == priv->phydev)
354 return -ENODEV; 374 return -ENODEV;
@@ -366,15 +386,21 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
366 return -EINVAL; 386 return -EINVAL;
367 } 387 }
368 388
369 priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, 389 for (i = 0; i < priv->num_rx_queues; i++) {
370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); 390 priv->rx_queue[i]->rxic = mk_ic_value(
391 cvals->rx_max_coalesced_frames,
392 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
393 }
371 394
372 /* Set up tx coalescing */ 395 /* Set up tx coalescing */
373 if ((cvals->tx_coalesce_usecs == 0) || 396 if ((cvals->tx_coalesce_usecs == 0) ||
374 (cvals->tx_max_coalesced_frames == 0)) 397 (cvals->tx_max_coalesced_frames == 0)) {
375 priv->txcoalescing = 0; 398 for (i = 0; i < priv->num_tx_queues; i++)
376 else 399 priv->tx_queue[i]->txcoalescing = 0;
377 priv->txcoalescing = 1; 400 } else {
401 for (i = 0; i < priv->num_tx_queues; i++)
402 priv->tx_queue[i]->txcoalescing = 1;
403 }
378 404
379 /* Check the bounds of the values */ 405 /* Check the bounds of the values */
380 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { 406 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +415,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
389 return -EINVAL; 415 return -EINVAL;
390 } 416 }
391 417
392 priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames, 418 for (i = 0; i < priv->num_tx_queues; i++) {
393 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 419 priv->tx_queue[i]->txic = mk_ic_value(
394 420 cvals->tx_max_coalesced_frames,
395 gfar_write(&priv->regs->rxic, 0); 421 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
396 if (priv->rxcoalescing) 422 }
397 gfar_write(&priv->regs->rxic, priv->rxic);
398 423
399 gfar_write(&priv->regs->txic, 0); 424 gfar_configure_coalescing(priv, 0xFF, 0xFF);
400 if (priv->txcoalescing)
401 gfar_write(&priv->regs->txic, priv->txic);
402 425
403 return 0; 426 return 0;
404} 427}
@@ -409,6 +432,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
409static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 432static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
410{ 433{
411 struct gfar_private *priv = netdev_priv(dev); 434 struct gfar_private *priv = netdev_priv(dev);
435 struct gfar_priv_tx_q *tx_queue = NULL;
436 struct gfar_priv_rx_q *rx_queue = NULL;
437
438 tx_queue = priv->tx_queue[0];
439 rx_queue = priv->rx_queue[0];
412 440
413 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; 441 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
414 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; 442 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +446,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
418 /* Values changeable by the user. The valid values are 446 /* Values changeable by the user. The valid values are
419 * in the range 1 to the "*_max_pending" counterpart above. 447 * in the range 1 to the "*_max_pending" counterpart above.
420 */ 448 */
421 rvals->rx_pending = priv->rx_ring_size; 449 rvals->rx_pending = rx_queue->rx_ring_size;
422 rvals->rx_mini_pending = priv->rx_ring_size; 450 rvals->rx_mini_pending = rx_queue->rx_ring_size;
423 rvals->rx_jumbo_pending = priv->rx_ring_size; 451 rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
424 rvals->tx_pending = priv->tx_ring_size; 452 rvals->tx_pending = tx_queue->tx_ring_size;
425} 453}
426 454
427/* Change the current ring parameters, stopping the controller if 455/* Change the current ring parameters, stopping the controller if
@@ -431,7 +459,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
431static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 459static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
432{ 460{
433 struct gfar_private *priv = netdev_priv(dev); 461 struct gfar_private *priv = netdev_priv(dev);
434 int err = 0; 462 int err = 0, i = 0;
435 463
436 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) 464 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
437 return -EINVAL; 465 return -EINVAL;
@@ -451,34 +479,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
451 return -EINVAL; 479 return -EINVAL;
452 } 480 }
453 481
482
454 if (dev->flags & IFF_UP) { 483 if (dev->flags & IFF_UP) {
455 unsigned long flags; 484 unsigned long flags;
456 485
457 /* Halt TX and RX, and process the frames which 486 /* Halt TX and RX, and process the frames which
458 * have already been received */ 487 * have already been received */
459 spin_lock_irqsave(&priv->txlock, flags); 488 local_irq_save(flags);
460 spin_lock(&priv->rxlock); 489 lock_tx_qs(priv);
490 lock_rx_qs(priv);
461 491
462 gfar_halt(dev); 492 gfar_halt(dev);
463 493
464 spin_unlock(&priv->rxlock); 494 unlock_rx_qs(priv);
465 spin_unlock_irqrestore(&priv->txlock, flags); 495 unlock_tx_qs(priv);
496 local_irq_restore(flags);
466 497
467 gfar_clean_rx_ring(dev, priv->rx_ring_size); 498 for (i = 0; i < priv->num_rx_queues; i++)
499 gfar_clean_rx_ring(priv->rx_queue[i],
500 priv->rx_queue[i]->rx_ring_size);
468 501
469 /* Now we take down the rings to rebuild them */ 502 /* Now we take down the rings to rebuild them */
470 stop_gfar(dev); 503 stop_gfar(dev);
471 } 504 }
472 505
473 /* Change the size */ 506 /* Change the size */
474 priv->rx_ring_size = rvals->rx_pending; 507 for (i = 0; i < priv->num_rx_queues; i++) {
475 priv->tx_ring_size = rvals->tx_pending; 508 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
476 priv->num_txbdfree = priv->tx_ring_size; 509 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
510 priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
511 }
477 512
478 /* Rebuild the rings with the new size */ 513 /* Rebuild the rings with the new size */
479 if (dev->flags & IFF_UP) { 514 if (dev->flags & IFF_UP) {
480 err = startup_gfar(dev); 515 err = startup_gfar(dev);
481 netif_wake_queue(dev); 516 netif_tx_wake_all_queues(dev);
482 } 517 }
483 return err; 518 return err;
484} 519}
@@ -487,23 +522,28 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
487{ 522{
488 struct gfar_private *priv = netdev_priv(dev); 523 struct gfar_private *priv = netdev_priv(dev);
489 unsigned long flags; 524 unsigned long flags;
490 int err = 0; 525 int err = 0, i = 0;
491 526
492 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 527 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
493 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
494 529
530
495 if (dev->flags & IFF_UP) { 531 if (dev->flags & IFF_UP) {
496 /* Halt TX and RX, and process the frames which 532 /* Halt TX and RX, and process the frames which
497 * have already been received */ 533 * have already been received */
498 spin_lock_irqsave(&priv->txlock, flags); 534 local_irq_save(flags);
499 spin_lock(&priv->rxlock); 535 lock_tx_qs(priv);
536 lock_rx_qs(priv);
500 537
501 gfar_halt(dev); 538 gfar_halt(dev);
502 539
503 spin_unlock(&priv->rxlock); 540 unlock_tx_qs(priv);
504 spin_unlock_irqrestore(&priv->txlock, flags); 541 unlock_rx_qs(priv);
542 local_irq_save(flags);
505 543
506 gfar_clean_rx_ring(dev, priv->rx_ring_size); 544 for (i = 0; i < priv->num_rx_queues; i++)
545 gfar_clean_rx_ring(priv->rx_queue[i],
546 priv->rx_queue[i]->rx_ring_size);
507 547
508 /* Now we take down the rings to rebuild them */ 548 /* Now we take down the rings to rebuild them */
509 stop_gfar(dev); 549 stop_gfar(dev);
@@ -515,7 +555,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
515 555
516 if (dev->flags & IFF_UP) { 556 if (dev->flags & IFF_UP) {
517 err = startup_gfar(dev); 557 err = startup_gfar(dev);
518 netif_wake_queue(dev); 558 netif_tx_wake_all_queues(dev);
519 } 559 }
520 return err; 560 return err;
521} 561}
@@ -605,6 +645,241 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
605} 645}
606#endif 646#endif
607 647
648static int gfar_ethflow_to_class(int flow_type, u64 *class)
649{
650 switch (flow_type) {
651 case TCP_V4_FLOW:
652 *class = CLASS_CODE_TCP_IPV4;
653 break;
654 case UDP_V4_FLOW:
655 *class = CLASS_CODE_UDP_IPV4;
656 break;
657 case AH_V4_FLOW:
658 case ESP_V4_FLOW:
659 *class = CLASS_CODE_AH_ESP_IPV4;
660 break;
661 case SCTP_V4_FLOW:
662 *class = CLASS_CODE_SCTP_IPV4;
663 break;
664 case TCP_V6_FLOW:
665 *class = CLASS_CODE_TCP_IPV6;
666 break;
667 case UDP_V6_FLOW:
668 *class = CLASS_CODE_UDP_IPV6;
669 break;
670 case AH_V6_FLOW:
671 case ESP_V6_FLOW:
672 *class = CLASS_CODE_AH_ESP_IPV6;
673 break;
674 case SCTP_V6_FLOW:
675 *class = CLASS_CODE_SCTP_IPV6;
676 break;
677 default:
678 return 0;
679 }
680
681 return 1;
682}
683
684static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
685{
686 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
687
688 if (ethflow & RXH_L2DA) {
689 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
690 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
691 ftp_rqfpr[priv->cur_filer_idx] = fpr;
692 ftp_rqfcr[priv->cur_filer_idx] = fcr;
693 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
694 priv->cur_filer_idx = priv->cur_filer_idx - 1;
695
696 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
697 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
698 ftp_rqfpr[priv->cur_filer_idx] = fpr;
699 ftp_rqfcr[priv->cur_filer_idx] = fcr;
700 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
701 priv->cur_filer_idx = priv->cur_filer_idx - 1;
702 }
703
704 if (ethflow & RXH_VLAN) {
705 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
706 RQFCR_AND | RQFCR_HASHTBL_0;
707 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
708 ftp_rqfpr[priv->cur_filer_idx] = fpr;
709 ftp_rqfcr[priv->cur_filer_idx] = fcr;
710 priv->cur_filer_idx = priv->cur_filer_idx - 1;
711 }
712
713 if (ethflow & RXH_IP_SRC) {
714 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
715 RQFCR_AND | RQFCR_HASHTBL_0;
716 ftp_rqfpr[priv->cur_filer_idx] = fpr;
717 ftp_rqfcr[priv->cur_filer_idx] = fcr;
718 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
719 priv->cur_filer_idx = priv->cur_filer_idx - 1;
720 }
721
722 if (ethflow & (RXH_IP_DST)) {
723 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
724 RQFCR_AND | RQFCR_HASHTBL_0;
725 ftp_rqfpr[priv->cur_filer_idx] = fpr;
726 ftp_rqfcr[priv->cur_filer_idx] = fcr;
727 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
728 priv->cur_filer_idx = priv->cur_filer_idx - 1;
729 }
730
731 if (ethflow & RXH_L3_PROTO) {
732 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
733 RQFCR_AND | RQFCR_HASHTBL_0;
734 ftp_rqfpr[priv->cur_filer_idx] = fpr;
735 ftp_rqfcr[priv->cur_filer_idx] = fcr;
736 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
737 priv->cur_filer_idx = priv->cur_filer_idx - 1;
738 }
739
740 if (ethflow & RXH_L4_B_0_1) {
741 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
742 RQFCR_AND | RQFCR_HASHTBL_0;
743 ftp_rqfpr[priv->cur_filer_idx] = fpr;
744 ftp_rqfcr[priv->cur_filer_idx] = fcr;
745 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
746 priv->cur_filer_idx = priv->cur_filer_idx - 1;
747 }
748
749 if (ethflow & RXH_L4_B_2_3) {
750 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
751 RQFCR_AND | RQFCR_HASHTBL_0;
752 ftp_rqfpr[priv->cur_filer_idx] = fpr;
753 ftp_rqfcr[priv->cur_filer_idx] = fcr;
754 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
755 priv->cur_filer_idx = priv->cur_filer_idx - 1;
756 }
757}
758
759static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
760{
761 unsigned int last_rule_idx = priv->cur_filer_idx;
762 unsigned int cmp_rqfpr;
763 unsigned int local_rqfpr[MAX_FILER_IDX + 1];
764 unsigned int local_rqfcr[MAX_FILER_IDX + 1];
765 int i = 0x0, k = 0x0;
766 int j = MAX_FILER_IDX, l = 0x0;
767
768 switch (class) {
769 case TCP_V4_FLOW:
770 cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
771 break;
772 case UDP_V4_FLOW:
773 cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
774 break;
775 case TCP_V6_FLOW:
776 cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
777 break;
778 case UDP_V6_FLOW:
779 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
780 break;
781 case IPV4_FLOW:
782 cmp_rqfpr = RQFPR_IPV4;
783 case IPV6_FLOW:
784 cmp_rqfpr = RQFPR_IPV6;
785 break;
786 default:
787 printk(KERN_ERR "Right now this class is not supported\n");
788 return 0;
789 }
790
791 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
792 local_rqfpr[j] = ftp_rqfpr[i];
793 local_rqfcr[j] = ftp_rqfcr[i];
794 j--;
795 if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
796 RQFCR_CLE |RQFCR_AND)) &&
797 (ftp_rqfpr[i] == cmp_rqfpr))
798 break;
799 }
800
801 if (i == MAX_FILER_IDX + 1) {
802 printk(KERN_ERR "No parse rule found, ");
803 printk(KERN_ERR "can't create hash rules\n");
804 return 0;
805 }
806
807 /* If a match was found, then it begins the starting of a cluster rule
808 * if it was already programmed, we need to overwrite these rules
809 */
810 for (l = i+1; l < MAX_FILER_IDX; l++) {
811 if ((ftp_rqfcr[l] & RQFCR_CLE) &&
812 !(ftp_rqfcr[l] & RQFCR_AND)) {
813 ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
814 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
815 ftp_rqfpr[l] = FPR_FILER_MASK;
816 gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
817 break;
818 }
819
820 if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
821 continue;
822 else {
823 local_rqfpr[j] = ftp_rqfpr[l];
824 local_rqfcr[j] = ftp_rqfcr[l];
825 j--;
826 }
827 }
828
829 priv->cur_filer_idx = l - 1;
830 last_rule_idx = l;
831
832 /* hash rules */
833 ethflow_to_filer_rules(priv, ethflow);
834
835 /* Write back the popped out rules again */
836 for (k = j+1; k < MAX_FILER_IDX; k++) {
837 ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
838 ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
839 gfar_write_filer(priv, priv->cur_filer_idx,
840 local_rqfcr[k], local_rqfpr[k]);
841 if (!priv->cur_filer_idx)
842 break;
843 priv->cur_filer_idx = priv->cur_filer_idx - 1;
844 }
845
846 return 1;
847}
848
849static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
850{
851 u64 class;
852
853 if (!gfar_ethflow_to_class(cmd->flow_type, &class))
854 return -EINVAL;
855
856 if (class < CLASS_CODE_USER_PROG1 ||
857 class > CLASS_CODE_SCTP_IPV6)
858 return -EINVAL;
859
860 /* write the filer rules here */
861 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
862 return -1;
863
864 return 0;
865}
866
867static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
868{
869 struct gfar_private *priv = netdev_priv(dev);
870 int ret = 0;
871
872 switch(cmd->cmd) {
873 case ETHTOOL_SRXFH:
874 ret = gfar_set_hash_opts(priv, cmd);
875 break;
876 default:
877 ret = -EINVAL;
878 }
879
880 return ret;
881}
882
608const struct ethtool_ops gfar_ethtool_ops = { 883const struct ethtool_ops gfar_ethtool_ops = {
609 .get_settings = gfar_gsettings, 884 .get_settings = gfar_gsettings,
610 .set_settings = gfar_ssettings, 885 .set_settings = gfar_ssettings,
@@ -630,4 +905,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
630 .get_wol = gfar_get_wol, 905 .get_wol = gfar_get_wol,
631 .set_wol = gfar_set_wol, 906 .set_wol = gfar_set_wol,
632#endif 907#endif
908 .set_rxnfc = gfar_set_nfc,
633}; 909};
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index dd26da74f27a..b31c9c8876e6 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -8,8 +8,9 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala (galak@kernel.crashing.org) 10 * Maintainer: Kumar Gala (galak@kernel.crashing.org)
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * 14 *
14 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
@@ -49,6 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
49 const char *buf, size_t count) 50 const char *buf, size_t count)
50{ 51{
51 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 52 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
53 struct gfar __iomem *regs = priv->gfargrp[0].regs;
52 int new_setting = 0; 54 int new_setting = 0;
53 u32 temp; 55 u32 temp;
54 unsigned long flags; 56 unsigned long flags;
@@ -56,6 +58,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
56 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING)) 58 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
57 return count; 59 return count;
58 60
61
59 /* Find out the new setting */ 62 /* Find out the new setting */
60 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) 63 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
61 new_setting = 1; 64 new_setting = 1;
@@ -65,21 +68,24 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
65 else 68 else
66 return count; 69 return count;
67 70
68 spin_lock_irqsave(&priv->rxlock, flags); 71
72 local_irq_save(flags);
73 lock_rx_qs(priv);
69 74
70 /* Set the new stashing value */ 75 /* Set the new stashing value */
71 priv->bd_stash_en = new_setting; 76 priv->bd_stash_en = new_setting;
72 77
73 temp = gfar_read(&priv->regs->attr); 78 temp = gfar_read(&regs->attr);
74 79
75 if (new_setting) 80 if (new_setting)
76 temp |= ATTR_BDSTASH; 81 temp |= ATTR_BDSTASH;
77 else 82 else
78 temp &= ~(ATTR_BDSTASH); 83 temp &= ~(ATTR_BDSTASH);
79 84
80 gfar_write(&priv->regs->attr, temp); 85 gfar_write(&regs->attr, temp);
81 86
82 spin_unlock_irqrestore(&priv->rxlock, flags); 87 unlock_rx_qs(priv);
88 local_irq_restore(flags);
83 89
84 return count; 90 return count;
85} 91}
@@ -99,6 +105,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
99 const char *buf, size_t count) 105 const char *buf, size_t count)
100{ 106{
101 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 107 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
108 struct gfar __iomem *regs = priv->gfargrp[0].regs;
102 unsigned int length = simple_strtoul(buf, NULL, 0); 109 unsigned int length = simple_strtoul(buf, NULL, 0);
103 u32 temp; 110 u32 temp;
104 unsigned long flags; 111 unsigned long flags;
@@ -106,7 +113,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
106 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) 113 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
107 return count; 114 return count;
108 115
109 spin_lock_irqsave(&priv->rxlock, flags); 116 local_irq_save(flags);
117 lock_rx_qs(priv);
118
110 if (length > priv->rx_buffer_size) 119 if (length > priv->rx_buffer_size)
111 goto out; 120 goto out;
112 121
@@ -115,23 +124,24 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
115 124
116 priv->rx_stash_size = length; 125 priv->rx_stash_size = length;
117 126
118 temp = gfar_read(&priv->regs->attreli); 127 temp = gfar_read(&regs->attreli);
119 temp &= ~ATTRELI_EL_MASK; 128 temp &= ~ATTRELI_EL_MASK;
120 temp |= ATTRELI_EL(length); 129 temp |= ATTRELI_EL(length);
121 gfar_write(&priv->regs->attreli, temp); 130 gfar_write(&regs->attreli, temp);
122 131
123 /* Turn stashing on/off as appropriate */ 132 /* Turn stashing on/off as appropriate */
124 temp = gfar_read(&priv->regs->attr); 133 temp = gfar_read(&regs->attr);
125 134
126 if (length) 135 if (length)
127 temp |= ATTR_BUFSTASH; 136 temp |= ATTR_BUFSTASH;
128 else 137 else
129 temp &= ~(ATTR_BUFSTASH); 138 temp &= ~(ATTR_BUFSTASH);
130 139
131 gfar_write(&priv->regs->attr, temp); 140 gfar_write(&regs->attr, temp);
132 141
133out: 142out:
134 spin_unlock_irqrestore(&priv->rxlock, flags); 143 unlock_rx_qs(priv);
144 local_irq_restore(flags);
135 145
136 return count; 146 return count;
137} 147}
@@ -154,6 +164,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
154 const char *buf, size_t count) 164 const char *buf, size_t count)
155{ 165{
156 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 166 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
167 struct gfar __iomem *regs = priv->gfargrp[0].regs;
157 unsigned short index = simple_strtoul(buf, NULL, 0); 168 unsigned short index = simple_strtoul(buf, NULL, 0);
158 u32 temp; 169 u32 temp;
159 unsigned long flags; 170 unsigned long flags;
@@ -161,7 +172,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
161 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) 172 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
162 return count; 173 return count;
163 174
164 spin_lock_irqsave(&priv->rxlock, flags); 175 local_irq_save(flags);
176 lock_rx_qs(priv);
177
165 if (index > priv->rx_stash_size) 178 if (index > priv->rx_stash_size)
166 goto out; 179 goto out;
167 180
@@ -170,13 +183,14 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
170 183
171 priv->rx_stash_index = index; 184 priv->rx_stash_index = index;
172 185
173 temp = gfar_read(&priv->regs->attreli); 186 temp = gfar_read(&regs->attreli);
174 temp &= ~ATTRELI_EI_MASK; 187 temp &= ~ATTRELI_EI_MASK;
175 temp |= ATTRELI_EI(index); 188 temp |= ATTRELI_EI(index);
176 gfar_write(&priv->regs->attreli, flags); 189 gfar_write(&regs->attreli, temp);
177 190
178out: 191out:
179 spin_unlock_irqrestore(&priv->rxlock, flags); 192 unlock_rx_qs(priv);
193 local_irq_restore(flags);
180 194
181 return count; 195 return count;
182} 196}
@@ -198,6 +212,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
198 const char *buf, size_t count) 212 const char *buf, size_t count)
199{ 213{
200 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 214 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
215 struct gfar __iomem *regs = priv->gfargrp[0].regs;
201 unsigned int length = simple_strtoul(buf, NULL, 0); 216 unsigned int length = simple_strtoul(buf, NULL, 0);
202 u32 temp; 217 u32 temp;
203 unsigned long flags; 218 unsigned long flags;
@@ -205,16 +220,18 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
205 if (length > GFAR_MAX_FIFO_THRESHOLD) 220 if (length > GFAR_MAX_FIFO_THRESHOLD)
206 return count; 221 return count;
207 222
208 spin_lock_irqsave(&priv->txlock, flags); 223 local_irq_save(flags);
224 lock_tx_qs(priv);
209 225
210 priv->fifo_threshold = length; 226 priv->fifo_threshold = length;
211 227
212 temp = gfar_read(&priv->regs->fifo_tx_thr); 228 temp = gfar_read(&regs->fifo_tx_thr);
213 temp &= ~FIFO_TX_THR_MASK; 229 temp &= ~FIFO_TX_THR_MASK;
214 temp |= length; 230 temp |= length;
215 gfar_write(&priv->regs->fifo_tx_thr, temp); 231 gfar_write(&regs->fifo_tx_thr, temp);
216 232
217 spin_unlock_irqrestore(&priv->txlock, flags); 233 unlock_tx_qs(priv);
234 local_irq_restore(flags);
218 235
219 return count; 236 return count;
220} 237}
@@ -235,6 +252,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
235 const char *buf, size_t count) 252 const char *buf, size_t count)
236{ 253{
237 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 254 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
255 struct gfar __iomem *regs = priv->gfargrp[0].regs;
238 unsigned int num = simple_strtoul(buf, NULL, 0); 256 unsigned int num = simple_strtoul(buf, NULL, 0);
239 u32 temp; 257 u32 temp;
240 unsigned long flags; 258 unsigned long flags;
@@ -242,16 +260,18 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
242 if (num > GFAR_MAX_FIFO_STARVE) 260 if (num > GFAR_MAX_FIFO_STARVE)
243 return count; 261 return count;
244 262
245 spin_lock_irqsave(&priv->txlock, flags); 263 local_irq_save(flags);
264 lock_tx_qs(priv);
246 265
247 priv->fifo_starve = num; 266 priv->fifo_starve = num;
248 267
249 temp = gfar_read(&priv->regs->fifo_tx_starve); 268 temp = gfar_read(&regs->fifo_tx_starve);
250 temp &= ~FIFO_TX_STARVE_MASK; 269 temp &= ~FIFO_TX_STARVE_MASK;
251 temp |= num; 270 temp |= num;
252 gfar_write(&priv->regs->fifo_tx_starve, temp); 271 gfar_write(&regs->fifo_tx_starve, temp);
253 272
254 spin_unlock_irqrestore(&priv->txlock, flags); 273 unlock_tx_qs(priv);
274 local_irq_restore(flags);
255 275
256 return count; 276 return count;
257} 277}
@@ -273,6 +293,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
273 const char *buf, size_t count) 293 const char *buf, size_t count)
274{ 294{
275 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 295 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
296 struct gfar __iomem *regs = priv->gfargrp[0].regs;
276 unsigned int num = simple_strtoul(buf, NULL, 0); 297 unsigned int num = simple_strtoul(buf, NULL, 0);
277 u32 temp; 298 u32 temp;
278 unsigned long flags; 299 unsigned long flags;
@@ -280,16 +301,18 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
280 if (num > GFAR_MAX_FIFO_STARVE_OFF) 301 if (num > GFAR_MAX_FIFO_STARVE_OFF)
281 return count; 302 return count;
282 303
283 spin_lock_irqsave(&priv->txlock, flags); 304 local_irq_save(flags);
305 lock_tx_qs(priv);
284 306
285 priv->fifo_starve_off = num; 307 priv->fifo_starve_off = num;
286 308
287 temp = gfar_read(&priv->regs->fifo_tx_starve_shutoff); 309 temp = gfar_read(&regs->fifo_tx_starve_shutoff);
288 temp &= ~FIFO_TX_STARVE_OFF_MASK; 310 temp &= ~FIFO_TX_STARVE_OFF_MASK;
289 temp |= num; 311 temp |= num;
290 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp); 312 gfar_write(&regs->fifo_tx_starve_shutoff, temp);
291 313
292 spin_unlock_irqrestore(&priv->txlock, flags); 314 unlock_tx_qs(priv);
315 local_irq_restore(flags);
293 316
294 return count; 317 return count;
295} 318}
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index f7519a594945..5d8c6333070e 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -407,10 +407,9 @@ that case.
407/* A few values that may be tweaked. */ 407/* A few values that may be tweaked. */
408/* Size of each temporary Rx buffer, calculated as: 408/* Size of each temporary Rx buffer, calculated as:
409 * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for 409 * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
410 * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum + 410 * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
411 * 2 more because we use skb_reserve.
412 */ 411 */
413#define PKT_BUF_SZ 1538 412#define PKT_BUF_SZ 1536
414 413
415/* For now, this is going to be set to the maximum size of an ethernet 414/* For now, this is going to be set to the maximum size of an ethernet
416 * packet. Eventually, we may want to make it a variable that is 415 * packet. Eventually, we may want to make it a variable that is
@@ -873,7 +872,7 @@ static int hamachi_open(struct net_device *dev)
873 u32 rx_int_var, tx_int_var; 872 u32 rx_int_var, tx_int_var;
874 u16 fifo_info; 873 u16 fifo_info;
875 874
876 i = request_irq(dev->irq, &hamachi_interrupt, IRQF_SHARED, dev->name, dev); 875 i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev);
877 if (i) 876 if (i)
878 return i; 877 return i;
879 878
@@ -1152,12 +1151,13 @@ static void hamachi_tx_timeout(struct net_device *dev)
1152 } 1151 }
1153 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1152 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1154 for (i = 0; i < RX_RING_SIZE; i++) { 1153 for (i = 0; i < RX_RING_SIZE; i++) {
1155 struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz); 1154 struct sk_buff *skb;
1155
1156 skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
1156 hmp->rx_skbuff[i] = skb; 1157 hmp->rx_skbuff[i] = skb;
1157 if (skb == NULL) 1158 if (skb == NULL)
1158 break; 1159 break;
1159 1160
1160 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1161 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 1161 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1162 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1162 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1163 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | 1163 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
@@ -1196,7 +1196,7 @@ static void hamachi_init_ring(struct net_device *dev)
1196 * card. -KDU 1196 * card. -KDU
1197 */ 1197 */
1198 hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ : 1198 hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
1199 (((dev->mtu+26+7) & ~7) + 2 + 16)); 1199 (((dev->mtu+26+7) & ~7) + 16));
1200 1200
1201 /* Initialize all Rx descriptors. */ 1201 /* Initialize all Rx descriptors. */
1202 for (i = 0; i < RX_RING_SIZE; i++) { 1202 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index fb588301a05d..689b9bd377a5 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -34,6 +34,7 @@
34#include <linux/ip.h> 34#include <linux/ip.h>
35#include <linux/tcp.h> 35#include <linux/tcp.h>
36#include <linux/semaphore.h> 36#include <linux/semaphore.h>
37#include <linux/compat.h>
37#include <asm/atomic.h> 38#include <asm/atomic.h>
38 39
39#define SIXPACK_VERSION "Revision: 0.3.0" 40#define SIXPACK_VERSION "Revision: 0.3.0"
@@ -777,6 +778,23 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
777 return err; 778 return err;
778} 779}
779 780
781#ifdef CONFIG_COMPAT
782static long sixpack_compat_ioctl(struct tty_struct * tty, struct file * file,
783 unsigned int cmd, unsigned long arg)
784{
785 switch (cmd) {
786 case SIOCGIFNAME:
787 case SIOCGIFENCAP:
788 case SIOCSIFENCAP:
789 case SIOCSIFHWADDR:
790 return sixpack_ioctl(tty, file, cmd,
791 (unsigned long)compat_ptr(arg));
792 }
793
794 return -ENOIOCTLCMD;
795}
796#endif
797
780static struct tty_ldisc_ops sp_ldisc = { 798static struct tty_ldisc_ops sp_ldisc = {
781 .owner = THIS_MODULE, 799 .owner = THIS_MODULE,
782 .magic = TTY_LDISC_MAGIC, 800 .magic = TTY_LDISC_MAGIC,
@@ -784,6 +802,9 @@ static struct tty_ldisc_ops sp_ldisc = {
784 .open = sixpack_open, 802 .open = sixpack_open,
785 .close = sixpack_close, 803 .close = sixpack_close,
786 .ioctl = sixpack_ioctl, 804 .ioctl = sixpack_ioctl,
805#ifdef CONFIG_COMPAT
806 .compat_ioctl = sixpack_compat_ioctl,
807#endif
787 .receive_buf = sixpack_receive_buf, 808 .receive_buf = sixpack_receive_buf,
788 .write_wakeup = sixpack_write_wakeup, 809 .write_wakeup = sixpack_write_wakeup,
789}; 810};
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index e344c84c0ef9..a3c0dc9d8b98 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -596,16 +596,16 @@ static int receive(struct net_device *dev, int cnt)
596 if (!(notbitstream & (0x1fc << j))) 596 if (!(notbitstream & (0x1fc << j)))
597 state = 0; 597 state = 0;
598 598
599 /* not flag received */ 599 /* flag received */
600 else if (!(bitstream & (0x1fe << j)) != (0x0fc << j)) { 600 else if ((bitstream & (0x1fe << j)) == (0x0fc << j)) {
601 if (state) 601 if (state)
602 do_rxpacket(dev); 602 do_rxpacket(dev);
603 bc->hdlcrx.bufcnt = 0; 603 bc->hdlcrx.bufcnt = 0;
604 bc->hdlcrx.bufptr = bc->hdlcrx.buf; 604 bc->hdlcrx.bufptr = bc->hdlcrx.buf;
605 state = 1; 605 state = 1;
606 numbits = 7-j; 606 numbits = 7-j;
607 }
608 } 607 }
608 }
609 609
610 /* stuffed bit */ 610 /* stuffed bit */
611 else if (unlikely((bitstream & (0x1f8 << j)) == (0xf8 << j))) { 611 else if (unlikely((bitstream & (0x1f8 << j)) == (0xf8 << j))) {
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index fe893c91a01b..76abed9a70b1 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -186,7 +186,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
186 struct ethhdr *eth; 186 struct ethhdr *eth;
187 struct bpqdev *bpq; 187 struct bpqdev *bpq;
188 188
189 if (dev_net(dev) != &init_net) 189 if (!net_eq(dev_net(dev), &init_net))
190 goto drop; 190 goto drop;
191 191
192 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 192 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@@ -552,7 +552,7 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
552{ 552{
553 struct net_device *dev = (struct net_device *)ptr; 553 struct net_device *dev = (struct net_device *)ptr;
554 554
555 if (dev_net(dev) != &init_net) 555 if (!net_eq(dev_net(dev), &init_net))
556 return NOTIFY_DONE; 556 return NOTIFY_DONE;
557 557
558 if (!dev_is_ethdev(dev)) 558 if (!dev_is_ethdev(dev))
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index db4b7f1603f6..7db0a1c3216c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -36,6 +36,7 @@
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/if_arp.h> 37#include <linux/if_arp.h>
38#include <linux/jiffies.h> 38#include <linux/jiffies.h>
39#include <linux/compat.h>
39 40
40#include <net/ax25.h> 41#include <net/ax25.h>
41 42
@@ -898,6 +899,23 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
898 return err; 899 return err;
899} 900}
900 901
902#ifdef CONFIG_COMPAT
903static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
904 unsigned int cmd, unsigned long arg)
905{
906 switch (cmd) {
907 case SIOCGIFNAME:
908 case SIOCGIFENCAP:
909 case SIOCSIFENCAP:
910 case SIOCSIFHWADDR:
911 return mkiss_ioctl(tty, file, cmd,
912 (unsigned long)compat_ptr(arg));
913 }
914
915 return -ENOIOCTLCMD;
916}
917#endif
918
901/* 919/*
902 * Handle the 'receiver data ready' interrupt. 920 * Handle the 'receiver data ready' interrupt.
903 * This function is called by the 'tty_io' module in the kernel when 921 * This function is called by the 'tty_io' module in the kernel when
@@ -972,6 +990,9 @@ static struct tty_ldisc_ops ax_ldisc = {
972 .open = mkiss_open, 990 .open = mkiss_open,
973 .close = mkiss_close, 991 .close = mkiss_close,
974 .ioctl = mkiss_ioctl, 992 .ioctl = mkiss_ioctl,
993#ifdef CONFIG_COMPAT
994 .compat_ioctl = mkiss_compat_ioctl,
995#endif
975 .receive_buf = mkiss_receive_buf, 996 .receive_buf = mkiss_receive_buf,
976 .write_wakeup = mkiss_write_wakeup 997 .write_wakeup = mkiss_write_wakeup
977}; 998};
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3fae87559791..af117c626e73 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2149,9 +2149,12 @@ static int emac_ethtool_nway_reset(struct net_device *ndev)
2149 return res; 2149 return res;
2150} 2150}
2151 2151
2152static int emac_ethtool_get_stats_count(struct net_device *ndev) 2152static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2153{ 2153{
2154 return EMAC_ETHTOOL_STATS_COUNT; 2154 if (stringset == ETH_SS_STATS)
2155 return EMAC_ETHTOOL_STATS_COUNT;
2156 else
2157 return -EINVAL;
2155} 2158}
2156 2159
2157static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset, 2160static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
@@ -2182,7 +2185,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2182 info->fw_version[0] = '\0'; 2185 info->fw_version[0] = '\0';
2183 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", 2186 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2184 dev->cell_index, dev->ofdev->node->full_name); 2187 dev->cell_index, dev->ofdev->node->full_name);
2185 info->n_stats = emac_ethtool_get_stats_count(ndev);
2186 info->regdump_len = emac_ethtool_get_regs_len(ndev); 2188 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2187} 2189}
2188 2190
@@ -2202,7 +2204,7 @@ static const struct ethtool_ops emac_ethtool_ops = {
2202 .get_rx_csum = emac_ethtool_get_rx_csum, 2204 .get_rx_csum = emac_ethtool_get_rx_csum,
2203 2205
2204 .get_strings = emac_ethtool_get_strings, 2206 .get_strings = emac_ethtool_get_strings,
2205 .get_stats_count = emac_ethtool_get_stats_count, 2207 .get_sset_count = emac_ethtool_get_sset_count,
2206 .get_ethtool_stats = emac_ethtool_get_ethtool_stats, 2208 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2207 2209
2208 .get_link = ethtool_op_get_link, 2210 .get_link = ethtool_op_get_link,
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5862282ab2fe..a86693906ac8 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -625,7 +625,7 @@ static int ibmveth_open(struct net_device *netdev)
625 } 625 }
626 626
627 ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq); 627 ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
628 if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) { 628 if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
629 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc); 629 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
630 do { 630 do {
631 rc = h_free_logical_lan(adapter->vdev->unit_address); 631 rc = h_free_logical_lan(adapter->vdev->unit_address);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 030913f8bd26..f4081c0a2d9c 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,14 +98,16 @@ static void ri_tasklet(unsigned long dev)
98 stats->tx_packets++; 98 stats->tx_packets++;
99 stats->tx_bytes +=skb->len; 99 stats->tx_bytes +=skb->len;
100 100
101 skb->dev = dev_get_by_index(&init_net, skb->iif); 101 rcu_read_lock();
102 skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
102 if (!skb->dev) { 103 if (!skb->dev) {
104 rcu_read_unlock();
103 dev_kfree_skb(skb); 105 dev_kfree_skb(skb);
104 stats->tx_dropped++; 106 stats->tx_dropped++;
105 break; 107 break;
106 } 108 }
107 dev_put(skb->dev); 109 rcu_read_unlock();
108 skb->iif = _dev->ifindex; 110 skb->skb_iif = _dev->ifindex;
109 111
110 if (from & AT_EGRESS) { 112 if (from & AT_EGRESS) {
111 dp->st_rx_frm_egr++; 113 dp->st_rx_frm_egr++;
@@ -170,7 +172,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
170 stats->rx_packets++; 172 stats->rx_packets++;
171 stats->rx_bytes+=skb->len; 173 stats->rx_bytes+=skb->len;
172 174
173 if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->iif) { 175 if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
174 dev_kfree_skb(skb); 176 dev_kfree_skb(skb);
175 stats->rx_dropped++; 177 stats->rx_dropped++;
176 return NETDEV_TX_OK; 178 return NETDEV_TX_OK;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f8f5772557ce..e8e9e9194a88 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -46,7 +46,10 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *);
46static s32 igb_init_hw_82575(struct e1000_hw *); 46static s32 igb_init_hw_82575(struct e1000_hw *);
47static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *); 47static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
48static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *); 48static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
49static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
50static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
49static s32 igb_reset_hw_82575(struct e1000_hw *); 51static s32 igb_reset_hw_82575(struct e1000_hw *);
52static s32 igb_reset_hw_82580(struct e1000_hw *);
50static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool); 53static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
51static s32 igb_setup_copper_link_82575(struct e1000_hw *); 54static s32 igb_setup_copper_link_82575(struct e1000_hw *);
52static s32 igb_setup_serdes_link_82575(struct e1000_hw *); 55static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
@@ -62,6 +65,12 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *);
62static s32 igb_read_mac_addr_82575(struct e1000_hw *); 65static s32 igb_read_mac_addr_82575(struct e1000_hw *);
63static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); 66static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
64 67
68static const u16 e1000_82580_rxpbs_table[] =
69 { 36, 72, 144, 1, 2, 4, 8, 16,
70 35, 70, 140 };
71#define E1000_82580_RXPBS_TABLE_SIZE \
72 (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
73
65static s32 igb_get_invariants_82575(struct e1000_hw *hw) 74static s32 igb_get_invariants_82575(struct e1000_hw *hw)
66{ 75{
67 struct e1000_phy_info *phy = &hw->phy; 76 struct e1000_phy_info *phy = &hw->phy;
@@ -81,12 +90,20 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
81 break; 90 break;
82 case E1000_DEV_ID_82576: 91 case E1000_DEV_ID_82576:
83 case E1000_DEV_ID_82576_NS: 92 case E1000_DEV_ID_82576_NS:
93 case E1000_DEV_ID_82576_NS_SERDES:
84 case E1000_DEV_ID_82576_FIBER: 94 case E1000_DEV_ID_82576_FIBER:
85 case E1000_DEV_ID_82576_SERDES: 95 case E1000_DEV_ID_82576_SERDES:
86 case E1000_DEV_ID_82576_QUAD_COPPER: 96 case E1000_DEV_ID_82576_QUAD_COPPER:
87 case E1000_DEV_ID_82576_SERDES_QUAD: 97 case E1000_DEV_ID_82576_SERDES_QUAD:
88 mac->type = e1000_82576; 98 mac->type = e1000_82576;
89 break; 99 break;
100 case E1000_DEV_ID_82580_COPPER:
101 case E1000_DEV_ID_82580_FIBER:
102 case E1000_DEV_ID_82580_SERDES:
103 case E1000_DEV_ID_82580_SGMII:
104 case E1000_DEV_ID_82580_COPPER_DUAL:
105 mac->type = e1000_82580;
106 break;
90 default: 107 default:
91 return -E1000_ERR_MAC_INIT; 108 return -E1000_ERR_MAC_INIT;
92 break; 109 break;
@@ -109,6 +126,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
109 dev_spec->sgmii_active = true; 126 dev_spec->sgmii_active = true;
110 ctrl_ext |= E1000_CTRL_I2C_ENA; 127 ctrl_ext |= E1000_CTRL_I2C_ENA;
111 break; 128 break;
129 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
112 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: 130 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
113 hw->phy.media_type = e1000_media_type_internal_serdes; 131 hw->phy.media_type = e1000_media_type_internal_serdes;
114 ctrl_ext |= E1000_CTRL_I2C_ENA; 132 ctrl_ext |= E1000_CTRL_I2C_ENA;
@@ -120,12 +138,26 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
120 138
121 wr32(E1000_CTRL_EXT, ctrl_ext); 139 wr32(E1000_CTRL_EXT, ctrl_ext);
122 140
141 /*
142 * if using i2c make certain the MDICNFG register is cleared to prevent
143 * communications from being misrouted to the mdic registers
144 */
145 if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
146 wr32(E1000_MDICNFG, 0);
147
123 /* Set mta register count */ 148 /* Set mta register count */
124 mac->mta_reg_count = 128; 149 mac->mta_reg_count = 128;
125 /* Set rar entry count */ 150 /* Set rar entry count */
126 mac->rar_entry_count = E1000_RAR_ENTRIES_82575; 151 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
127 if (mac->type == e1000_82576) 152 if (mac->type == e1000_82576)
128 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 153 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
154 if (mac->type == e1000_82580)
155 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
156 /* reset */
157 if (mac->type == e1000_82580)
158 mac->ops.reset_hw = igb_reset_hw_82580;
159 else
160 mac->ops.reset_hw = igb_reset_hw_82575;
129 /* Set if part includes ASF firmware */ 161 /* Set if part includes ASF firmware */
130 mac->asf_firmware_present = true; 162 mac->asf_firmware_present = true;
131 /* Set if manageability features are enabled. */ 163 /* Set if manageability features are enabled. */
@@ -193,6 +225,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
193 phy->ops.reset = igb_phy_hw_reset_sgmii_82575; 225 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
194 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 226 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
195 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 227 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
228 } else if (hw->mac.type == e1000_82580) {
229 phy->ops.reset = igb_phy_hw_reset;
230 phy->ops.read_reg = igb_read_phy_reg_82580;
231 phy->ops.write_reg = igb_write_phy_reg_82580;
196 } else { 232 } else {
197 phy->ops.reset = igb_phy_hw_reset; 233 phy->ops.reset = igb_phy_hw_reset;
198 phy->ops.read_reg = igb_read_phy_reg_igp; 234 phy->ops.read_reg = igb_read_phy_reg_igp;
@@ -224,6 +260,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
224 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575; 260 phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
225 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; 261 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
226 break; 262 break;
263 case I82580_I_PHY_ID:
264 phy->type = e1000_phy_82580;
265 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
266 phy->ops.get_cable_length = igb_get_cable_length_82580;
267 phy->ops.get_phy_info = igb_get_phy_info_82580;
268 break;
227 default: 269 default:
228 return -E1000_ERR_PHY; 270 return -E1000_ERR_PHY;
229 } 271 }
@@ -240,9 +282,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
240 **/ 282 **/
241static s32 igb_acquire_phy_82575(struct e1000_hw *hw) 283static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
242{ 284{
243 u16 mask; 285 u16 mask = E1000_SWFW_PHY0_SM;
244 286
245 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; 287 if (hw->bus.func == E1000_FUNC_1)
288 mask = E1000_SWFW_PHY1_SM;
246 289
247 return igb_acquire_swfw_sync_82575(hw, mask); 290 return igb_acquire_swfw_sync_82575(hw, mask);
248} 291}
@@ -256,9 +299,11 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
256 **/ 299 **/
257static void igb_release_phy_82575(struct e1000_hw *hw) 300static void igb_release_phy_82575(struct e1000_hw *hw)
258{ 301{
259 u16 mask; 302 u16 mask = E1000_SWFW_PHY0_SM;
303
304 if (hw->bus.func == E1000_FUNC_1)
305 mask = E1000_SWFW_PHY1_SM;
260 306
261 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
262 igb_release_swfw_sync_82575(hw, mask); 307 igb_release_swfw_sync_82575(hw, mask);
263} 308}
264 309
@@ -274,45 +319,23 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
274static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 319static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
275 u16 *data) 320 u16 *data)
276{ 321{
277 struct e1000_phy_info *phy = &hw->phy; 322 s32 ret_val = -E1000_ERR_PARAM;
278 u32 i, i2ccmd = 0;
279 323
280 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 324 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
281 hw_dbg("PHY Address %u is out of range\n", offset); 325 hw_dbg("PHY Address %u is out of range\n", offset);
282 return -E1000_ERR_PARAM; 326 goto out;
283 } 327 }
284 328
285 /* 329 ret_val = hw->phy.ops.acquire(hw);
286 * Set up Op-code, Phy Address, and register address in the I2CCMD 330 if (ret_val)
287 * register. The MAC will take care of interfacing with the 331 goto out;
288 * PHY to retrieve the desired data.
289 */
290 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
291 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
292 (E1000_I2CCMD_OPCODE_READ));
293
294 wr32(E1000_I2CCMD, i2ccmd);
295 332
296 /* Poll the ready bit to see if the I2C read completed */ 333 ret_val = igb_read_phy_reg_i2c(hw, offset, data);
297 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
298 udelay(50);
299 i2ccmd = rd32(E1000_I2CCMD);
300 if (i2ccmd & E1000_I2CCMD_READY)
301 break;
302 }
303 if (!(i2ccmd & E1000_I2CCMD_READY)) {
304 hw_dbg("I2CCMD Read did not complete\n");
305 return -E1000_ERR_PHY;
306 }
307 if (i2ccmd & E1000_I2CCMD_ERROR) {
308 hw_dbg("I2CCMD Error bit set\n");
309 return -E1000_ERR_PHY;
310 }
311 334
312 /* Need to byte-swap the 16-bit value. */ 335 hw->phy.ops.release(hw);
313 *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
314 336
315 return 0; 337out:
338 return ret_val;
316} 339}
317 340
318/** 341/**
@@ -327,47 +350,24 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
327static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 350static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
328 u16 data) 351 u16 data)
329{ 352{
330 struct e1000_phy_info *phy = &hw->phy; 353 s32 ret_val = -E1000_ERR_PARAM;
331 u32 i, i2ccmd = 0; 354
332 u16 phy_data_swapped;
333 355
334 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 356 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
335 hw_dbg("PHY Address %d is out of range\n", offset); 357 hw_dbg("PHY Address %d is out of range\n", offset);
336 return -E1000_ERR_PARAM; 358 goto out;
337 } 359 }
338 360
339 /* Swap the data bytes for the I2C interface */ 361 ret_val = hw->phy.ops.acquire(hw);
340 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); 362 if (ret_val)
363 goto out;
341 364
342 /* 365 ret_val = igb_write_phy_reg_i2c(hw, offset, data);
343 * Set up Op-code, Phy Address, and register address in the I2CCMD
344 * register. The MAC will take care of interfacing with the
345 * PHY to retrieve the desired data.
346 */
347 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
348 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
349 E1000_I2CCMD_OPCODE_WRITE |
350 phy_data_swapped);
351
352 wr32(E1000_I2CCMD, i2ccmd);
353
354 /* Poll the ready bit to see if the I2C read completed */
355 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
356 udelay(50);
357 i2ccmd = rd32(E1000_I2CCMD);
358 if (i2ccmd & E1000_I2CCMD_READY)
359 break;
360 }
361 if (!(i2ccmd & E1000_I2CCMD_READY)) {
362 hw_dbg("I2CCMD Write did not complete\n");
363 return -E1000_ERR_PHY;
364 }
365 if (i2ccmd & E1000_I2CCMD_ERROR) {
366 hw_dbg("I2CCMD Error bit set\n");
367 return -E1000_ERR_PHY;
368 }
369 366
370 return 0; 367 hw->phy.ops.release(hw);
368
369out:
370 return ret_val;
371} 371}
372 372
373/** 373/**
@@ -676,6 +676,10 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
676 676
677 if (hw->bus.func == 1) 677 if (hw->bus.func == 1)
678 mask = E1000_NVM_CFG_DONE_PORT_1; 678 mask = E1000_NVM_CFG_DONE_PORT_1;
679 else if (hw->bus.func == E1000_FUNC_2)
680 mask = E1000_NVM_CFG_DONE_PORT_2;
681 else if (hw->bus.func == E1000_FUNC_3)
682 mask = E1000_NVM_CFG_DONE_PORT_3;
679 683
680 while (timeout) { 684 while (timeout) {
681 if (rd32(E1000_EEMNGCTL) & mask) 685 if (rd32(E1000_EEMNGCTL) & mask)
@@ -706,9 +710,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
706 s32 ret_val; 710 s32 ret_val;
707 u16 speed, duplex; 711 u16 speed, duplex;
708 712
709 /* SGMII link check is done through the PCS register. */ 713 if (hw->phy.media_type != e1000_media_type_copper) {
710 if ((hw->phy.media_type != e1000_media_type_copper) ||
711 (igb_sgmii_active_82575(hw))) {
712 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 714 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
713 &duplex); 715 &duplex);
714 /* 716 /*
@@ -723,6 +725,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
723 725
724 return ret_val; 726 return ret_val;
725} 727}
728
726/** 729/**
727 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex 730 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
728 * @hw: pointer to the HW structure 731 * @hw: pointer to the HW structure
@@ -788,13 +791,27 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
788void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 791void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
789{ 792{
790 u32 reg; 793 u32 reg;
794 u16 eeprom_data = 0;
791 795
792 if (hw->phy.media_type != e1000_media_type_internal_serdes || 796 if (hw->phy.media_type != e1000_media_type_internal_serdes ||
793 igb_sgmii_active_82575(hw)) 797 igb_sgmii_active_82575(hw))
794 return; 798 return;
795 799
796 /* if the management interface is not enabled, then power down */ 800 if (hw->bus.func == E1000_FUNC_0)
797 if (!igb_enable_mng_pass_thru(hw)) { 801 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
802 else if (hw->mac.type == e1000_82580)
803 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
804 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
805 &eeprom_data);
806 else if (hw->bus.func == E1000_FUNC_1)
807 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
808
809 /*
810 * If APM is not enabled in the EEPROM and management interface is
811 * not enabled, then power down.
812 */
813 if (!(eeprom_data & E1000_NVM_APME_82575) &&
814 !igb_enable_mng_pass_thru(hw)) {
798 /* Disable PCS to turn off link */ 815 /* Disable PCS to turn off link */
799 reg = rd32(E1000_PCS_CFG0); 816 reg = rd32(E1000_PCS_CFG0);
800 reg &= ~E1000_PCS_CFG_PCS_EN; 817 reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -908,6 +925,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
908 for (i = 0; i < mac->mta_reg_count; i++) 925 for (i = 0; i < mac->mta_reg_count; i++)
909 array_wr32(E1000_MTA, i, 0); 926 array_wr32(E1000_MTA, i, 0);
910 927
928 /* Zero out the Unicast HASH table */
929 hw_dbg("Zeroing the UTA\n");
930 for (i = 0; i < mac->uta_reg_count; i++)
931 array_wr32(E1000_UTA, i, 0);
932
911 /* Setup link and flow control */ 933 /* Setup link and flow control */
912 ret_val = igb_setup_link(hw); 934 ret_val = igb_setup_link(hw);
913 935
@@ -934,7 +956,6 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
934{ 956{
935 u32 ctrl; 957 u32 ctrl;
936 s32 ret_val; 958 s32 ret_val;
937 bool link;
938 959
939 ctrl = rd32(E1000_CTRL); 960 ctrl = rd32(E1000_CTRL);
940 ctrl |= E1000_CTRL_SLU; 961 ctrl |= E1000_CTRL_SLU;
@@ -946,6 +967,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
946 goto out; 967 goto out;
947 968
948 if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) { 969 if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
970 /* allow time for SFP cage time to power up phy */
971 msleep(300);
972
949 ret_val = hw->phy.ops.reset(hw); 973 ret_val = hw->phy.ops.reset(hw);
950 if (ret_val) { 974 if (ret_val) {
951 hw_dbg("Error resetting the PHY.\n"); 975 hw_dbg("Error resetting the PHY.\n");
@@ -959,6 +983,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
959 case e1000_phy_igp_3: 983 case e1000_phy_igp_3:
960 ret_val = igb_copper_link_setup_igp(hw); 984 ret_val = igb_copper_link_setup_igp(hw);
961 break; 985 break;
986 case e1000_phy_82580:
987 ret_val = igb_copper_link_setup_82580(hw);
988 break;
962 default: 989 default:
963 ret_val = -E1000_ERR_PHY; 990 ret_val = -E1000_ERR_PHY;
964 break; 991 break;
@@ -967,57 +994,24 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
967 if (ret_val) 994 if (ret_val)
968 goto out; 995 goto out;
969 996
970 if (hw->mac.autoneg) { 997 ret_val = igb_setup_copper_link(hw);
971 /*
972 * Setup autoneg and flow control advertisement
973 * and perform autonegotiation.
974 */
975 ret_val = igb_copper_link_autoneg(hw);
976 if (ret_val)
977 goto out;
978 } else {
979 /*
980 * PHY will be set to 10H, 10F, 100H or 100F
981 * depending on user settings.
982 */
983 hw_dbg("Forcing Speed and Duplex\n");
984 ret_val = hw->phy.ops.force_speed_duplex(hw);
985 if (ret_val) {
986 hw_dbg("Error Forcing Speed and Duplex\n");
987 goto out;
988 }
989 }
990
991 /*
992 * Check link status. Wait up to 100 microseconds for link to become
993 * valid.
994 */
995 ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
996 if (ret_val)
997 goto out;
998
999 if (link) {
1000 hw_dbg("Valid link established!!!\n");
1001 /* Config the MAC and PHY after link is up */
1002 igb_config_collision_dist(hw);
1003 ret_val = igb_config_fc_after_link_up(hw);
1004 } else {
1005 hw_dbg("Unable to establish link!!!\n");
1006 }
1007
1008out: 998out:
1009 return ret_val; 999 return ret_val;
1010} 1000}
1011 1001
1012/** 1002/**
1013 * igb_setup_serdes_link_82575 - Setup link for fiber/serdes 1003 * igb_setup_serdes_link_82575 - Setup link for serdes
1014 * @hw: pointer to the HW structure 1004 * @hw: pointer to the HW structure
1015 * 1005 *
1016 * Configures speed and duplex for fiber and serdes links. 1006 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1007 * used on copper connections where the serialized gigabit media independent
1008 * interface (sgmii), or serdes fiber is being used. Configures the link
1009 * for auto-negotiation or forces speed/duplex.
1017 **/ 1010 **/
1018static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 1011static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1019{ 1012{
1020 u32 ctrl_reg, reg; 1013 u32 ctrl_ext, ctrl_reg, reg;
1014 bool pcs_autoneg;
1021 1015
1022 if ((hw->phy.media_type != e1000_media_type_internal_serdes) && 1016 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1023 !igb_sgmii_active_82575(hw)) 1017 !igb_sgmii_active_82575(hw))
@@ -1032,9 +1026,9 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1032 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); 1026 wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1033 1027
1034 /* power on the sfp cage if present */ 1028 /* power on the sfp cage if present */
1035 reg = rd32(E1000_CTRL_EXT); 1029 ctrl_ext = rd32(E1000_CTRL_EXT);
1036 reg &= ~E1000_CTRL_EXT_SDP3_DATA; 1030 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1037 wr32(E1000_CTRL_EXT, reg); 1031 wr32(E1000_CTRL_EXT, ctrl_ext);
1038 1032
1039 ctrl_reg = rd32(E1000_CTRL); 1033 ctrl_reg = rd32(E1000_CTRL);
1040 ctrl_reg |= E1000_CTRL_SLU; 1034 ctrl_reg |= E1000_CTRL_SLU;
@@ -1051,15 +1045,31 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1051 1045
1052 reg = rd32(E1000_PCS_LCTL); 1046 reg = rd32(E1000_PCS_LCTL);
1053 1047
1054 if (igb_sgmii_active_82575(hw)) { 1048 /* default pcs_autoneg to the same setting as mac autoneg */
1055 /* allow time for SFP cage to power up phy */ 1049 pcs_autoneg = hw->mac.autoneg;
1056 msleep(300);
1057 1050
1058 /* AN time out should be disabled for SGMII mode */ 1051 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1052 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1053 /* sgmii mode lets the phy handle forcing speed/duplex */
1054 pcs_autoneg = true;
1055 /* autoneg time out should be disabled for SGMII mode */
1059 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); 1056 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1060 } else { 1057 break;
1058 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1059 /* disable PCS autoneg and support parallel detect only */
1060 pcs_autoneg = false;
1061 default:
1062 /*
1063 * non-SGMII modes only supports a speed of 1000/Full for the
1064 * link so it is best to just force the MAC and let the pcs
1065 * link either autoneg or be forced to 1000/Full
1066 */
1061 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | 1067 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1062 E1000_CTRL_FD | E1000_CTRL_FRCDPX; 1068 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1069
1070 /* set speed of 1000/Full if speed/duplex is forced */
1071 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1072 break;
1063 } 1073 }
1064 1074
1065 wr32(E1000_CTRL, ctrl_reg); 1075 wr32(E1000_CTRL, ctrl_reg);
@@ -1070,7 +1080,6 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1070 * mode that will be compatible with older link partners and switches. 1080 * mode that will be compatible with older link partners and switches.
1071 * However, both are supported by the hardware and some drivers/tools. 1081 * However, both are supported by the hardware and some drivers/tools.
1072 */ 1082 */
1073
1074 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | 1083 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1075 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); 1084 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1076 1085
@@ -1080,25 +1089,18 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1080 */ 1089 */
1081 reg |= E1000_PCS_LCTL_FORCE_FCTRL; 1090 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1082 1091
1083 /* 1092 if (pcs_autoneg) {
1084 * we always set sgmii to autoneg since it is the phy that will be
1085 * forcing the link and the serdes is just a go-between
1086 */
1087 if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) {
1088 /* Set PCS register for autoneg */ 1093 /* Set PCS register for autoneg */
1089 reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ 1094 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1090 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1095 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1091 E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1096 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1092 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1093 hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
1094 } else { 1097 } else {
1095 /* Set PCS register for forced speed */ 1098 /* Set PCS register for forced link */
1096 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ 1099 reg |= E1000_PCS_LCTL_FSD | /* Force Speed */
1097 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ 1100 E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
1098 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1101 E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
1099 E1000_PCS_LCTL_FSD | /* Force Speed */ 1102
1100 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1103 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1101 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
1102 } 1104 }
1103 1105
1104 wr32(E1000_PCS_LCTL, reg); 1106 wr32(E1000_PCS_LCTL, reg);
@@ -1167,9 +1169,18 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1167{ 1169{
1168 s32 ret_val = 0; 1170 s32 ret_val = 0;
1169 1171
1170 if (igb_check_alt_mac_addr(hw)) 1172 /*
1171 ret_val = igb_read_mac_addr(hw); 1173 * If there's an alternate MAC address place it in RAR0
1174 * so that it will override the Si installed default perm
1175 * address.
1176 */
1177 ret_val = igb_check_alt_mac_addr(hw);
1178 if (ret_val)
1179 goto out;
1180
1181 ret_val = igb_read_mac_addr(hw);
1172 1182
1183out:
1173 return ret_val; 1184 return ret_val;
1174} 1185}
1175 1186
@@ -1181,61 +1192,59 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1181 **/ 1192 **/
1182static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) 1193static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1183{ 1194{
1184 u32 temp;
1185
1186 igb_clear_hw_cntrs_base(hw); 1195 igb_clear_hw_cntrs_base(hw);
1187 1196
1188 temp = rd32(E1000_PRC64); 1197 rd32(E1000_PRC64);
1189 temp = rd32(E1000_PRC127); 1198 rd32(E1000_PRC127);
1190 temp = rd32(E1000_PRC255); 1199 rd32(E1000_PRC255);
1191 temp = rd32(E1000_PRC511); 1200 rd32(E1000_PRC511);
1192 temp = rd32(E1000_PRC1023); 1201 rd32(E1000_PRC1023);
1193 temp = rd32(E1000_PRC1522); 1202 rd32(E1000_PRC1522);
1194 temp = rd32(E1000_PTC64); 1203 rd32(E1000_PTC64);
1195 temp = rd32(E1000_PTC127); 1204 rd32(E1000_PTC127);
1196 temp = rd32(E1000_PTC255); 1205 rd32(E1000_PTC255);
1197 temp = rd32(E1000_PTC511); 1206 rd32(E1000_PTC511);
1198 temp = rd32(E1000_PTC1023); 1207 rd32(E1000_PTC1023);
1199 temp = rd32(E1000_PTC1522); 1208 rd32(E1000_PTC1522);
1200 1209
1201 temp = rd32(E1000_ALGNERRC); 1210 rd32(E1000_ALGNERRC);
1202 temp = rd32(E1000_RXERRC); 1211 rd32(E1000_RXERRC);
1203 temp = rd32(E1000_TNCRS); 1212 rd32(E1000_TNCRS);
1204 temp = rd32(E1000_CEXTERR); 1213 rd32(E1000_CEXTERR);
1205 temp = rd32(E1000_TSCTC); 1214 rd32(E1000_TSCTC);
1206 temp = rd32(E1000_TSCTFC); 1215 rd32(E1000_TSCTFC);
1207 1216
1208 temp = rd32(E1000_MGTPRC); 1217 rd32(E1000_MGTPRC);
1209 temp = rd32(E1000_MGTPDC); 1218 rd32(E1000_MGTPDC);
1210 temp = rd32(E1000_MGTPTC); 1219 rd32(E1000_MGTPTC);
1211 1220
1212 temp = rd32(E1000_IAC); 1221 rd32(E1000_IAC);
1213 temp = rd32(E1000_ICRXOC); 1222 rd32(E1000_ICRXOC);
1214 1223
1215 temp = rd32(E1000_ICRXPTC); 1224 rd32(E1000_ICRXPTC);
1216 temp = rd32(E1000_ICRXATC); 1225 rd32(E1000_ICRXATC);
1217 temp = rd32(E1000_ICTXPTC); 1226 rd32(E1000_ICTXPTC);
1218 temp = rd32(E1000_ICTXATC); 1227 rd32(E1000_ICTXATC);
1219 temp = rd32(E1000_ICTXQEC); 1228 rd32(E1000_ICTXQEC);
1220 temp = rd32(E1000_ICTXQMTC); 1229 rd32(E1000_ICTXQMTC);
1221 temp = rd32(E1000_ICRXDMTC); 1230 rd32(E1000_ICRXDMTC);
1222 1231
1223 temp = rd32(E1000_CBTMPC); 1232 rd32(E1000_CBTMPC);
1224 temp = rd32(E1000_HTDPMC); 1233 rd32(E1000_HTDPMC);
1225 temp = rd32(E1000_CBRMPC); 1234 rd32(E1000_CBRMPC);
1226 temp = rd32(E1000_RPTHC); 1235 rd32(E1000_RPTHC);
1227 temp = rd32(E1000_HGPTC); 1236 rd32(E1000_HGPTC);
1228 temp = rd32(E1000_HTCBDPC); 1237 rd32(E1000_HTCBDPC);
1229 temp = rd32(E1000_HGORCL); 1238 rd32(E1000_HGORCL);
1230 temp = rd32(E1000_HGORCH); 1239 rd32(E1000_HGORCH);
1231 temp = rd32(E1000_HGOTCL); 1240 rd32(E1000_HGOTCL);
1232 temp = rd32(E1000_HGOTCH); 1241 rd32(E1000_HGOTCH);
1233 temp = rd32(E1000_LENERRS); 1242 rd32(E1000_LENERRS);
1234 1243
1235 /* This register should not be read in copper configurations */ 1244 /* This register should not be read in copper configurations */
1236 if (hw->phy.media_type == e1000_media_type_internal_serdes || 1245 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1237 igb_sgmii_active_82575(hw)) 1246 igb_sgmii_active_82575(hw))
1238 temp = rd32(E1000_SCVPC); 1247 rd32(E1000_SCVPC);
1239} 1248}
1240 1249
1241/** 1250/**
@@ -1400,8 +1409,183 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1400 wr32(E1000_VT_CTL, vt_ctl); 1409 wr32(E1000_VT_CTL, vt_ctl);
1401} 1410}
1402 1411
1412/**
1413 * igb_read_phy_reg_82580 - Read 82580 MDI control register
1414 * @hw: pointer to the HW structure
1415 * @offset: register offset to be read
1416 * @data: pointer to the read data
1417 *
1418 * Reads the MDI control register in the PHY at offset and stores the
1419 * information read to data.
1420 **/
1421static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1422{
1423 u32 mdicnfg = 0;
1424 s32 ret_val;
1425
1426
1427 ret_val = hw->phy.ops.acquire(hw);
1428 if (ret_val)
1429 goto out;
1430
1431 /*
1432 * We config the phy address in MDICNFG register now. Same bits
1433 * as before. The values in MDIC can be written but will be
1434 * ignored. This allows us to call the old function after
1435 * configuring the PHY address in the new register
1436 */
1437 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1438 wr32(E1000_MDICNFG, mdicnfg);
1439
1440 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1441
1442 hw->phy.ops.release(hw);
1443
1444out:
1445 return ret_val;
1446}
1447
1448/**
1449 * igb_write_phy_reg_82580 - Write 82580 MDI control register
1450 * @hw: pointer to the HW structure
1451 * @offset: register offset to write to
1452 * @data: data to write to register at offset
1453 *
1454 * Writes data to MDI control register in the PHY at offset.
1455 **/
1456static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1457{
1458 u32 mdicnfg = 0;
1459 s32 ret_val;
1460
1461
1462 ret_val = hw->phy.ops.acquire(hw);
1463 if (ret_val)
1464 goto out;
1465
1466 /*
1467 * We config the phy address in MDICNFG register now. Same bits
1468 * as before. The values in MDIC can be written but will be
1469 * ignored. This allows us to call the old function after
1470 * configuring the PHY address in the new register
1471 */
1472 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1473 wr32(E1000_MDICNFG, mdicnfg);
1474
1475 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1476
1477 hw->phy.ops.release(hw);
1478
1479out:
1480 return ret_val;
1481}
1482
1483/**
1484 * igb_reset_hw_82580 - Reset hardware
1485 * @hw: pointer to the HW structure
1486 *
1487 * This resets function or entire device (all ports, etc.)
1488 * to a known state.
1489 **/
1490static s32 igb_reset_hw_82580(struct e1000_hw *hw)
1491{
1492 s32 ret_val = 0;
1493 /* BH SW mailbox bit in SW_FW_SYNC */
1494 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1495 u32 ctrl, icr;
1496 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1497
1498
1499 hw->dev_spec._82575.global_device_reset = false;
1500
1501 /* Get current control state. */
1502 ctrl = rd32(E1000_CTRL);
1503
1504 /*
1505 * Prevent the PCI-E bus from sticking if there is no TLP connection
1506 * on the last TLP read/write transaction when MAC is reset.
1507 */
1508 ret_val = igb_disable_pcie_master(hw);
1509 if (ret_val)
1510 hw_dbg("PCI-E Master disable polling has failed.\n");
1511
1512 hw_dbg("Masking off all interrupts\n");
1513 wr32(E1000_IMC, 0xffffffff);
1514 wr32(E1000_RCTL, 0);
1515 wr32(E1000_TCTL, E1000_TCTL_PSP);
1516 wrfl();
1517
1518 msleep(10);
1519
1520 /* Determine whether or not a global dev reset is requested */
1521 if (global_device_reset &&
1522 igb_acquire_swfw_sync_82575(hw, swmbsw_mask))
1523 global_device_reset = false;
1524
1525 if (global_device_reset &&
1526 !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
1527 ctrl |= E1000_CTRL_DEV_RST;
1528 else
1529 ctrl |= E1000_CTRL_RST;
1530
1531 wr32(E1000_CTRL, ctrl);
1532
1533 /* Add delay to insure DEV_RST has time to complete */
1534 if (global_device_reset)
1535 msleep(5);
1536
1537 ret_val = igb_get_auto_rd_done(hw);
1538 if (ret_val) {
1539 /*
1540 * When auto config read does not complete, do not
1541 * return with an error. This can happen in situations
1542 * where there is no eeprom and prevents getting link.
1543 */
1544 hw_dbg("Auto Read Done did not complete\n");
1545 }
1546
1547 /* If EEPROM is not present, run manual init scripts */
1548 if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
1549 igb_reset_init_script_82575(hw);
1550
1551 /* clear global device reset status bit */
1552 wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
1553
1554 /* Clear any pending interrupt events. */
1555 wr32(E1000_IMC, 0xffffffff);
1556 icr = rd32(E1000_ICR);
1557
1558 /* Install any alternate MAC address into RAR0 */
1559 ret_val = igb_check_alt_mac_addr(hw);
1560
1561 /* Release semaphore */
1562 if (global_device_reset)
1563 igb_release_swfw_sync_82575(hw, swmbsw_mask);
1564
1565 return ret_val;
1566}
1567
1568/**
1569 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
1570 * @data: data received by reading RXPBS register
1571 *
1572 * The 82580 uses a table based approach for packet buffer allocation sizes.
1573 * This function converts the retrieved value into the correct table value
1574 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
1575 * 0x0 36 72 144 1 2 4 8 16
1576 * 0x8 35 70 140 rsv rsv rsv rsv rsv
1577 */
1578u16 igb_rxpbs_adjust_82580(u32 data)
1579{
1580 u16 ret_val = 0;
1581
1582 if (data < E1000_82580_RXPBS_TABLE_SIZE)
1583 ret_val = e1000_82580_rxpbs_table[data];
1584
1585 return ret_val;
1586}
1587
1403static struct e1000_mac_operations e1000_mac_ops_82575 = { 1588static struct e1000_mac_operations e1000_mac_ops_82575 = {
1404 .reset_hw = igb_reset_hw_82575,
1405 .init_hw = igb_init_hw_82575, 1589 .init_hw = igb_init_hw_82575,
1406 .check_for_link = igb_check_for_link_82575, 1590 .check_for_link = igb_check_for_link_82575,
1407 .rar_set = igb_rar_set, 1591 .rar_set = igb_rar_set,
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index ebd146fd4e15..d51c9927c819 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,6 +38,11 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
38 38
39#define E1000_RAR_ENTRIES_82575 16 39#define E1000_RAR_ENTRIES_82575 16
40#define E1000_RAR_ENTRIES_82576 24 40#define E1000_RAR_ENTRIES_82576 24
41#define E1000_RAR_ENTRIES_82580 24
42
43#define E1000_SW_SYNCH_MB 0x00000100
44#define E1000_STAT_DEV_RST_SET 0x00100000
45#define E1000_CTRL_DEV_RST 0x20000000
41 46
42/* SRRCTL bit definitions */ 47/* SRRCTL bit definitions */
43#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ 48#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
@@ -66,6 +71,8 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
66 E1000_EICR_RX_QUEUE3) 71 E1000_EICR_RX_QUEUE3)
67 72
68/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 73/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
74#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
75#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
69 76
70/* Receive Descriptor - Advanced */ 77/* Receive Descriptor - Advanced */
71union e1000_adv_rx_desc { 78union e1000_adv_rx_desc {
@@ -98,6 +105,7 @@ union e1000_adv_rx_desc {
98 105
99#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 106#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
100#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 107#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
108#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
101 109
102/* Transmit Descriptor - Advanced */ 110/* Transmit Descriptor - Advanced */
103union e1000_adv_tx_desc { 111union e1000_adv_tx_desc {
@@ -167,6 +175,18 @@ struct e1000_adv_tx_context_desc {
167#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ 175#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
168#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ 176#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
169 177
178/* ETQF register bit definitions */
179#define E1000_ETQF_FILTER_ENABLE (1 << 26)
180#define E1000_ETQF_1588 (1 << 30)
181
182/* FTQF register bit definitions */
183#define E1000_FTQF_VF_BP 0x00008000
184#define E1000_FTQF_1588_TIME_STAMP 0x08000000
185#define E1000_FTQF_MASK 0xF0000000
186#define E1000_FTQF_MASK_PROTO_BP 0x10000000
187#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
188
189#define E1000_NVM_APME_82575 0x0400
170#define MAX_NUM_VFS 8 190#define MAX_NUM_VFS 8
171 191
172#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ 192#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
@@ -202,9 +222,21 @@ struct e1000_adv_tx_context_desc {
202#define E1000_IOVCTL 0x05BBC 222#define E1000_IOVCTL 0x05BBC
203#define E1000_IOVCTL_REUSE_VFQ 0x00000001 223#define E1000_IOVCTL_REUSE_VFQ 0x00000001
204 224
225#define E1000_RPLOLR_STRVLAN 0x40000000
226#define E1000_RPLOLR_STRCRC 0x80000000
227
228#define E1000_DTXCTL_8023LL 0x0004
229#define E1000_DTXCTL_VLAN_ADDED 0x0008
230#define E1000_DTXCTL_OOS_ENABLE 0x0010
231#define E1000_DTXCTL_MDP_EN 0x0020
232#define E1000_DTXCTL_SPOOF_INT 0x0040
233
205#define ALL_QUEUES 0xFFFF 234#define ALL_QUEUES 0xFFFF
206 235
236/* RX packet buffer size defines */
237#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
207void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 238void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
208void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 239void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
240u16 igb_rxpbs_adjust_82580(u32 data);
209 241
210#endif 242#endif
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index cb916833f303..6e036ae3138f 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -49,6 +49,7 @@
49#define E1000_CTRL_EXT_PFRSTD 0x00004000 49#define E1000_CTRL_EXT_PFRSTD 0x00004000
50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
52#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
53#define E1000_CTRL_EXT_EIAME 0x01000000 54#define E1000_CTRL_EXT_EIAME 0x01000000
54#define E1000_CTRL_EXT_IRCA 0x00000001 55#define E1000_CTRL_EXT_IRCA 0x00000001
@@ -329,6 +330,7 @@
329#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 330#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
330#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 331#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
331#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ 332#define E1000_ICR_VMMB 0x00000100 /* VM MB event */
333#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */
332/* If this bit asserted, the driver should claim the interrupt */ 334/* If this bit asserted, the driver should claim the interrupt */
333#define E1000_ICR_INT_ASSERTED 0x80000000 335#define E1000_ICR_INT_ASSERTED 0x80000000
334/* LAN connected device generates an interrupt */ 336/* LAN connected device generates an interrupt */
@@ -370,6 +372,7 @@
370#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 372#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
371#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 373#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
372#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 374#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
375#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */
373#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ 376#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
374 377
375/* Extended Interrupt Mask Set */ 378/* Extended Interrupt Mask Set */
@@ -378,6 +381,7 @@
378/* Interrupt Cause Set */ 381/* Interrupt Cause Set */
379#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 382#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
380#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 383#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
384#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
381 385
382/* Extended Interrupt Cause Set */ 386/* Extended Interrupt Cause Set */
383 387
@@ -435,6 +439,39 @@
435/* Flow Control */ 439/* Flow Control */
436#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 440#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
437 441
442#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
443#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */
444
445#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
446#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
447#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
448#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
449#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
450#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
451#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
452#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */
453
454#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
455#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
456#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
457#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
458#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
459#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
460
461#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
462#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
463#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
464#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
465#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
466#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
467#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
468#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
469#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
470#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
471#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
472
473#define E1000_TIMINCA_16NS_SHIFT 24
474
438/* PCI Express Control */ 475/* PCI Express Control */
439#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 476#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
440#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 477#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
@@ -524,8 +561,12 @@
524#define NVM_ALT_MAC_ADDR_PTR 0x0037 561#define NVM_ALT_MAC_ADDR_PTR 0x0037
525#define NVM_CHECKSUM_REG 0x003F 562#define NVM_CHECKSUM_REG 0x003F
526 563
527#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ 564#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
528#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ 565#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
566#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */
567#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */
568
569#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0)
529 570
530/* Mask bits for fields in Word 0x0f of the NVM */ 571/* Mask bits for fields in Word 0x0f of the NVM */
531#define NVM_WORD0F_PAUSE_MASK 0x3000 572#define NVM_WORD0F_PAUSE_MASK 0x3000
@@ -592,6 +633,7 @@
592 */ 633 */
593#define M88E1111_I_PHY_ID 0x01410CC0 634#define M88E1111_I_PHY_ID 0x01410CC0
594#define IGP03E1000_E_PHY_ID 0x02A80390 635#define IGP03E1000_E_PHY_ID 0x02A80390
636#define I82580_I_PHY_ID 0x015403A0
595#define M88_VENDOR 0x0141 637#define M88_VENDOR 0x0141
596 638
597/* M88E1000 Specific Registers */ 639/* M88E1000 Specific Registers */
@@ -678,4 +720,8 @@
678#define E1000_VFTA_ENTRY_MASK 0x7F 720#define E1000_VFTA_ENTRY_MASK 0x7F
679#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 721#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
680 722
723/* DMA Coalescing register fields */
724#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
725 on DMA coal */
726
681#endif 727#endif
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 119869b1124d..dbaeb5f5e0c7 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -42,20 +42,35 @@ struct e1000_hw;
42#define E1000_DEV_ID_82576_SERDES 0x10E7 42#define E1000_DEV_ID_82576_SERDES 0x10E7
43#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 43#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
44#define E1000_DEV_ID_82576_NS 0x150A 44#define E1000_DEV_ID_82576_NS 0x150A
45#define E1000_DEV_ID_82576_NS_SERDES 0x1518
45#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D 46#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
46#define E1000_DEV_ID_82575EB_COPPER 0x10A7 47#define E1000_DEV_ID_82575EB_COPPER 0x10A7
47#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 48#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
48#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 49#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
50#define E1000_DEV_ID_82580_COPPER 0x150E
51#define E1000_DEV_ID_82580_FIBER 0x150F
52#define E1000_DEV_ID_82580_SERDES 0x1510
53#define E1000_DEV_ID_82580_SGMII 0x1511
54#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
49 55
50#define E1000_REVISION_2 2 56#define E1000_REVISION_2 2
51#define E1000_REVISION_4 4 57#define E1000_REVISION_4 4
52 58
59#define E1000_FUNC_0 0
53#define E1000_FUNC_1 1 60#define E1000_FUNC_1 1
61#define E1000_FUNC_2 2
62#define E1000_FUNC_3 3
63
64#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
65#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
66#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6
67#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9
54 68
55enum e1000_mac_type { 69enum e1000_mac_type {
56 e1000_undefined = 0, 70 e1000_undefined = 0,
57 e1000_82575, 71 e1000_82575,
58 e1000_82576, 72 e1000_82576,
73 e1000_82580,
59 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 74 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
60}; 75};
61 76
@@ -70,7 +85,6 @@ enum e1000_nvm_type {
70 e1000_nvm_unknown = 0, 85 e1000_nvm_unknown = 0,
71 e1000_nvm_none, 86 e1000_nvm_none,
72 e1000_nvm_eeprom_spi, 87 e1000_nvm_eeprom_spi,
73 e1000_nvm_eeprom_microwire,
74 e1000_nvm_flash_hw, 88 e1000_nvm_flash_hw,
75 e1000_nvm_flash_sw 89 e1000_nvm_flash_sw
76}; 90};
@@ -79,8 +93,6 @@ enum e1000_nvm_override {
79 e1000_nvm_override_none = 0, 93 e1000_nvm_override_none = 0,
80 e1000_nvm_override_spi_small, 94 e1000_nvm_override_spi_small,
81 e1000_nvm_override_spi_large, 95 e1000_nvm_override_spi_large,
82 e1000_nvm_override_microwire_small,
83 e1000_nvm_override_microwire_large
84}; 96};
85 97
86enum e1000_phy_type { 98enum e1000_phy_type {
@@ -92,6 +104,7 @@ enum e1000_phy_type {
92 e1000_phy_gg82563, 104 e1000_phy_gg82563,
93 e1000_phy_igp_3, 105 e1000_phy_igp_3,
94 e1000_phy_ife, 106 e1000_phy_ife,
107 e1000_phy_82580,
95}; 108};
96 109
97enum e1000_bus_type { 110enum e1000_bus_type {
@@ -288,6 +301,7 @@ struct e1000_mac_operations {
288 301
289struct e1000_phy_operations { 302struct e1000_phy_operations {
290 s32 (*acquire)(struct e1000_hw *); 303 s32 (*acquire)(struct e1000_hw *);
304 s32 (*check_polarity)(struct e1000_hw *);
291 s32 (*check_reset_block)(struct e1000_hw *); 305 s32 (*check_reset_block)(struct e1000_hw *);
292 s32 (*force_speed_duplex)(struct e1000_hw *); 306 s32 (*force_speed_duplex)(struct e1000_hw *);
293 s32 (*get_cfg_done)(struct e1000_hw *hw); 307 s32 (*get_cfg_done)(struct e1000_hw *hw);
@@ -339,6 +353,7 @@ struct e1000_mac_info {
339 u16 ifs_ratio; 353 u16 ifs_ratio;
340 u16 ifs_step_size; 354 u16 ifs_step_size;
341 u16 mta_reg_count; 355 u16 mta_reg_count;
356 u16 uta_reg_count;
342 357
343 /* Maximum size of the MTA register table in all supported adapters */ 358 /* Maximum size of the MTA register table in all supported adapters */
344 #define MAX_MTA_REG 128 359 #define MAX_MTA_REG 128
@@ -463,6 +478,7 @@ struct e1000_mbx_info {
463 478
464struct e1000_dev_spec_82575 { 479struct e1000_dev_spec_82575 {
465 bool sgmii_active; 480 bool sgmii_active;
481 bool global_device_reset;
466}; 482};
467 483
468struct e1000_hw { 484struct e1000_hw {
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 7d76bb085e10..2ad358a240bf 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
185 } 185 }
186 186
187 if (nvm_alt_mac_addr_offset == 0xFFFF) { 187 if (nvm_alt_mac_addr_offset == 0xFFFF) {
188 ret_val = -(E1000_NOT_IMPLEMENTED); 188 /* There is no Alternate MAC Address */
189 goto out; 189 goto out;
190 } 190 }
191 191
192 if (hw->bus.func == E1000_FUNC_1) 192 if (hw->bus.func == E1000_FUNC_1)
193 nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16); 193 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
194
195 for (i = 0; i < ETH_ALEN; i += 2) { 194 for (i = 0; i < ETH_ALEN; i += 2) {
196 offset = nvm_alt_mac_addr_offset + (i >> 1); 195 offset = nvm_alt_mac_addr_offset + (i >> 1);
197 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); 196 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
206 205
207 /* if multicast bit is set, the alternate address will not be used */ 206 /* if multicast bit is set, the alternate address will not be used */
208 if (alt_mac_addr[0] & 0x01) { 207 if (alt_mac_addr[0] & 0x01) {
209 ret_val = -(E1000_NOT_IMPLEMENTED); 208 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
210 goto out; 209 goto out;
211 } 210 }
212 211
213 for (i = 0; i < ETH_ALEN; i++) 212 /*
214 hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i]; 213 * We have a valid alternate MAC address, and we want to treat it the
215 214 * same as the normal permanent MAC address stored by the HW into the
216 hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0); 215 * RAR. Do this by mapping this address into RAR0.
216 */
217 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
217 218
218out: 219out:
219 return ret_val; 220 return ret_val;
@@ -246,8 +247,15 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
246 if (rar_low || rar_high) 247 if (rar_low || rar_high)
247 rar_high |= E1000_RAH_AV; 248 rar_high |= E1000_RAH_AV;
248 249
250 /*
251 * Some bridges will combine consecutive 32-bit writes into
252 * a single burst write, which will malfunction on some parts.
253 * The flushes avoid this.
254 */
249 wr32(E1000_RAL(index), rar_low); 255 wr32(E1000_RAL(index), rar_low);
256 wrfl();
250 wr32(E1000_RAH(index), rar_high); 257 wr32(E1000_RAH(index), rar_high);
258 wrfl();
251} 259}
252 260
253/** 261/**
@@ -399,45 +407,43 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
399 **/ 407 **/
400void igb_clear_hw_cntrs_base(struct e1000_hw *hw) 408void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
401{ 409{
402 u32 temp; 410 rd32(E1000_CRCERRS);
403 411 rd32(E1000_SYMERRS);
404 temp = rd32(E1000_CRCERRS); 412 rd32(E1000_MPC);
405 temp = rd32(E1000_SYMERRS); 413 rd32(E1000_SCC);
406 temp = rd32(E1000_MPC); 414 rd32(E1000_ECOL);
407 temp = rd32(E1000_SCC); 415 rd32(E1000_MCC);
408 temp = rd32(E1000_ECOL); 416 rd32(E1000_LATECOL);
409 temp = rd32(E1000_MCC); 417 rd32(E1000_COLC);
410 temp = rd32(E1000_LATECOL); 418 rd32(E1000_DC);
411 temp = rd32(E1000_COLC); 419 rd32(E1000_SEC);
412 temp = rd32(E1000_DC); 420 rd32(E1000_RLEC);
413 temp = rd32(E1000_SEC); 421 rd32(E1000_XONRXC);
414 temp = rd32(E1000_RLEC); 422 rd32(E1000_XONTXC);
415 temp = rd32(E1000_XONRXC); 423 rd32(E1000_XOFFRXC);
416 temp = rd32(E1000_XONTXC); 424 rd32(E1000_XOFFTXC);
417 temp = rd32(E1000_XOFFRXC); 425 rd32(E1000_FCRUC);
418 temp = rd32(E1000_XOFFTXC); 426 rd32(E1000_GPRC);
419 temp = rd32(E1000_FCRUC); 427 rd32(E1000_BPRC);
420 temp = rd32(E1000_GPRC); 428 rd32(E1000_MPRC);
421 temp = rd32(E1000_BPRC); 429 rd32(E1000_GPTC);
422 temp = rd32(E1000_MPRC); 430 rd32(E1000_GORCL);
423 temp = rd32(E1000_GPTC); 431 rd32(E1000_GORCH);
424 temp = rd32(E1000_GORCL); 432 rd32(E1000_GOTCL);
425 temp = rd32(E1000_GORCH); 433 rd32(E1000_GOTCH);
426 temp = rd32(E1000_GOTCL); 434 rd32(E1000_RNBC);
427 temp = rd32(E1000_GOTCH); 435 rd32(E1000_RUC);
428 temp = rd32(E1000_RNBC); 436 rd32(E1000_RFC);
429 temp = rd32(E1000_RUC); 437 rd32(E1000_ROC);
430 temp = rd32(E1000_RFC); 438 rd32(E1000_RJC);
431 temp = rd32(E1000_ROC); 439 rd32(E1000_TORL);
432 temp = rd32(E1000_RJC); 440 rd32(E1000_TORH);
433 temp = rd32(E1000_TORL); 441 rd32(E1000_TOTL);
434 temp = rd32(E1000_TORH); 442 rd32(E1000_TOTH);
435 temp = rd32(E1000_TOTL); 443 rd32(E1000_TPR);
436 temp = rd32(E1000_TOTH); 444 rd32(E1000_TPT);
437 temp = rd32(E1000_TPR); 445 rd32(E1000_MPTC);
438 temp = rd32(E1000_TPT); 446 rd32(E1000_BPTC);
439 temp = rd32(E1000_MPTC);
440 temp = rd32(E1000_BPTC);
441} 447}
442 448
443/** 449/**
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index ed9058eca45c..c474cdb70047 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -143,12 +143,16 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
143 if (!countdown || !mbx->ops.check_for_msg) 143 if (!countdown || !mbx->ops.check_for_msg)
144 goto out; 144 goto out;
145 145
146 while (mbx->ops.check_for_msg(hw, mbx_id)) { 146 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
147 countdown--; 147 countdown--;
148 if (!countdown) 148 if (!countdown)
149 break; 149 break;
150 udelay(mbx->usec_delay); 150 udelay(mbx->usec_delay);
151 } 151 }
152
153 /* if we failed, all future posted messages fail until reset */
154 if (!countdown)
155 mbx->timeout = 0;
152out: 156out:
153 return countdown ? 0 : -E1000_ERR_MBX; 157 return countdown ? 0 : -E1000_ERR_MBX;
154} 158}
@@ -168,12 +172,16 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
168 if (!countdown || !mbx->ops.check_for_ack) 172 if (!countdown || !mbx->ops.check_for_ack)
169 goto out; 173 goto out;
170 174
171 while (mbx->ops.check_for_ack(hw, mbx_id)) { 175 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
172 countdown--; 176 countdown--;
173 if (!countdown) 177 if (!countdown)
174 break; 178 break;
175 udelay(mbx->usec_delay); 179 udelay(mbx->usec_delay);
176 } 180 }
181
182 /* if we failed, all future posted messages fail until reset */
183 if (!countdown)
184 mbx->timeout = 0;
177out: 185out:
178 return countdown ? 0 : -E1000_ERR_MBX; 186 return countdown ? 0 : -E1000_ERR_MBX;
179} 187}
@@ -217,12 +225,13 @@ out:
217static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 225static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
218{ 226{
219 struct e1000_mbx_info *mbx = &hw->mbx; 227 struct e1000_mbx_info *mbx = &hw->mbx;
220 s32 ret_val = 0; 228 s32 ret_val = -E1000_ERR_MBX;
221 229
222 if (!mbx->ops.write) 230 /* exit if either we can't write or there isn't a defined timeout */
231 if (!mbx->ops.write || !mbx->timeout)
223 goto out; 232 goto out;
224 233
225 /* send msg*/ 234 /* send msg */
226 ret_val = mbx->ops.write(hw, msg, size, mbx_id); 235 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
227 236
228 /* if msg sent wait until we receive an ack */ 237 /* if msg sent wait until we receive an ack */
@@ -305,6 +314,30 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
305} 314}
306 315
307/** 316/**
317 * igb_obtain_mbx_lock_pf - obtain mailbox lock
318 * @hw: pointer to the HW structure
319 * @vf_number: the VF index
320 *
321 * return SUCCESS if we obtained the mailbox lock
322 **/
323static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
324{
325 s32 ret_val = -E1000_ERR_MBX;
326 u32 p2v_mailbox;
327
328
329 /* Take ownership of the buffer */
330 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
331
332 /* reserve mailbox for vf use */
333 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
334 if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
335 ret_val = 0;
336
337 return ret_val;
338}
339
340/**
308 * igb_write_mbx_pf - Places a message in the mailbox 341 * igb_write_mbx_pf - Places a message in the mailbox
309 * @hw: pointer to the HW structure 342 * @hw: pointer to the HW structure
310 * @msg: The message buffer 343 * @msg: The message buffer
@@ -316,27 +349,17 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
316static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 349static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
317 u16 vf_number) 350 u16 vf_number)
318{ 351{
319 u32 p2v_mailbox; 352 s32 ret_val;
320 s32 ret_val = 0;
321 u16 i; 353 u16 i;
322 354
323 /* Take ownership of the buffer */ 355 /* lock the mailbox to prevent pf/vf race condition */
324 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 356 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
325 357 if (ret_val)
326 /* Make sure we have ownership now... */
327 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
328 if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
329 /* failed to grab ownership */
330 ret_val = -E1000_ERR_MBX;
331 goto out_no_write; 358 goto out_no_write;
332 }
333 359
334 /* 360 /* flush msg and acks as we are overwriting the message buffer */
335 * flush any ack or msg which may already be in the queue
336 * as they are likely the result of an error
337 */
338 igb_check_for_ack_pf(hw, vf_number);
339 igb_check_for_msg_pf(hw, vf_number); 361 igb_check_for_msg_pf(hw, vf_number);
362 igb_check_for_ack_pf(hw, vf_number);
340 363
341 /* copy the caller specified message to the mailbox memory buffer */ 364 /* copy the caller specified message to the mailbox memory buffer */
342 for (i = 0; i < size; i++) 365 for (i = 0; i < size; i++)
@@ -367,20 +390,13 @@ out_no_write:
367static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 390static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
368 u16 vf_number) 391 u16 vf_number)
369{ 392{
370 u32 p2v_mailbox; 393 s32 ret_val;
371 s32 ret_val = 0;
372 u16 i; 394 u16 i;
373 395
374 /* Take ownership of the buffer */ 396 /* lock the mailbox to prevent pf/vf race condition */
375 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 397 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
376 398 if (ret_val)
377 /* Make sure we have ownership now... */
378 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
379 if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
380 /* failed to grab ownership */
381 ret_val = -E1000_ERR_MBX;
382 goto out_no_read; 399 goto out_no_read;
383 }
384 400
385 /* copy the message to the mailbox memory buffer */ 401 /* copy the message to the mailbox memory buffer */
386 for (i = 0; i < size; i++) 402 for (i = 0; i < size; i++)
@@ -392,8 +408,6 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
392 /* update stats */ 408 /* update stats */
393 hw->mbx.stats.msgs_rx++; 409 hw->mbx.stats.msgs_rx++;
394 410
395 ret_val = 0;
396
397out_no_read: 411out_no_read:
398 return ret_val; 412 return ret_val;
399} 413}
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index ebc02ea3f198..bb112fb6c3a1 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -58,10 +58,12 @@
58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) 58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
59 59
60#define E1000_VF_RESET 0x01 /* VF requests reset */ 60#define E1000_VF_RESET 0x01 /* VF requests reset */
61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
63#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
64#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
65 67
66#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ 68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
67 69
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index a88bfe2f1e8f..d83b77fa4038 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -78,9 +78,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
78 u32 mask; 78 u32 mask;
79 79
80 mask = 0x01 << (count - 1); 80 mask = 0x01 << (count - 1);
81 if (nvm->type == e1000_nvm_eeprom_microwire) 81 if (nvm->type == e1000_nvm_eeprom_spi)
82 eecd &= ~E1000_EECD_DO;
83 else if (nvm->type == e1000_nvm_eeprom_spi)
84 eecd |= E1000_EECD_DO; 82 eecd |= E1000_EECD_DO;
85 83
86 do { 84 do {
@@ -220,22 +218,7 @@ static void igb_standby_nvm(struct e1000_hw *hw)
220 struct e1000_nvm_info *nvm = &hw->nvm; 218 struct e1000_nvm_info *nvm = &hw->nvm;
221 u32 eecd = rd32(E1000_EECD); 219 u32 eecd = rd32(E1000_EECD);
222 220
223 if (nvm->type == e1000_nvm_eeprom_microwire) { 221 if (nvm->type == e1000_nvm_eeprom_spi) {
224 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
225 wr32(E1000_EECD, eecd);
226 wrfl();
227 udelay(nvm->delay_usec);
228
229 igb_raise_eec_clk(hw, &eecd);
230
231 /* Select EEPROM */
232 eecd |= E1000_EECD_CS;
233 wr32(E1000_EECD, eecd);
234 wrfl();
235 udelay(nvm->delay_usec);
236
237 igb_lower_eec_clk(hw, &eecd);
238 } else if (nvm->type == e1000_nvm_eeprom_spi) {
239 /* Toggle CS to flush commands */ 222 /* Toggle CS to flush commands */
240 eecd |= E1000_EECD_CS; 223 eecd |= E1000_EECD_CS;
241 wr32(E1000_EECD, eecd); 224 wr32(E1000_EECD, eecd);
@@ -263,12 +246,6 @@ static void e1000_stop_nvm(struct e1000_hw *hw)
263 /* Pull CS high */ 246 /* Pull CS high */
264 eecd |= E1000_EECD_CS; 247 eecd |= E1000_EECD_CS;
265 igb_lower_eec_clk(hw, &eecd); 248 igb_lower_eec_clk(hw, &eecd);
266 } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
267 /* CS on Microcwire is active-high */
268 eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
269 wr32(E1000_EECD, eecd);
270 igb_raise_eec_clk(hw, &eecd);
271 igb_lower_eec_clk(hw, &eecd);
272 } 249 }
273} 250}
274 251
@@ -304,14 +281,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
304 u8 spi_stat_reg; 281 u8 spi_stat_reg;
305 282
306 283
307 if (nvm->type == e1000_nvm_eeprom_microwire) { 284 if (nvm->type == e1000_nvm_eeprom_spi) {
308 /* Clear SK and DI */
309 eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
310 wr32(E1000_EECD, eecd);
311 /* Set CS */
312 eecd |= E1000_EECD_CS;
313 wr32(E1000_EECD, eecd);
314 } else if (nvm->type == e1000_nvm_eeprom_spi) {
315 /* Clear SK and CS */ 285 /* Clear SK and CS */
316 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 286 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
317 wr32(E1000_EECD, eecd); 287 wr32(E1000_EECD, eecd);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ee460600e74b..5c9d73e9bb8d 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -39,6 +39,9 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw);
39/* Cable length tables */ 39/* Cable length tables */
40static const u16 e1000_m88_cable_length_table[] = 40static const u16 e1000_m88_cable_length_table[] =
41 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 41 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
42#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
43 (sizeof(e1000_m88_cable_length_table) / \
44 sizeof(e1000_m88_cable_length_table[0]))
42 45
43static const u16 e1000_igp_2_cable_length_table[] = 46static const u16 e1000_igp_2_cable_length_table[] =
44 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 47 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -109,7 +112,10 @@ out:
109 **/ 112 **/
110static s32 igb_phy_reset_dsp(struct e1000_hw *hw) 113static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
111{ 114{
112 s32 ret_val; 115 s32 ret_val = 0;
116
117 if (!(hw->phy.ops.write_reg))
118 goto out;
113 119
114 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); 120 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
115 if (ret_val) 121 if (ret_val)
@@ -130,7 +136,7 @@ out:
130 * Reads the MDI control regsiter in the PHY at offset and stores the 136 * Reads the MDI control regsiter in the PHY at offset and stores the
131 * information read to data. 137 * information read to data.
132 **/ 138 **/
133static s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) 139s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
134{ 140{
135 struct e1000_phy_info *phy = &hw->phy; 141 struct e1000_phy_info *phy = &hw->phy;
136 u32 i, mdic = 0; 142 u32 i, mdic = 0;
@@ -188,7 +194,7 @@ out:
188 * 194 *
189 * Writes data to MDI control register in the PHY at offset. 195 * Writes data to MDI control register in the PHY at offset.
190 **/ 196 **/
191static s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) 197s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
192{ 198{
193 struct e1000_phy_info *phy = &hw->phy; 199 struct e1000_phy_info *phy = &hw->phy;
194 u32 i, mdic = 0; 200 u32 i, mdic = 0;
@@ -239,6 +245,103 @@ out:
239} 245}
240 246
241/** 247/**
248 * igb_read_phy_reg_i2c - Read PHY register using i2c
249 * @hw: pointer to the HW structure
250 * @offset: register offset to be read
251 * @data: pointer to the read data
252 *
253 * Reads the PHY register at offset using the i2c interface and stores the
254 * retrieved information in data.
255 **/
256s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
257{
258 struct e1000_phy_info *phy = &hw->phy;
259 u32 i, i2ccmd = 0;
260
261
262 /*
263 * Set up Op-code, Phy Address, and register address in the I2CCMD
264 * register. The MAC will take care of interfacing with the
265 * PHY to retrieve the desired data.
266 */
267 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
268 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
269 (E1000_I2CCMD_OPCODE_READ));
270
271 wr32(E1000_I2CCMD, i2ccmd);
272
273 /* Poll the ready bit to see if the I2C read completed */
274 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
275 udelay(50);
276 i2ccmd = rd32(E1000_I2CCMD);
277 if (i2ccmd & E1000_I2CCMD_READY)
278 break;
279 }
280 if (!(i2ccmd & E1000_I2CCMD_READY)) {
281 hw_dbg("I2CCMD Read did not complete\n");
282 return -E1000_ERR_PHY;
283 }
284 if (i2ccmd & E1000_I2CCMD_ERROR) {
285 hw_dbg("I2CCMD Error bit set\n");
286 return -E1000_ERR_PHY;
287 }
288
289 /* Need to byte-swap the 16-bit value. */
290 *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
291
292 return 0;
293}
294
295/**
296 * igb_write_phy_reg_i2c - Write PHY register using i2c
297 * @hw: pointer to the HW structure
298 * @offset: register offset to write to
299 * @data: data to write at register offset
300 *
301 * Writes the data to PHY register at the offset using the i2c interface.
302 **/
303s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
304{
305 struct e1000_phy_info *phy = &hw->phy;
306 u32 i, i2ccmd = 0;
307 u16 phy_data_swapped;
308
309
310 /* Swap the data bytes for the I2C interface */
311 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
312
313 /*
314 * Set up Op-code, Phy Address, and register address in the I2CCMD
315 * register. The MAC will take care of interfacing with the
316 * PHY to retrieve the desired data.
317 */
318 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
319 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
320 E1000_I2CCMD_OPCODE_WRITE |
321 phy_data_swapped);
322
323 wr32(E1000_I2CCMD, i2ccmd);
324
325 /* Poll the ready bit to see if the I2C read completed */
326 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
327 udelay(50);
328 i2ccmd = rd32(E1000_I2CCMD);
329 if (i2ccmd & E1000_I2CCMD_READY)
330 break;
331 }
332 if (!(i2ccmd & E1000_I2CCMD_READY)) {
333 hw_dbg("I2CCMD Write did not complete\n");
334 return -E1000_ERR_PHY;
335 }
336 if (i2ccmd & E1000_I2CCMD_ERROR) {
337 hw_dbg("I2CCMD Error bit set\n");
338 return -E1000_ERR_PHY;
339 }
340
341 return 0;
342}
343
344/**
242 * igb_read_phy_reg_igp - Read igp PHY register 345 * igb_read_phy_reg_igp - Read igp PHY register
243 * @hw: pointer to the HW structure 346 * @hw: pointer to the HW structure
244 * @offset: register offset to be read 347 * @offset: register offset to be read
@@ -318,6 +421,57 @@ out:
318} 421}
319 422
320/** 423/**
424 * igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
425 * @hw: pointer to the HW structure
426 *
427 * Sets up Carrier-sense on Transmit and downshift values.
428 **/
429s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
430{
431 struct e1000_phy_info *phy = &hw->phy;
432 s32 ret_val;
433 u16 phy_data;
434
435
436 if (phy->reset_disable) {
437 ret_val = 0;
438 goto out;
439 }
440
441 if (phy->type == e1000_phy_82580) {
442 ret_val = hw->phy.ops.reset(hw);
443 if (ret_val) {
444 hw_dbg("Error resetting the PHY.\n");
445 goto out;
446 }
447 }
448
449 /* Enable CRS on TX. This must be set for half-duplex operation. */
450 ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
451 if (ret_val)
452 goto out;
453
454 phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
455
456 /* Enable downshift */
457 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
458
459 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
460 if (ret_val)
461 goto out;
462
463 /* Set number of link attempts before downshift */
464 ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
465 if (ret_val)
466 goto out;
467 phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
468 ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
469
470out:
471 return ret_val;
472}
473
474/**
321 * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link 475 * igb_copper_link_setup_m88 - Setup m88 PHY's for copper link
322 * @hw: pointer to the HW structure 476 * @hw: pointer to the HW structure
323 * 477 *
@@ -572,7 +726,7 @@ out:
572 * and restart the negotiation process between the link partner. If 726 * and restart the negotiation process between the link partner. If
573 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. 727 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
574 **/ 728 **/
575s32 igb_copper_link_autoneg(struct e1000_hw *hw) 729static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
576{ 730{
577 struct e1000_phy_info *phy = &hw->phy; 731 struct e1000_phy_info *phy = &hw->phy;
578 s32 ret_val; 732 s32 ret_val;
@@ -796,6 +950,65 @@ out:
796} 950}
797 951
798/** 952/**
953 * igb_setup_copper_link - Configure copper link settings
954 * @hw: pointer to the HW structure
955 *
956 * Calls the appropriate function to configure the link for auto-neg or forced
957 * speed and duplex. Then we check for link, once link is established calls
958 * to configure collision distance and flow control are called. If link is
959 * not established, we return -E1000_ERR_PHY (-2).
960 **/
961s32 igb_setup_copper_link(struct e1000_hw *hw)
962{
963 s32 ret_val;
964 bool link;
965
966
967 if (hw->mac.autoneg) {
968 /*
969 * Setup autoneg and flow control advertisement and perform
970 * autonegotiation.
971 */
972 ret_val = igb_copper_link_autoneg(hw);
973 if (ret_val)
974 goto out;
975 } else {
976 /*
977 * PHY will be set to 10H, 10F, 100H or 100F
978 * depending on user settings.
979 */
980 hw_dbg("Forcing Speed and Duplex\n");
981 ret_val = hw->phy.ops.force_speed_duplex(hw);
982 if (ret_val) {
983 hw_dbg("Error Forcing Speed and Duplex\n");
984 goto out;
985 }
986 }
987
988 /*
989 * Check link status. Wait up to 100 microseconds for link to become
990 * valid.
991 */
992 ret_val = igb_phy_has_link(hw,
993 COPPER_LINK_UP_LIMIT,
994 10,
995 &link);
996 if (ret_val)
997 goto out;
998
999 if (link) {
1000 hw_dbg("Valid link established!!!\n");
1001 igb_config_collision_dist(hw);
1002 ret_val = igb_config_fc_after_link_up(hw);
1003 } else {
1004 hw_dbg("Unable to establish link!!!\n");
1005 }
1006
1007out:
1008 return ret_val;
1009}
1010
1011/**
799 * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY 1012 * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
800 * @hw: pointer to the HW structure 1013 * @hw: pointer to the HW structure
801 * 1014 *
@@ -903,22 +1116,19 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
903 1116
904 igb_phy_force_speed_duplex_setup(hw, &phy_data); 1117 igb_phy_force_speed_duplex_setup(hw, &phy_data);
905 1118
906 /* Reset the phy to commit changes. */
907 phy_data |= MII_CR_RESET;
908
909 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); 1119 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
910 if (ret_val) 1120 if (ret_val)
911 goto out; 1121 goto out;
912 1122
913 udelay(1); 1123 /* Reset the phy to commit changes. */
1124 ret_val = igb_phy_sw_reset(hw);
1125 if (ret_val)
1126 goto out;
914 1127
915 if (phy->autoneg_wait_to_complete) { 1128 if (phy->autoneg_wait_to_complete) {
916 hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); 1129 hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
917 1130
918 ret_val = igb_phy_has_link(hw, 1131 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
919 PHY_FORCE_LIMIT,
920 100000,
921 &link);
922 if (ret_val) 1132 if (ret_val)
923 goto out; 1133 goto out;
924 1134
@@ -928,8 +1138,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
928 * Reset the DSP and cross our fingers. 1138 * Reset the DSP and cross our fingers.
929 */ 1139 */
930 ret_val = phy->ops.write_reg(hw, 1140 ret_val = phy->ops.write_reg(hw,
931 M88E1000_PHY_PAGE_SELECT, 1141 M88E1000_PHY_PAGE_SELECT,
932 0x001d); 1142 0x001d);
933 if (ret_val) 1143 if (ret_val)
934 goto out; 1144 goto out;
935 ret_val = igb_phy_reset_dsp(hw); 1145 ret_val = igb_phy_reset_dsp(hw);
@@ -939,7 +1149,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
939 1149
940 /* Try once more */ 1150 /* Try once more */
941 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 1151 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
942 100000, &link); 1152 100000, &link);
943 if (ret_val) 1153 if (ret_val)
944 goto out; 1154 goto out;
945 } 1155 }
@@ -1051,9 +1261,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1051s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) 1261s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1052{ 1262{
1053 struct e1000_phy_info *phy = &hw->phy; 1263 struct e1000_phy_info *phy = &hw->phy;
1054 s32 ret_val; 1264 s32 ret_val = 0;
1055 u16 data; 1265 u16 data;
1056 1266
1267 if (!(hw->phy.ops.read_reg))
1268 goto out;
1269
1057 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 1270 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
1058 if (ret_val) 1271 if (ret_val)
1059 goto out; 1272 goto out;
@@ -1288,8 +1501,14 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1288 * it across the board. 1501 * it across the board.
1289 */ 1502 */
1290 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1503 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1291 if (ret_val) 1504 if (ret_val) {
1292 break; 1505 /*
1506 * If the first read fails, another entity may have
1507 * ownership of the resources, wait and try again to
1508 * see if they have relinquished the resources yet.
1509 */
1510 udelay(usec_interval);
1511 }
1293 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1512 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1294 if (ret_val) 1513 if (ret_val)
1295 break; 1514 break;
@@ -1333,8 +1552,13 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
1333 1552
1334 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 1553 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1335 M88E1000_PSSR_CABLE_LENGTH_SHIFT; 1554 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1555 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
1556 ret_val = -E1000_ERR_PHY;
1557 goto out;
1558 }
1559
1336 phy->min_cable_length = e1000_m88_cable_length_table[index]; 1560 phy->min_cable_length = e1000_m88_cable_length_table[index];
1337 phy->max_cable_length = e1000_m88_cable_length_table[index+1]; 1561 phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
1338 1562
1339 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; 1563 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1340 1564
@@ -1715,3 +1939,194 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
1715 return 0; 1939 return 0;
1716} 1940}
1717 1941
1942/**
1943 * igb_check_polarity_82580 - Checks the polarity.
1944 * @hw: pointer to the HW structure
1945 *
1946 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
1947 *
1948 * Polarity is determined based on the PHY specific status register.
1949 **/
1950static s32 igb_check_polarity_82580(struct e1000_hw *hw)
1951{
1952 struct e1000_phy_info *phy = &hw->phy;
1953 s32 ret_val;
1954 u16 data;
1955
1956
1957 ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
1958
1959 if (!ret_val)
1960 phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
1961 ? e1000_rev_polarity_reversed
1962 : e1000_rev_polarity_normal;
1963
1964 return ret_val;
1965}
1966
1967/**
1968 * igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
1969 * @hw: pointer to the HW structure
1970 *
1971 * Calls the PHY setup function to force speed and duplex. Clears the
1972 * auto-crossover to force MDI manually. Waits for link and returns
1973 * successful if link up is successful, else -E1000_ERR_PHY (-2).
1974 **/
1975s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
1976{
1977 struct e1000_phy_info *phy = &hw->phy;
1978 s32 ret_val;
1979 u16 phy_data;
1980 bool link;
1981
1982
1983 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
1984 if (ret_val)
1985 goto out;
1986
1987 igb_phy_force_speed_duplex_setup(hw, &phy_data);
1988
1989 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
1990 if (ret_val)
1991 goto out;
1992
1993 /*
1994 * Clear Auto-Crossover to force MDI manually. 82580 requires MDI
1995 * forced whenever speed and duplex are forced.
1996 */
1997 ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
1998 if (ret_val)
1999 goto out;
2000
2001 phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX;
2002 phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX;
2003
2004 ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
2005 if (ret_val)
2006 goto out;
2007
2008 hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
2009
2010 udelay(1);
2011
2012 if (phy->autoneg_wait_to_complete) {
2013 hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
2014
2015 ret_val = igb_phy_has_link(hw,
2016 PHY_FORCE_LIMIT,
2017 100000,
2018 &link);
2019 if (ret_val)
2020 goto out;
2021
2022 if (!link)
2023 hw_dbg("Link taking longer than expected.\n");
2024
2025 /* Try once more */
2026 ret_val = igb_phy_has_link(hw,
2027 PHY_FORCE_LIMIT,
2028 100000,
2029 &link);
2030 if (ret_val)
2031 goto out;
2032 }
2033
2034out:
2035 return ret_val;
2036}
2037
2038/**
2039 * igb_get_phy_info_82580 - Retrieve I82580 PHY information
2040 * @hw: pointer to the HW structure
2041 *
2042 * Read PHY status to determine if link is up. If link is up, then
2043 * set/determine 10base-T extended distance and polarity correction. Read
2044 * PHY port status to determine MDI/MDIx and speed. Based on the speed,
2045 * determine on the cable length, local and remote receiver.
2046 **/
2047s32 igb_get_phy_info_82580(struct e1000_hw *hw)
2048{
2049 struct e1000_phy_info *phy = &hw->phy;
2050 s32 ret_val;
2051 u16 data;
2052 bool link;
2053
2054
2055 ret_val = igb_phy_has_link(hw, 1, 0, &link);
2056 if (ret_val)
2057 goto out;
2058
2059 if (!link) {
2060 hw_dbg("Phy info is only valid if link is up\n");
2061 ret_val = -E1000_ERR_CONFIG;
2062 goto out;
2063 }
2064
2065 phy->polarity_correction = true;
2066
2067 ret_val = igb_check_polarity_82580(hw);
2068 if (ret_val)
2069 goto out;
2070
2071 ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
2072 if (ret_val)
2073 goto out;
2074
2075 phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
2076
2077 if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
2078 I82580_PHY_STATUS2_SPEED_1000MBPS) {
2079 ret_val = hw->phy.ops.get_cable_length(hw);
2080 if (ret_val)
2081 goto out;
2082
2083 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
2084 if (ret_val)
2085 goto out;
2086
2087 phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
2088 ? e1000_1000t_rx_status_ok
2089 : e1000_1000t_rx_status_not_ok;
2090
2091 phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
2092 ? e1000_1000t_rx_status_ok
2093 : e1000_1000t_rx_status_not_ok;
2094 } else {
2095 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
2096 phy->local_rx = e1000_1000t_rx_status_undefined;
2097 phy->remote_rx = e1000_1000t_rx_status_undefined;
2098 }
2099
2100out:
2101 return ret_val;
2102}
2103
2104/**
2105 * igb_get_cable_length_82580 - Determine cable length for 82580 PHY
2106 * @hw: pointer to the HW structure
2107 *
2108 * Reads the diagnostic status register and verifies result is valid before
2109 * placing it in the phy_cable_length field.
2110 **/
2111s32 igb_get_cable_length_82580(struct e1000_hw *hw)
2112{
2113 struct e1000_phy_info *phy = &hw->phy;
2114 s32 ret_val;
2115 u16 phy_data, length;
2116
2117
2118 ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
2119 if (ret_val)
2120 goto out;
2121
2122 length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
2123 I82580_DSTATUS_CABLE_LENGTH_SHIFT;
2124
2125 if (length == E1000_CABLE_LENGTH_UNDEFINED)
2126 ret_val = -E1000_ERR_PHY;
2127
2128 phy->cable_length = length;
2129
2130out:
2131 return ret_val;
2132}
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index ebe4b616db8a..555eb54bb6ed 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -43,7 +43,6 @@ enum e1000_smart_speed {
43 43
44s32 igb_check_downshift(struct e1000_hw *hw); 44s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw); 45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_autoneg(struct e1000_hw *hw);
47s32 igb_copper_link_setup_igp(struct e1000_hw *hw); 46s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
48s32 igb_copper_link_setup_m88(struct e1000_hw *hw); 47s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
49s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); 48s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
@@ -57,10 +56,19 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw);
57s32 igb_phy_hw_reset(struct e1000_hw *hw); 56s32 igb_phy_hw_reset(struct e1000_hw *hw);
58s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); 57s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
59s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); 58s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
59s32 igb_setup_copper_link(struct e1000_hw *hw);
60s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); 60s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
61s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, 61s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
62 u32 usec_interval, bool *success); 62 u32 usec_interval, bool *success);
63s32 igb_phy_init_script_igp3(struct e1000_hw *hw); 63s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
64s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
65s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
66s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
67s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
68s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
69s32 igb_get_phy_info_82580(struct e1000_hw *hw);
70s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
71s32 igb_get_cable_length_82580(struct e1000_hw *hw);
64 72
65/* IGP01E1000 Specific Registers */ 73/* IGP01E1000 Specific Registers */
66#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ 74#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
@@ -75,6 +83,33 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
75#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ 83#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */
76#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 84#define IGP01E1000_PSCFR_SMART_SPEED 0x0080
77 85
86#define I82580_ADDR_REG 16
87#define I82580_CFG_REG 22
88#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
89#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
90#define I82580_CTRL_REG 23
91#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
92
93/* 82580 specific PHY registers */
94#define I82580_PHY_CTRL_2 18
95#define I82580_PHY_LBK_CTRL 19
96#define I82580_PHY_STATUS_2 26
97#define I82580_PHY_DIAG_STATUS 31
98
99/* I82580 PHY Status 2 */
100#define I82580_PHY_STATUS2_REV_POLARITY 0x0400
101#define I82580_PHY_STATUS2_MDIX 0x0800
102#define I82580_PHY_STATUS2_SPEED_MASK 0x0300
103#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
104#define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100
105
106/* I82580 PHY Control 2 */
107#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400
108#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200
109
110/* I82580 PHY Diagnostics Status */
111#define I82580_DSTATUS_CABLE_LENGTH 0x03FC
112#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
78/* Enable flexible speed on link-up */ 113/* Enable flexible speed on link-up */
79#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ 114#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
80#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ 115#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 345d1442d6d6..dd4e6ffd29f5 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -34,6 +34,7 @@
34#define E1000_EERD 0x00014 /* EEPROM Read - RW */ 34#define E1000_EERD 0x00014 /* EEPROM Read - RW */
35#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 35#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
36#define E1000_MDIC 0x00020 /* MDI Control - RW */ 36#define E1000_MDIC 0x00020 /* MDI Control - RW */
37#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */
37#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 38#define E1000_SCTL 0x00024 /* SerDes Control - RW */
38#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 39#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
39#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ 40#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
@@ -76,59 +77,20 @@
76#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ 77#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
77 78
78/* IEEE 1588 TIMESYNCH */ 79/* IEEE 1588 TIMESYNCH */
79#define E1000_TSYNCTXCTL 0x0B614 80#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
80#define E1000_TSYNCTXCTL_VALID (1<<0) 81#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
81#define E1000_TSYNCTXCTL_ENABLED (1<<4) 82#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
82#define E1000_TSYNCRXCTL 0x0B620 83#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
83#define E1000_TSYNCRXCTL_VALID (1<<0) 84#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
84#define E1000_TSYNCRXCTL_ENABLED (1<<4) 85#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
85enum { 86#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
86 E1000_TSYNCRXCTL_TYPE_L2_V2 = 0, 87#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
87 E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1), 88#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
88 E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2), 89#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
89 E1000_TSYNCRXCTL_TYPE_ALL = (1<<3), 90#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
90 E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1), 91#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
91}; 92#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
92#define E1000_TSYNCRXCFG 0x05F50 93#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
93enum {
94 E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
95 E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
96 E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
97 E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
98 E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
99
100 E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
101 E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
102 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
103 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
104 E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
105 E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
106 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
107 E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
108 E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
109 E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
110};
111#define E1000_SYSTIML 0x0B600
112#define E1000_SYSTIMH 0x0B604
113#define E1000_TIMINCA 0x0B608
114
115#define E1000_RXMTRL 0x0B634
116#define E1000_RXSTMPL 0x0B624
117#define E1000_RXSTMPH 0x0B628
118#define E1000_RXSATRL 0x0B62C
119#define E1000_RXSATRH 0x0B630
120
121#define E1000_TXSTMPL 0x0B618
122#define E1000_TXSTMPH 0x0B61C
123
124#define E1000_ETQF0 0x05CB0
125#define E1000_ETQF1 0x05CB4
126#define E1000_ETQF2 0x05CB8
127#define E1000_ETQF3 0x05CBC
128#define E1000_ETQF4 0x05CC0
129#define E1000_ETQF5 0x05CC4
130#define E1000_ETQF6 0x05CC8
131#define E1000_ETQF7 0x05CCC
132 94
133/* Filtering Registers */ 95/* Filtering Registers */
134#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) 96#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -143,7 +105,9 @@ enum {
143#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ 105#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
144 106
145#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 107#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
108
146/* Split and Replication RX Control - RW */ 109/* Split and Replication RX Control - RW */
110#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
147/* 111/*
148 * Convenience macros 112 * Convenience macros
149 * 113 *
@@ -288,10 +252,17 @@ enum {
288#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 252#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
289#define E1000_RA 0x05400 /* Receive Address - RW Array */ 253#define E1000_RA 0x05400 /* Receive Address - RW Array */
290#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ 254#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
255#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
291#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 256#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
292 (0x054E0 + ((_i - 16) * 8))) 257 (0x054E0 + ((_i - 16) * 8)))
293#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 258#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
294 (0x054E4 + ((_i - 16) * 8))) 259 (0x054E4 + ((_i - 16) * 8)))
260#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
261#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
262#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
263#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
264#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
265#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
295#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 266#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
296#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ 267#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
297#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 268#define E1000_WUC 0x05800 /* Wakeup Control - RW */
@@ -331,6 +302,7 @@ enum {
331#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ 302#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
332#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ 303#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
333#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ 304#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
305#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
334#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ 306#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
335/* These act per VF so an array friendly macro is used */ 307/* These act per VF so an array friendly macro is used */
336#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) 308#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
@@ -348,4 +320,6 @@ enum {
348#define array_rd32(reg, offset) \ 320#define array_rd32(reg, offset) \
349 (readl(hw->hw_addr + reg + ((offset) << 2))) 321 (readl(hw->hw_addr + reg + ((offset) << 2)))
350 322
323/* DMA Coalescing registers */
324#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */
351#endif 325#endif
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 7126fea26fec..c458d9b188ba 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -55,12 +55,14 @@ struct igb_adapter;
55#define IGB_DEFAULT_ITR 3 /* dynamic */ 55#define IGB_DEFAULT_ITR 3 /* dynamic */
56#define IGB_MAX_ITR_USECS 10000 56#define IGB_MAX_ITR_USECS 10000
57#define IGB_MIN_ITR_USECS 10 57#define IGB_MIN_ITR_USECS 10
58#define NON_Q_VECTORS 1
59#define MAX_Q_VECTORS 8
58 60
59/* Transmit and receive queues */ 61/* Transmit and receive queues */
60#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ 62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
61 (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) 63 (hw->mac.type > e1000_82575 ? 8 : 4))
62#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES 64#define IGB_ABS_MAX_TX_QUEUES 8
63#define IGB_ABS_MAX_TX_QUEUES 4 65#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
64 66
65#define IGB_MAX_VF_MC_ENTRIES 30 67#define IGB_MAX_VF_MC_ENTRIES 30
66#define IGB_MAX_VF_FUNCTIONS 8 68#define IGB_MAX_VF_FUNCTIONS 8
@@ -71,9 +73,14 @@ struct vf_data_storage {
71 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; 73 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
72 u16 num_vf_mc_hashes; 74 u16 num_vf_mc_hashes;
73 u16 vlans_enabled; 75 u16 vlans_enabled;
74 bool clear_to_send; 76 u32 flags;
77 unsigned long last_nack;
75}; 78};
76 79
80#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
81#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
82#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
83
77/* RX descriptor control thresholds. 84/* RX descriptor control thresholds.
78 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 85 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
79 * descriptors available in its onboard memory. 86 * descriptors available in its onboard memory.
@@ -85,17 +92,19 @@ struct vf_data_storage {
85 * descriptors until either it has this many to write back, or the 92 * descriptors until either it has this many to write back, or the
86 * ITR timer expires. 93 * ITR timer expires.
87 */ 94 */
88#define IGB_RX_PTHRESH 16 95#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
89#define IGB_RX_HTHRESH 8 96#define IGB_RX_HTHRESH 8
90#define IGB_RX_WTHRESH 1 97#define IGB_RX_WTHRESH 1
98#define IGB_TX_PTHRESH 8
99#define IGB_TX_HTHRESH 1
100#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
101 adapter->msix_entries) ? 0 : 16)
91 102
92/* this is the size past which hardware will drop packets when setting LPE=0 */ 103/* this is the size past which hardware will drop packets when setting LPE=0 */
93#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 104#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
94 105
95/* Supported Rx Buffer Sizes */ 106/* Supported Rx Buffer Sizes */
96#define IGB_RXBUFFER_128 128 /* Used for packet split */ 107#define IGB_RXBUFFER_128 128 /* Used for packet split */
97#define IGB_RXBUFFER_256 256 /* Used for packet split */
98#define IGB_RXBUFFER_512 512
99#define IGB_RXBUFFER_1024 1024 108#define IGB_RXBUFFER_1024 1024
100#define IGB_RXBUFFER_2048 2048 109#define IGB_RXBUFFER_2048 2048
101#define IGB_RXBUFFER_16384 16384 110#define IGB_RXBUFFER_16384 16384
@@ -141,36 +150,55 @@ struct igb_buffer {
141struct igb_tx_queue_stats { 150struct igb_tx_queue_stats {
142 u64 packets; 151 u64 packets;
143 u64 bytes; 152 u64 bytes;
153 u64 restart_queue;
144}; 154};
145 155
146struct igb_rx_queue_stats { 156struct igb_rx_queue_stats {
147 u64 packets; 157 u64 packets;
148 u64 bytes; 158 u64 bytes;
149 u64 drops; 159 u64 drops;
160 u64 csum_err;
161 u64 alloc_failed;
150}; 162};
151 163
152struct igb_ring { 164struct igb_q_vector {
153 struct igb_adapter *adapter; /* backlink */ 165 struct igb_adapter *adapter; /* backlink */
154 void *desc; /* descriptor ring memory */ 166 struct igb_ring *rx_ring;
155 dma_addr_t dma; /* phys address of the ring */ 167 struct igb_ring *tx_ring;
156 unsigned int size; /* length of desc. ring in bytes */ 168 struct napi_struct napi;
157 unsigned int count; /* number of desc. in the ring */
158 u16 next_to_use;
159 u16 next_to_clean;
160 u16 head;
161 u16 tail;
162 struct igb_buffer *buffer_info; /* array of buffer info structs */
163 169
164 u32 eims_value; 170 u32 eims_value;
165 u32 itr_val;
166 u16 itr_register;
167 u16 cpu; 171 u16 cpu;
168 172
169 u16 queue_index; 173 u16 itr_val;
170 u16 reg_idx; 174 u8 set_itr;
175 u8 itr_shift;
176 void __iomem *itr_register;
177
178 char name[IFNAMSIZ + 9];
179};
180
181struct igb_ring {
182 struct igb_q_vector *q_vector; /* backlink to q_vector */
183 struct net_device *netdev; /* back pointer to net_device */
184 struct pci_dev *pdev; /* pci device for dma mapping */
185 dma_addr_t dma; /* phys address of the ring */
186 void *desc; /* descriptor ring memory */
187 unsigned int size; /* length of desc. ring in bytes */
188 u16 count; /* number of desc. in the ring */
189 u16 next_to_use;
190 u16 next_to_clean;
191 u8 queue_index;
192 u8 reg_idx;
193 void __iomem *head;
194 void __iomem *tail;
195 struct igb_buffer *buffer_info; /* array of buffer info structs */
196
171 unsigned int total_bytes; 197 unsigned int total_bytes;
172 unsigned int total_packets; 198 unsigned int total_packets;
173 199
200 u32 flags;
201
174 union { 202 union {
175 /* TX */ 203 /* TX */
176 struct { 204 struct {
@@ -180,16 +208,18 @@ struct igb_ring {
180 /* RX */ 208 /* RX */
181 struct { 209 struct {
182 struct igb_rx_queue_stats rx_stats; 210 struct igb_rx_queue_stats rx_stats;
183 u64 rx_queue_drops; 211 u32 rx_buffer_len;
184 struct napi_struct napi;
185 int set_itr;
186 struct igb_ring *buddy;
187 }; 212 };
188 }; 213 };
189
190 char name[IFNAMSIZ + 5];
191}; 214};
192 215
216#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
217#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
218
219#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
220
221#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
222
193#define E1000_RX_DESC_ADV(R, i) \ 223#define E1000_RX_DESC_ADV(R, i) \
194 (&(((union e1000_adv_rx_desc *)((R).desc))[i])) 224 (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
195#define E1000_TX_DESC_ADV(R, i) \ 225#define E1000_TX_DESC_ADV(R, i) \
@@ -197,6 +227,15 @@ struct igb_ring {
197#define E1000_TX_CTXTDESC_ADV(R, i) \ 227#define E1000_TX_CTXTDESC_ADV(R, i) \
198 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) 228 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
199 229
230/* igb_desc_unused - calculate if we have unused descriptors */
231static inline int igb_desc_unused(struct igb_ring *ring)
232{
233 if (ring->next_to_clean > ring->next_to_use)
234 return ring->next_to_clean - ring->next_to_use - 1;
235
236 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
237}
238
200/* board specific private data structure */ 239/* board specific private data structure */
201 240
202struct igb_adapter { 241struct igb_adapter {
@@ -205,18 +244,14 @@ struct igb_adapter {
205 struct vlan_group *vlgrp; 244 struct vlan_group *vlgrp;
206 u16 mng_vlan_id; 245 u16 mng_vlan_id;
207 u32 bd_number; 246 u32 bd_number;
208 u32 rx_buffer_len;
209 u32 wol; 247 u32 wol;
210 u32 en_mng_pt; 248 u32 en_mng_pt;
211 u16 link_speed; 249 u16 link_speed;
212 u16 link_duplex; 250 u16 link_duplex;
213 unsigned int total_tx_bytes; 251
214 unsigned int total_tx_packets;
215 unsigned int total_rx_bytes;
216 unsigned int total_rx_packets;
217 /* Interrupt Throttle Rate */ 252 /* Interrupt Throttle Rate */
218 u32 itr; 253 u32 rx_itr_setting;
219 u32 itr_setting; 254 u32 tx_itr_setting;
220 u16 tx_itr; 255 u16 tx_itr;
221 u16 rx_itr; 256 u16 rx_itr;
222 257
@@ -229,13 +264,7 @@ struct igb_adapter {
229 264
230 /* TX */ 265 /* TX */
231 struct igb_ring *tx_ring; /* One per active queue */ 266 struct igb_ring *tx_ring; /* One per active queue */
232 unsigned int restart_queue;
233 unsigned long tx_queue_len; 267 unsigned long tx_queue_len;
234 u32 txd_cmd;
235 u32 gotc;
236 u64 gotc_old;
237 u64 tpt_old;
238 u64 colc_old;
239 u32 tx_timeout_count; 268 u32 tx_timeout_count;
240 269
241 /* RX */ 270 /* RX */
@@ -243,20 +272,12 @@ struct igb_adapter {
243 int num_tx_queues; 272 int num_tx_queues;
244 int num_rx_queues; 273 int num_rx_queues;
245 274
246 u64 hw_csum_err;
247 u64 hw_csum_good;
248 u32 alloc_rx_buff_failed;
249 u32 gorc;
250 u64 gorc_old;
251 u16 rx_ps_hdr_size;
252 u32 max_frame_size; 275 u32 max_frame_size;
253 u32 min_frame_size; 276 u32 min_frame_size;
254 277
255 /* OS defined structs */ 278 /* OS defined structs */
256 struct net_device *netdev; 279 struct net_device *netdev;
257 struct napi_struct napi;
258 struct pci_dev *pdev; 280 struct pci_dev *pdev;
259 struct net_device_stats net_stats;
260 struct cyclecounter cycles; 281 struct cyclecounter cycles;
261 struct timecounter clock; 282 struct timecounter clock;
262 struct timecompare compare; 283 struct timecompare compare;
@@ -273,6 +294,9 @@ struct igb_adapter {
273 struct igb_ring test_rx_ring; 294 struct igb_ring test_rx_ring;
274 295
275 int msg_enable; 296 int msg_enable;
297
298 unsigned int num_q_vectors;
299 struct igb_q_vector *q_vector[MAX_Q_VECTORS];
276 struct msix_entry *msix_entries; 300 struct msix_entry *msix_entries;
277 u32 eims_enable_mask; 301 u32 eims_enable_mask;
278 u32 eims_other; 302 u32 eims_other;
@@ -283,18 +307,20 @@ struct igb_adapter {
283 u32 eeprom_wol; 307 u32 eeprom_wol;
284 308
285 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; 309 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
286 unsigned int tx_ring_count; 310 u16 tx_ring_count;
287 unsigned int rx_ring_count; 311 u16 rx_ring_count;
288 unsigned int vfs_allocated_count; 312 unsigned int vfs_allocated_count;
289 struct vf_data_storage *vf_data; 313 struct vf_data_storage *vf_data;
314 u32 rss_queues;
290}; 315};
291 316
292#define IGB_FLAG_HAS_MSI (1 << 0) 317#define IGB_FLAG_HAS_MSI (1 << 0)
293#define IGB_FLAG_DCA_ENABLED (1 << 1) 318#define IGB_FLAG_DCA_ENABLED (1 << 1)
294#define IGB_FLAG_QUAD_PORT_A (1 << 2) 319#define IGB_FLAG_QUAD_PORT_A (1 << 2)
295#define IGB_FLAG_NEED_CTX_IDX (1 << 3) 320#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
296#define IGB_FLAG_RX_CSUM_DISABLED (1 << 4)
297 321
322#define IGB_82576_TSYNC_SHIFT 19
323#define IGB_82580_TSYNC_SHIFT 24
298enum e1000_state_t { 324enum e1000_state_t {
299 __IGB_TESTING, 325 __IGB_TESTING,
300 __IGB_RESETTING, 326 __IGB_RESETTING,
@@ -314,10 +340,18 @@ extern void igb_down(struct igb_adapter *);
314extern void igb_reinit_locked(struct igb_adapter *); 340extern void igb_reinit_locked(struct igb_adapter *);
315extern void igb_reset(struct igb_adapter *); 341extern void igb_reset(struct igb_adapter *);
316extern int igb_set_spd_dplx(struct igb_adapter *, u16); 342extern int igb_set_spd_dplx(struct igb_adapter *, u16);
317extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *); 343extern int igb_setup_tx_resources(struct igb_ring *);
318extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *); 344extern int igb_setup_rx_resources(struct igb_ring *);
319extern void igb_free_tx_resources(struct igb_ring *); 345extern void igb_free_tx_resources(struct igb_ring *);
320extern void igb_free_rx_resources(struct igb_ring *); 346extern void igb_free_rx_resources(struct igb_ring *);
347extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
348extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
349extern void igb_setup_tctl(struct igb_adapter *);
350extern void igb_setup_rctl(struct igb_adapter *);
351extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
352extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
353 struct igb_buffer *);
354extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
321extern void igb_update_stats(struct igb_adapter *); 355extern void igb_update_stats(struct igb_adapter *);
322extern void igb_set_ethtool_ops(struct net_device *); 356extern void igb_set_ethtool_ops(struct net_device *);
323 357
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index b243ed3b0c36..ac9d5272650d 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -44,78 +44,94 @@ struct igb_stats {
44 int stat_offset; 44 int stat_offset;
45}; 45};
46 46
47#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \ 47#define IGB_STAT(_name, _stat) { \
48 offsetof(struct igb_adapter, m) 48 .stat_string = _name, \
49 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
50 .stat_offset = offsetof(struct igb_adapter, _stat) \
51}
49static const struct igb_stats igb_gstrings_stats[] = { 52static const struct igb_stats igb_gstrings_stats[] = {
50 { "rx_packets", IGB_STAT(stats.gprc) }, 53 IGB_STAT("rx_packets", stats.gprc),
51 { "tx_packets", IGB_STAT(stats.gptc) }, 54 IGB_STAT("tx_packets", stats.gptc),
52 { "rx_bytes", IGB_STAT(stats.gorc) }, 55 IGB_STAT("rx_bytes", stats.gorc),
53 { "tx_bytes", IGB_STAT(stats.gotc) }, 56 IGB_STAT("tx_bytes", stats.gotc),
54 { "rx_broadcast", IGB_STAT(stats.bprc) }, 57 IGB_STAT("rx_broadcast", stats.bprc),
55 { "tx_broadcast", IGB_STAT(stats.bptc) }, 58 IGB_STAT("tx_broadcast", stats.bptc),
56 { "rx_multicast", IGB_STAT(stats.mprc) }, 59 IGB_STAT("rx_multicast", stats.mprc),
57 { "tx_multicast", IGB_STAT(stats.mptc) }, 60 IGB_STAT("tx_multicast", stats.mptc),
58 { "rx_errors", IGB_STAT(net_stats.rx_errors) }, 61 IGB_STAT("multicast", stats.mprc),
59 { "tx_errors", IGB_STAT(net_stats.tx_errors) }, 62 IGB_STAT("collisions", stats.colc),
60 { "tx_dropped", IGB_STAT(net_stats.tx_dropped) }, 63 IGB_STAT("rx_crc_errors", stats.crcerrs),
61 { "multicast", IGB_STAT(stats.mprc) }, 64 IGB_STAT("rx_no_buffer_count", stats.rnbc),
62 { "collisions", IGB_STAT(stats.colc) }, 65 IGB_STAT("rx_missed_errors", stats.mpc),
63 { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) }, 66 IGB_STAT("tx_aborted_errors", stats.ecol),
64 { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) }, 67 IGB_STAT("tx_carrier_errors", stats.tncrs),
65 { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, 68 IGB_STAT("tx_window_errors", stats.latecol),
66 { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) }, 69 IGB_STAT("tx_abort_late_coll", stats.latecol),
67 { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, 70 IGB_STAT("tx_deferred_ok", stats.dc),
68 { "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) }, 71 IGB_STAT("tx_single_coll_ok", stats.scc),
69 { "rx_missed_errors", IGB_STAT(stats.mpc) }, 72 IGB_STAT("tx_multi_coll_ok", stats.mcc),
70 { "tx_aborted_errors", IGB_STAT(stats.ecol) }, 73 IGB_STAT("tx_timeout_count", tx_timeout_count),
71 { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, 74 IGB_STAT("rx_long_length_errors", stats.roc),
72 { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) }, 75 IGB_STAT("rx_short_length_errors", stats.ruc),
73 { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) }, 76 IGB_STAT("rx_align_errors", stats.algnerrc),
74 { "tx_window_errors", IGB_STAT(stats.latecol) }, 77 IGB_STAT("tx_tcp_seg_good", stats.tsctc),
75 { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, 78 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
76 { "tx_deferred_ok", IGB_STAT(stats.dc) }, 79 IGB_STAT("rx_flow_control_xon", stats.xonrxc),
77 { "tx_single_coll_ok", IGB_STAT(stats.scc) }, 80 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
78 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, 81 IGB_STAT("tx_flow_control_xon", stats.xontxc),
79 { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, 82 IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
80 { "tx_restart_queue", IGB_STAT(restart_queue) }, 83 IGB_STAT("rx_long_byte_count", stats.gorc),
81 { "rx_long_length_errors", IGB_STAT(stats.roc) }, 84 IGB_STAT("tx_dma_out_of_sync", stats.doosync),
82 { "rx_short_length_errors", IGB_STAT(stats.ruc) }, 85 IGB_STAT("tx_smbus", stats.mgptc),
83 { "rx_align_errors", IGB_STAT(stats.algnerrc) }, 86 IGB_STAT("rx_smbus", stats.mgprc),
84 { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, 87 IGB_STAT("dropped_smbus", stats.mgpdc),
85 { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, 88};
86 { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, 89
87 { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, 90#define IGB_NETDEV_STAT(_net_stat) { \
88 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 91 .stat_string = __stringify(_net_stat), \
89 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 92 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
90 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 93 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
91 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, 94}
92 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, 95static const struct igb_stats igb_gstrings_net_stats[] = {
93 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, 96 IGB_NETDEV_STAT(rx_errors),
94 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, 97 IGB_NETDEV_STAT(tx_errors),
95 { "tx_smbus", IGB_STAT(stats.mgptc) }, 98 IGB_NETDEV_STAT(tx_dropped),
96 { "rx_smbus", IGB_STAT(stats.mgprc) }, 99 IGB_NETDEV_STAT(rx_length_errors),
97 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 100 IGB_NETDEV_STAT(rx_over_errors),
101 IGB_NETDEV_STAT(rx_frame_errors),
102 IGB_NETDEV_STAT(rx_fifo_errors),
103 IGB_NETDEV_STAT(tx_fifo_errors),
104 IGB_NETDEV_STAT(tx_heartbeat_errors)
98}; 105};
99 106
100#define IGB_QUEUE_STATS_LEN \
101 (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \
102 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
103 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
104 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
105#define IGB_GLOBAL_STATS_LEN \ 107#define IGB_GLOBAL_STATS_LEN \
106 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 108 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
107#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 109#define IGB_NETDEV_STATS_LEN \
110 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
111#define IGB_RX_QUEUE_STATS_LEN \
112 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
113#define IGB_TX_QUEUE_STATS_LEN \
114 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
115#define IGB_QUEUE_STATS_LEN \
116 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
117 IGB_RX_QUEUE_STATS_LEN) + \
118 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
119 IGB_TX_QUEUE_STATS_LEN))
120#define IGB_STATS_LEN \
121 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
122
108static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 123static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
109 "Register test (offline)", "Eeprom test (offline)", 124 "Register test (offline)", "Eeprom test (offline)",
110 "Interrupt test (offline)", "Loopback test (offline)", 125 "Interrupt test (offline)", "Loopback test (offline)",
111 "Link test (on/offline)" 126 "Link test (on/offline)"
112}; 127};
113#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN 128#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
114 129
115static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 130static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
116{ 131{
117 struct igb_adapter *adapter = netdev_priv(netdev); 132 struct igb_adapter *adapter = netdev_priv(netdev);
118 struct e1000_hw *hw = &adapter->hw; 133 struct e1000_hw *hw = &adapter->hw;
134 u32 status;
119 135
120 if (hw->phy.media_type == e1000_media_type_copper) { 136 if (hw->phy.media_type == e1000_media_type_copper) {
121 137
@@ -150,17 +166,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
150 166
151 ecmd->transceiver = XCVR_INTERNAL; 167 ecmd->transceiver = XCVR_INTERNAL;
152 168
153 if (rd32(E1000_STATUS) & E1000_STATUS_LU) { 169 status = rd32(E1000_STATUS);
154 170
155 adapter->hw.mac.ops.get_speed_and_duplex(hw, 171 if (status & E1000_STATUS_LU) {
156 &adapter->link_speed,
157 &adapter->link_duplex);
158 ecmd->speed = adapter->link_speed;
159 172
160 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 173 if ((status & E1000_STATUS_SPEED_1000) ||
161 * and HALF_DUPLEX != DUPLEX_HALF */ 174 hw->phy.media_type != e1000_media_type_copper)
175 ecmd->speed = SPEED_1000;
176 else if (status & E1000_STATUS_SPEED_100)
177 ecmd->speed = SPEED_100;
178 else
179 ecmd->speed = SPEED_10;
162 180
163 if (adapter->link_duplex == FULL_DUPLEX) 181 if ((status & E1000_STATUS_FD) ||
182 hw->phy.media_type != e1000_media_type_copper)
164 ecmd->duplex = DUPLEX_FULL; 183 ecmd->duplex = DUPLEX_FULL;
165 else 184 else
166 ecmd->duplex = DUPLEX_HALF; 185 ecmd->duplex = DUPLEX_HALF;
@@ -251,8 +270,9 @@ static int igb_set_pauseparam(struct net_device *netdev,
251 if (netif_running(adapter->netdev)) { 270 if (netif_running(adapter->netdev)) {
252 igb_down(adapter); 271 igb_down(adapter);
253 igb_up(adapter); 272 igb_up(adapter);
254 } else 273 } else {
255 igb_reset(adapter); 274 igb_reset(adapter);
275 }
256 } else { 276 } else {
257 if (pause->rx_pause && pause->tx_pause) 277 if (pause->rx_pause && pause->tx_pause)
258 hw->fc.requested_mode = e1000_fc_full; 278 hw->fc.requested_mode = e1000_fc_full;
@@ -276,17 +296,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
276static u32 igb_get_rx_csum(struct net_device *netdev) 296static u32 igb_get_rx_csum(struct net_device *netdev)
277{ 297{
278 struct igb_adapter *adapter = netdev_priv(netdev); 298 struct igb_adapter *adapter = netdev_priv(netdev);
279 return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); 299 return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
280} 300}
281 301
282static int igb_set_rx_csum(struct net_device *netdev, u32 data) 302static int igb_set_rx_csum(struct net_device *netdev, u32 data)
283{ 303{
284 struct igb_adapter *adapter = netdev_priv(netdev); 304 struct igb_adapter *adapter = netdev_priv(netdev);
305 int i;
285 306
286 if (data) 307 for (i = 0; i < adapter->num_rx_queues; i++) {
287 adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; 308 if (data)
288 else 309 adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
289 adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; 310 else
311 adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
312 }
290 313
291 return 0; 314 return 0;
292} 315}
@@ -302,7 +325,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data)
302 325
303 if (data) { 326 if (data) {
304 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 327 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
305 if (adapter->hw.mac.type == e1000_82576) 328 if (adapter->hw.mac.type >= e1000_82576)
306 netdev->features |= NETIF_F_SCTP_CSUM; 329 netdev->features |= NETIF_F_SCTP_CSUM;
307 } else { 330 } else {
308 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 331 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -496,19 +519,10 @@ static void igb_get_regs(struct net_device *netdev,
496 regs_buff[119] = adapter->stats.scvpc; 519 regs_buff[119] = adapter->stats.scvpc;
497 regs_buff[120] = adapter->stats.hrmpc; 520 regs_buff[120] = adapter->stats.hrmpc;
498 521
499 /* These should probably be added to e1000_regs.h instead */
500 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
501 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
502 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
503 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
504 #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
505 #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
506 #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
507
508 for (i = 0; i < 4; i++) 522 for (i = 0; i < 4; i++)
509 regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); 523 regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
510 for (i = 0; i < 4; i++) 524 for (i = 0; i < 4; i++)
511 regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); 525 regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
512 for (i = 0; i < 4; i++) 526 for (i = 0; i < 4; i++)
513 regs_buff[129 + i] = rd32(E1000_RDBAL(i)); 527 regs_buff[129 + i] = rd32(E1000_RDBAL(i));
514 for (i = 0; i < 4; i++) 528 for (i = 0; i < 4; i++)
@@ -733,17 +747,17 @@ static int igb_set_ringparam(struct net_device *netdev,
733 struct igb_adapter *adapter = netdev_priv(netdev); 747 struct igb_adapter *adapter = netdev_priv(netdev);
734 struct igb_ring *temp_ring; 748 struct igb_ring *temp_ring;
735 int i, err = 0; 749 int i, err = 0;
736 u32 new_rx_count, new_tx_count; 750 u16 new_rx_count, new_tx_count;
737 751
738 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 752 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
739 return -EINVAL; 753 return -EINVAL;
740 754
741 new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); 755 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
742 new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); 756 new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
743 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 757 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
744 758
745 new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); 759 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
746 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); 760 new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
747 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 761 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
748 762
749 if ((new_tx_count == adapter->tx_ring_count) && 763 if ((new_tx_count == adapter->tx_ring_count) &&
@@ -788,7 +802,7 @@ static int igb_set_ringparam(struct net_device *netdev,
788 802
789 for (i = 0; i < adapter->num_tx_queues; i++) { 803 for (i = 0; i < adapter->num_tx_queues; i++) {
790 temp_ring[i].count = new_tx_count; 804 temp_ring[i].count = new_tx_count;
791 err = igb_setup_tx_resources(adapter, &temp_ring[i]); 805 err = igb_setup_tx_resources(&temp_ring[i]);
792 if (err) { 806 if (err) {
793 while (i) { 807 while (i) {
794 i--; 808 i--;
@@ -813,7 +827,7 @@ static int igb_set_ringparam(struct net_device *netdev,
813 827
814 for (i = 0; i < adapter->num_rx_queues; i++) { 828 for (i = 0; i < adapter->num_rx_queues; i++) {
815 temp_ring[i].count = new_rx_count; 829 temp_ring[i].count = new_rx_count;
816 err = igb_setup_rx_resources(adapter, &temp_ring[i]); 830 err = igb_setup_rx_resources(&temp_ring[i]);
817 if (err) { 831 if (err) {
818 while (i) { 832 while (i) {
819 i--; 833 i--;
@@ -867,6 +881,49 @@ struct igb_reg_test {
867#define TABLE64_TEST_LO 5 881#define TABLE64_TEST_LO 5
868#define TABLE64_TEST_HI 6 882#define TABLE64_TEST_HI 6
869 883
884/* 82580 reg test */
885static struct igb_reg_test reg_test_82580[] = {
886 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
887 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
888 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
889 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
890 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
891 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
892 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
893 { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
894 { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
895 { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
896 /* RDH is read-only for 82580, only test RDT. */
897 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
898 { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
899 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
900 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
901 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
902 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
903 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
904 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
905 { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
906 { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
907 { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
908 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
909 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
910 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
911 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
912 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
913 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
914 { E1000_RA, 0, 16, TABLE64_TEST_LO,
915 0xFFFFFFFF, 0xFFFFFFFF },
916 { E1000_RA, 0, 16, TABLE64_TEST_HI,
917 0x83FFFFFF, 0xFFFFFFFF },
918 { E1000_RA2, 0, 8, TABLE64_TEST_LO,
919 0xFFFFFFFF, 0xFFFFFFFF },
920 { E1000_RA2, 0, 8, TABLE64_TEST_HI,
921 0x83FFFFFF, 0xFFFFFFFF },
922 { E1000_MTA, 0, 128, TABLE32_TEST,
923 0xFFFFFFFF, 0xFFFFFFFF },
924 { 0, 0, 0, 0 }
925};
926
870/* 82576 reg test */ 927/* 82576 reg test */
871static struct igb_reg_test reg_test_82576[] = { 928static struct igb_reg_test reg_test_82576[] = {
872 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 929 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -944,7 +1001,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
944{ 1001{
945 struct e1000_hw *hw = &adapter->hw; 1002 struct e1000_hw *hw = &adapter->hw;
946 u32 pat, val; 1003 u32 pat, val;
947 u32 _test[] = 1004 static const u32 _test[] =
948 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 1005 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
949 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 1006 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
950 wr32(reg, (_test[pat] & write)); 1007 wr32(reg, (_test[pat] & write));
@@ -957,6 +1014,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
957 return 1; 1014 return 1;
958 } 1015 }
959 } 1016 }
1017
960 return 0; 1018 return 0;
961} 1019}
962 1020
@@ -974,6 +1032,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
974 *data = reg; 1032 *data = reg;
975 return 1; 1033 return 1;
976 } 1034 }
1035
977 return 0; 1036 return 0;
978} 1037}
979 1038
@@ -996,14 +1055,18 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
996 u32 value, before, after; 1055 u32 value, before, after;
997 u32 i, toggle; 1056 u32 i, toggle;
998 1057
999 toggle = 0x7FFFF3FF;
1000
1001 switch (adapter->hw.mac.type) { 1058 switch (adapter->hw.mac.type) {
1059 case e1000_82580:
1060 test = reg_test_82580;
1061 toggle = 0x7FEFF3FF;
1062 break;
1002 case e1000_82576: 1063 case e1000_82576:
1003 test = reg_test_82576; 1064 test = reg_test_82576;
1065 toggle = 0x7FFFF3FF;
1004 break; 1066 break;
1005 default: 1067 default:
1006 test = reg_test_82575; 1068 test = reg_test_82575;
1069 toggle = 0x7FFFF3FF;
1007 break; 1070 break;
1008 } 1071 }
1009 1072
@@ -1081,8 +1144,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1081 *data = 0; 1144 *data = 0;
1082 /* Read and add up the contents of the EEPROM */ 1145 /* Read and add up the contents of the EEPROM */
1083 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1146 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1084 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) 1147 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
1085 < 0) {
1086 *data = 1; 1148 *data = 1;
1087 break; 1149 break;
1088 } 1150 }
@@ -1098,8 +1160,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1098 1160
1099static irqreturn_t igb_test_intr(int irq, void *data) 1161static irqreturn_t igb_test_intr(int irq, void *data)
1100{ 1162{
1101 struct net_device *netdev = (struct net_device *) data; 1163 struct igb_adapter *adapter = (struct igb_adapter *) data;
1102 struct igb_adapter *adapter = netdev_priv(netdev);
1103 struct e1000_hw *hw = &adapter->hw; 1164 struct e1000_hw *hw = &adapter->hw;
1104 1165
1105 adapter->test_icr |= rd32(E1000_ICR); 1166 adapter->test_icr |= rd32(E1000_ICR);
@@ -1117,38 +1178,45 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1117 *data = 0; 1178 *data = 0;
1118 1179
1119 /* Hook up test interrupt handler just for this test */ 1180 /* Hook up test interrupt handler just for this test */
1120 if (adapter->msix_entries) 1181 if (adapter->msix_entries) {
1121 /* NOTE: we don't test MSI-X interrupts here, yet */ 1182 if (request_irq(adapter->msix_entries[0].vector,
1122 return 0; 1183 igb_test_intr, 0, netdev->name, adapter)) {
1123 1184 *data = 1;
1124 if (adapter->flags & IGB_FLAG_HAS_MSI) { 1185 return -1;
1186 }
1187 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1125 shared_int = false; 1188 shared_int = false;
1126 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1189 if (request_irq(irq,
1190 igb_test_intr, 0, netdev->name, adapter)) {
1127 *data = 1; 1191 *data = 1;
1128 return -1; 1192 return -1;
1129 } 1193 }
1130 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, 1194 } else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
1131 netdev->name, netdev)) { 1195 netdev->name, adapter)) {
1132 shared_int = false; 1196 shared_int = false;
1133 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, 1197 } else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
1134 netdev->name, netdev)) { 1198 netdev->name, adapter)) {
1135 *data = 1; 1199 *data = 1;
1136 return -1; 1200 return -1;
1137 } 1201 }
1138 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1202 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1139 (shared_int ? "shared" : "unshared")); 1203 (shared_int ? "shared" : "unshared"));
1204
1140 /* Disable all the interrupts */ 1205 /* Disable all the interrupts */
1141 wr32(E1000_IMC, 0xFFFFFFFF); 1206 wr32(E1000_IMC, ~0);
1142 msleep(10); 1207 msleep(10);
1143 1208
1144 /* Define all writable bits for ICS */ 1209 /* Define all writable bits for ICS */
1145 switch(hw->mac.type) { 1210 switch (hw->mac.type) {
1146 case e1000_82575: 1211 case e1000_82575:
1147 ics_mask = 0x37F47EDD; 1212 ics_mask = 0x37F47EDD;
1148 break; 1213 break;
1149 case e1000_82576: 1214 case e1000_82576:
1150 ics_mask = 0x77D4FBFD; 1215 ics_mask = 0x77D4FBFD;
1151 break; 1216 break;
1217 case e1000_82580:
1218 ics_mask = 0x77DCFED5;
1219 break;
1152 default: 1220 default:
1153 ics_mask = 0x7FFFFFFF; 1221 ics_mask = 0x7FFFFFFF;
1154 break; 1222 break;
@@ -1232,190 +1300,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1232 msleep(10); 1300 msleep(10);
1233 1301
1234 /* Unhook test interrupt handler */ 1302 /* Unhook test interrupt handler */
1235 free_irq(irq, netdev); 1303 if (adapter->msix_entries)
1304 free_irq(adapter->msix_entries[0].vector, adapter);
1305 else
1306 free_irq(irq, adapter);
1236 1307
1237 return *data; 1308 return *data;
1238} 1309}
1239 1310
1240static void igb_free_desc_rings(struct igb_adapter *adapter) 1311static void igb_free_desc_rings(struct igb_adapter *adapter)
1241{ 1312{
1242 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1313 igb_free_tx_resources(&adapter->test_tx_ring);
1243 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1314 igb_free_rx_resources(&adapter->test_rx_ring);
1244 struct pci_dev *pdev = adapter->pdev;
1245 int i;
1246
1247 if (tx_ring->desc && tx_ring->buffer_info) {
1248 for (i = 0; i < tx_ring->count; i++) {
1249 struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
1250 if (buf->dma)
1251 pci_unmap_single(pdev, buf->dma, buf->length,
1252 PCI_DMA_TODEVICE);
1253 if (buf->skb)
1254 dev_kfree_skb(buf->skb);
1255 }
1256 }
1257
1258 if (rx_ring->desc && rx_ring->buffer_info) {
1259 for (i = 0; i < rx_ring->count; i++) {
1260 struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
1261 if (buf->dma)
1262 pci_unmap_single(pdev, buf->dma,
1263 IGB_RXBUFFER_2048,
1264 PCI_DMA_FROMDEVICE);
1265 if (buf->skb)
1266 dev_kfree_skb(buf->skb);
1267 }
1268 }
1269
1270 if (tx_ring->desc) {
1271 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
1272 tx_ring->dma);
1273 tx_ring->desc = NULL;
1274 }
1275 if (rx_ring->desc) {
1276 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
1277 rx_ring->dma);
1278 rx_ring->desc = NULL;
1279 }
1280
1281 kfree(tx_ring->buffer_info);
1282 tx_ring->buffer_info = NULL;
1283 kfree(rx_ring->buffer_info);
1284 rx_ring->buffer_info = NULL;
1285
1286 return;
1287} 1315}
1288 1316
1289static int igb_setup_desc_rings(struct igb_adapter *adapter) 1317static int igb_setup_desc_rings(struct igb_adapter *adapter)
1290{ 1318{
1291 struct e1000_hw *hw = &adapter->hw;
1292 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1319 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1293 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1320 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1294 struct pci_dev *pdev = adapter->pdev; 1321 struct e1000_hw *hw = &adapter->hw;
1295 struct igb_buffer *buffer_info; 1322 int ret_val;
1296 u32 rctl;
1297 int i, ret_val;
1298 1323
1299 /* Setup Tx descriptor ring and Tx buffers */ 1324 /* Setup Tx descriptor ring and Tx buffers */
1325 tx_ring->count = IGB_DEFAULT_TXD;
1326 tx_ring->pdev = adapter->pdev;
1327 tx_ring->netdev = adapter->netdev;
1328 tx_ring->reg_idx = adapter->vfs_allocated_count;
1300 1329
1301 if (!tx_ring->count) 1330 if (igb_setup_tx_resources(tx_ring)) {
1302 tx_ring->count = IGB_DEFAULT_TXD;
1303
1304 tx_ring->buffer_info = kcalloc(tx_ring->count,
1305 sizeof(struct igb_buffer),
1306 GFP_KERNEL);
1307 if (!tx_ring->buffer_info) {
1308 ret_val = 1; 1331 ret_val = 1;
1309 goto err_nomem; 1332 goto err_nomem;
1310 } 1333 }
1311 1334
1312 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1335 igb_setup_tctl(adapter);
1313 tx_ring->size = ALIGN(tx_ring->size, 4096); 1336 igb_configure_tx_ring(adapter, tx_ring);
1314 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1315 &tx_ring->dma);
1316 if (!tx_ring->desc) {
1317 ret_val = 2;
1318 goto err_nomem;
1319 }
1320 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1321
1322 wr32(E1000_TDBAL(0),
1323 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1324 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
1325 wr32(E1000_TDLEN(0),
1326 tx_ring->count * sizeof(union e1000_adv_tx_desc));
1327 wr32(E1000_TDH(0), 0);
1328 wr32(E1000_TDT(0), 0);
1329 wr32(E1000_TCTL,
1330 E1000_TCTL_PSP | E1000_TCTL_EN |
1331 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1332 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1333
1334 for (i = 0; i < tx_ring->count; i++) {
1335 union e1000_adv_tx_desc *tx_desc;
1336 struct sk_buff *skb;
1337 unsigned int size = 1024;
1338
1339 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
1340 skb = alloc_skb(size, GFP_KERNEL);
1341 if (!skb) {
1342 ret_val = 3;
1343 goto err_nomem;
1344 }
1345 skb_put(skb, size);
1346 buffer_info = &tx_ring->buffer_info[i];
1347 buffer_info->skb = skb;
1348 buffer_info->length = skb->len;
1349 buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
1350 PCI_DMA_TODEVICE);
1351 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
1352 tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
1353 E1000_ADVTXD_PAYLEN_SHIFT;
1354 tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
1355 tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
1356 E1000_TXD_CMD_IFCS |
1357 E1000_TXD_CMD_RS |
1358 E1000_ADVTXD_DTYP_DATA |
1359 E1000_ADVTXD_DCMD_DEXT);
1360 }
1361 1337
1362 /* Setup Rx descriptor ring and Rx buffers */ 1338 /* Setup Rx descriptor ring and Rx buffers */
1363 1339 rx_ring->count = IGB_DEFAULT_RXD;
1364 if (!rx_ring->count) 1340 rx_ring->pdev = adapter->pdev;
1365 rx_ring->count = IGB_DEFAULT_RXD; 1341 rx_ring->netdev = adapter->netdev;
1366 1342 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
1367 rx_ring->buffer_info = kcalloc(rx_ring->count, 1343 rx_ring->reg_idx = adapter->vfs_allocated_count;
1368 sizeof(struct igb_buffer), 1344
1369 GFP_KERNEL); 1345 if (igb_setup_rx_resources(rx_ring)) {
1370 if (!rx_ring->buffer_info) { 1346 ret_val = 3;
1371 ret_val = 4;
1372 goto err_nomem; 1347 goto err_nomem;
1373 } 1348 }
1374 1349
1375 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1350 /* set the default queue to queue 0 of PF */
1376 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1351 wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
1377 &rx_ring->dma);
1378 if (!rx_ring->desc) {
1379 ret_val = 5;
1380 goto err_nomem;
1381 }
1382 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1383 1352
1384 rctl = rd32(E1000_RCTL); 1353 /* enable receive ring */
1385 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 1354 igb_setup_rctl(adapter);
1386 wr32(E1000_RDBAL(0), 1355 igb_configure_rx_ring(adapter, rx_ring);
1387 ((u64) rx_ring->dma & 0xFFFFFFFF)); 1356
1388 wr32(E1000_RDBAH(0), 1357 igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
1389 ((u64) rx_ring->dma >> 32));
1390 wr32(E1000_RDLEN(0), rx_ring->size);
1391 wr32(E1000_RDH(0), 0);
1392 wr32(E1000_RDT(0), 0);
1393 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1394 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1395 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1396 wr32(E1000_RCTL, rctl);
1397 wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
1398
1399 for (i = 0; i < rx_ring->count; i++) {
1400 union e1000_adv_rx_desc *rx_desc;
1401 struct sk_buff *skb;
1402
1403 buffer_info = &rx_ring->buffer_info[i];
1404 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
1405 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
1406 GFP_KERNEL);
1407 if (!skb) {
1408 ret_val = 6;
1409 goto err_nomem;
1410 }
1411 skb_reserve(skb, NET_IP_ALIGN);
1412 buffer_info->skb = skb;
1413 buffer_info->dma = pci_map_single(pdev, skb->data,
1414 IGB_RXBUFFER_2048,
1415 PCI_DMA_FROMDEVICE);
1416 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
1417 memset(skb->data, 0x00, skb->len);
1418 }
1419 1358
1420 return 0; 1359 return 0;
1421 1360
@@ -1449,6 +1388,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1449 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); 1388 igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
1450 /* autoneg off */ 1389 /* autoneg off */
1451 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); 1390 igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
1391 } else if (hw->phy.type == e1000_phy_82580) {
1392 /* enable MII loopback */
1393 igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
1452 } 1394 }
1453 1395
1454 ctrl_reg = rd32(E1000_CTRL); 1396 ctrl_reg = rd32(E1000_CTRL);
@@ -1491,7 +1433,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1491 struct e1000_hw *hw = &adapter->hw; 1433 struct e1000_hw *hw = &adapter->hw;
1492 u32 reg; 1434 u32 reg;
1493 1435
1494 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1436 reg = rd32(E1000_CTRL_EXT);
1437
1438 /* use CTRL_EXT to identify link type as SGMII can appear as copper */
1439 if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
1495 reg = rd32(E1000_RCTL); 1440 reg = rd32(E1000_RCTL);
1496 reg |= E1000_RCTL_LBM_TCVR; 1441 reg |= E1000_RCTL_LBM_TCVR;
1497 wr32(E1000_RCTL, reg); 1442 wr32(E1000_RCTL, reg);
@@ -1522,11 +1467,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1522 wr32(E1000_PCS_LCTL, reg); 1467 wr32(E1000_PCS_LCTL, reg);
1523 1468
1524 return 0; 1469 return 0;
1525 } else if (hw->phy.media_type == e1000_media_type_copper) {
1526 return igb_set_phy_loopback(adapter);
1527 } 1470 }
1528 1471
1529 return 7; 1472 return igb_set_phy_loopback(adapter);
1530} 1473}
1531 1474
1532static void igb_loopback_cleanup(struct igb_adapter *adapter) 1475static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1552,35 +1495,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
1552 unsigned int frame_size) 1495 unsigned int frame_size)
1553{ 1496{
1554 memset(skb->data, 0xFF, frame_size); 1497 memset(skb->data, 0xFF, frame_size);
1555 frame_size &= ~1; 1498 frame_size /= 2;
1556 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1499 memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1557 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1500 memset(&skb->data[frame_size + 10], 0xBE, 1);
1558 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1501 memset(&skb->data[frame_size + 12], 0xAF, 1);
1559} 1502}
1560 1503
1561static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1504static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1562{ 1505{
1563 frame_size &= ~1; 1506 frame_size /= 2;
1564 if (*(skb->data + 3) == 0xFF) 1507 if (*(skb->data + 3) == 0xFF) {
1565 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1508 if ((*(skb->data + frame_size + 10) == 0xBE) &&
1566 (*(skb->data + frame_size / 2 + 12) == 0xAF)) 1509 (*(skb->data + frame_size + 12) == 0xAF)) {
1567 return 0; 1510 return 0;
1511 }
1512 }
1568 return 13; 1513 return 13;
1569} 1514}
1570 1515
1516static int igb_clean_test_rings(struct igb_ring *rx_ring,
1517 struct igb_ring *tx_ring,
1518 unsigned int size)
1519{
1520 union e1000_adv_rx_desc *rx_desc;
1521 struct igb_buffer *buffer_info;
1522 int rx_ntc, tx_ntc, count = 0;
1523 u32 staterr;
1524
1525 /* initialize next to clean and descriptor values */
1526 rx_ntc = rx_ring->next_to_clean;
1527 tx_ntc = tx_ring->next_to_clean;
1528 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1529 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1530
1531 while (staterr & E1000_RXD_STAT_DD) {
1532 /* check rx buffer */
1533 buffer_info = &rx_ring->buffer_info[rx_ntc];
1534
1535 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
1536 pci_unmap_single(rx_ring->pdev,
1537 buffer_info->dma,
1538 rx_ring->rx_buffer_len,
1539 PCI_DMA_FROMDEVICE);
1540 buffer_info->dma = 0;
1541
1542 /* verify contents of skb */
1543 if (!igb_check_lbtest_frame(buffer_info->skb, size))
1544 count++;
1545
1546 /* unmap buffer on tx side */
1547 buffer_info = &tx_ring->buffer_info[tx_ntc];
1548 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
1549
1550 /* increment rx/tx next to clean counters */
1551 rx_ntc++;
1552 if (rx_ntc == rx_ring->count)
1553 rx_ntc = 0;
1554 tx_ntc++;
1555 if (tx_ntc == tx_ring->count)
1556 tx_ntc = 0;
1557
1558 /* fetch next descriptor */
1559 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1560 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1561 }
1562
1563 /* re-map buffers to ring, store next to clean values */
1564 igb_alloc_rx_buffers_adv(rx_ring, count);
1565 rx_ring->next_to_clean = rx_ntc;
1566 tx_ring->next_to_clean = tx_ntc;
1567
1568 return count;
1569}
1570
1571static int igb_run_loopback_test(struct igb_adapter *adapter) 1571static int igb_run_loopback_test(struct igb_adapter *adapter)
1572{ 1572{
1573 struct e1000_hw *hw = &adapter->hw;
1574 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1573 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1575 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1574 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1576 struct pci_dev *pdev = adapter->pdev; 1575 int i, j, lc, good_cnt, ret_val = 0;
1577 int i, j, k, l, lc, good_cnt; 1576 unsigned int size = 1024;
1578 int ret_val = 0; 1577 netdev_tx_t tx_ret_val;
1579 unsigned long time; 1578 struct sk_buff *skb;
1580 1579
1581 wr32(E1000_RDT(0), rx_ring->count - 1); 1580 /* allocate test skb */
1581 skb = alloc_skb(size, GFP_KERNEL);
1582 if (!skb)
1583 return 11;
1582 1584
1583 /* Calculate the loop count based on the largest descriptor ring 1585 /* place data into test skb */
1586 igb_create_lbtest_frame(skb, size);
1587 skb_put(skb, size);
1588
1589 /*
1590 * Calculate the loop count based on the largest descriptor ring
1584 * The idea is to wrap the largest ring a number of times using 64 1591 * The idea is to wrap the largest ring a number of times using 64
1585 * send/receive pairs during each loop 1592 * send/receive pairs during each loop
1586 */ 1593 */
@@ -1590,50 +1597,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1590 else 1597 else
1591 lc = ((rx_ring->count / 64) * 2) + 1; 1598 lc = ((rx_ring->count / 64) * 2) + 1;
1592 1599
1593 k = l = 0;
1594 for (j = 0; j <= lc; j++) { /* loop count loop */ 1600 for (j = 0; j <= lc; j++) { /* loop count loop */
1595 for (i = 0; i < 64; i++) { /* send the packets */ 1601 /* reset count of good packets */
1596 igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1597 1024);
1598 pci_dma_sync_single_for_device(pdev,
1599 tx_ring->buffer_info[k].dma,
1600 tx_ring->buffer_info[k].length,
1601 PCI_DMA_TODEVICE);
1602 k++;
1603 if (k == tx_ring->count)
1604 k = 0;
1605 }
1606 wr32(E1000_TDT(0), k);
1607 msleep(200);
1608 time = jiffies; /* set the start time for the receive */
1609 good_cnt = 0; 1602 good_cnt = 0;
1610 do { /* receive the sent packets */ 1603
1611 pci_dma_sync_single_for_cpu(pdev, 1604 /* place 64 packets on the transmit queue*/
1612 rx_ring->buffer_info[l].dma, 1605 for (i = 0; i < 64; i++) {
1613 IGB_RXBUFFER_2048, 1606 skb_get(skb);
1614 PCI_DMA_FROMDEVICE); 1607 tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
1615 1608 if (tx_ret_val == NETDEV_TX_OK)
1616 ret_val = igb_check_lbtest_frame(
1617 rx_ring->buffer_info[l].skb, 1024);
1618 if (!ret_val)
1619 good_cnt++; 1609 good_cnt++;
1620 l++; 1610 }
1621 if (l == rx_ring->count) 1611
1622 l = 0;
1623 /* time + 20 msecs (200 msecs on 2.4) is more than
1624 * enough time to complete the receives, if it's
1625 * exceeded, break and error off
1626 */
1627 } while (good_cnt < 64 && jiffies < (time + 20));
1628 if (good_cnt != 64) { 1612 if (good_cnt != 64) {
1629 ret_val = 13; /* ret_val is the same as mis-compare */ 1613 ret_val = 12;
1630 break; 1614 break;
1631 } 1615 }
1632 if (jiffies >= (time + 20)) { 1616
1633 ret_val = 14; /* error code for time out error */ 1617 /* allow 200 milliseconds for packets to go from tx to rx */
1618 msleep(200);
1619
1620 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
1621 if (good_cnt != 64) {
1622 ret_val = 13;
1634 break; 1623 break;
1635 } 1624 }
1636 } /* end loop count loop */ 1625 } /* end loop count loop */
1626
1627 /* free the original skb */
1628 kfree_skb(skb);
1629
1637 return ret_val; 1630 return ret_val;
1638} 1631}
1639 1632
@@ -1686,8 +1679,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1686 if (hw->mac.autoneg) 1679 if (hw->mac.autoneg)
1687 msleep(4000); 1680 msleep(4000);
1688 1681
1689 if (!(rd32(E1000_STATUS) & 1682 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
1690 E1000_STATUS_LU))
1691 *data = 1; 1683 *data = 1;
1692 } 1684 }
1693 return *data; 1685 return *data;
@@ -1869,7 +1861,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1869 adapter->wol |= E1000_WUFC_BC; 1861 adapter->wol |= E1000_WUFC_BC;
1870 if (wol->wolopts & WAKE_MAGIC) 1862 if (wol->wolopts & WAKE_MAGIC)
1871 adapter->wol |= E1000_WUFC_MAG; 1863 adapter->wol |= E1000_WUFC_MAG;
1872
1873 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1864 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1874 1865
1875 return 0; 1866 return 0;
@@ -1882,12 +1873,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
1882{ 1873{
1883 struct igb_adapter *adapter = netdev_priv(netdev); 1874 struct igb_adapter *adapter = netdev_priv(netdev);
1884 struct e1000_hw *hw = &adapter->hw; 1875 struct e1000_hw *hw = &adapter->hw;
1876 unsigned long timeout;
1885 1877
1886 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 1878 timeout = data * 1000;
1887 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 1879
1880 /*
1881 * msleep_interruptable only accepts unsigned int so we are limited
1882 * in how long a duration we can wait
1883 */
1884 if (!timeout || timeout > UINT_MAX)
1885 timeout = UINT_MAX;
1888 1886
1889 igb_blink_led(hw); 1887 igb_blink_led(hw);
1890 msleep_interruptible(data * 1000); 1888 msleep_interruptible(timeout);
1891 1889
1892 igb_led_off(hw); 1890 igb_led_off(hw);
1893 clear_bit(IGB_LED_ON, &adapter->led_status); 1891 clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1900,7 +1898,6 @@ static int igb_set_coalesce(struct net_device *netdev,
1900 struct ethtool_coalesce *ec) 1898 struct ethtool_coalesce *ec)
1901{ 1899{
1902 struct igb_adapter *adapter = netdev_priv(netdev); 1900 struct igb_adapter *adapter = netdev_priv(netdev);
1903 struct e1000_hw *hw = &adapter->hw;
1904 int i; 1901 int i;
1905 1902
1906 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || 1903 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1909,17 +1906,39 @@ static int igb_set_coalesce(struct net_device *netdev,
1909 (ec->rx_coalesce_usecs == 2)) 1906 (ec->rx_coalesce_usecs == 2))
1910 return -EINVAL; 1907 return -EINVAL;
1911 1908
1909 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1910 ((ec->tx_coalesce_usecs > 3) &&
1911 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1912 (ec->tx_coalesce_usecs == 2))
1913 return -EINVAL;
1914
1915 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1916 return -EINVAL;
1917
1912 /* convert to rate of irq's per second */ 1918 /* convert to rate of irq's per second */
1913 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { 1919 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
1914 adapter->itr_setting = ec->rx_coalesce_usecs; 1920 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
1915 adapter->itr = IGB_START_ITR; 1921 else
1916 } else { 1922 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
1917 adapter->itr_setting = ec->rx_coalesce_usecs << 2;
1918 adapter->itr = adapter->itr_setting;
1919 }
1920 1923
1921 for (i = 0; i < adapter->num_rx_queues; i++) 1924 /* convert to rate of irq's per second */
1922 wr32(adapter->rx_ring[i].itr_register, adapter->itr); 1925 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
1926 adapter->tx_itr_setting = adapter->rx_itr_setting;
1927 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
1928 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
1929 else
1930 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
1931
1932 for (i = 0; i < adapter->num_q_vectors; i++) {
1933 struct igb_q_vector *q_vector = adapter->q_vector[i];
1934 if (q_vector->rx_ring)
1935 q_vector->itr_val = adapter->rx_itr_setting;
1936 else
1937 q_vector->itr_val = adapter->tx_itr_setting;
1938 if (q_vector->itr_val && q_vector->itr_val <= 3)
1939 q_vector->itr_val = IGB_START_ITR;
1940 q_vector->set_itr = 1;
1941 }
1923 1942
1924 return 0; 1943 return 0;
1925} 1944}
@@ -1929,15 +1948,21 @@ static int igb_get_coalesce(struct net_device *netdev,
1929{ 1948{
1930 struct igb_adapter *adapter = netdev_priv(netdev); 1949 struct igb_adapter *adapter = netdev_priv(netdev);
1931 1950
1932 if (adapter->itr_setting <= 3) 1951 if (adapter->rx_itr_setting <= 3)
1933 ec->rx_coalesce_usecs = adapter->itr_setting; 1952 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
1934 else 1953 else
1935 ec->rx_coalesce_usecs = adapter->itr_setting >> 2; 1954 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
1955
1956 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
1957 if (adapter->tx_itr_setting <= 3)
1958 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
1959 else
1960 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
1961 }
1936 1962
1937 return 0; 1963 return 0;
1938} 1964}
1939 1965
1940
1941static int igb_nway_reset(struct net_device *netdev) 1966static int igb_nway_reset(struct net_device *netdev)
1942{ 1967{
1943 struct igb_adapter *adapter = netdev_priv(netdev); 1968 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1962,31 +1987,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1962 struct ethtool_stats *stats, u64 *data) 1987 struct ethtool_stats *stats, u64 *data)
1963{ 1988{
1964 struct igb_adapter *adapter = netdev_priv(netdev); 1989 struct igb_adapter *adapter = netdev_priv(netdev);
1990 struct net_device_stats *net_stats = &netdev->stats;
1965 u64 *queue_stat; 1991 u64 *queue_stat;
1966 int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); 1992 int i, j, k;
1967 int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); 1993 char *p;
1968 int j;
1969 int i;
1970 1994
1971 igb_update_stats(adapter); 1995 igb_update_stats(adapter);
1996
1972 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1997 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1973 char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset; 1998 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
1974 data[i] = (igb_gstrings_stats[i].sizeof_stat == 1999 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1975 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 2000 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1976 } 2001 }
2002 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
2003 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
2004 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
2005 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
2006 }
1977 for (j = 0; j < adapter->num_tx_queues; j++) { 2007 for (j = 0; j < adapter->num_tx_queues; j++) {
1978 int k;
1979 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; 2008 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
1980 for (k = 0; k < stat_count_tx; k++) 2009 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
1981 data[i + k] = queue_stat[k]; 2010 data[i] = queue_stat[k];
1982 i += k;
1983 } 2011 }
1984 for (j = 0; j < adapter->num_rx_queues; j++) { 2012 for (j = 0; j < adapter->num_rx_queues; j++) {
1985 int k;
1986 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; 2013 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
1987 for (k = 0; k < stat_count_rx; k++) 2014 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
1988 data[i + k] = queue_stat[k]; 2015 data[i] = queue_stat[k];
1989 i += k;
1990 } 2016 }
1991} 2017}
1992 2018
@@ -2007,11 +2033,18 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2007 ETH_GSTRING_LEN); 2033 ETH_GSTRING_LEN);
2008 p += ETH_GSTRING_LEN; 2034 p += ETH_GSTRING_LEN;
2009 } 2035 }
2036 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
2037 memcpy(p, igb_gstrings_net_stats[i].stat_string,
2038 ETH_GSTRING_LEN);
2039 p += ETH_GSTRING_LEN;
2040 }
2010 for (i = 0; i < adapter->num_tx_queues; i++) { 2041 for (i = 0; i < adapter->num_tx_queues; i++) {
2011 sprintf(p, "tx_queue_%u_packets", i); 2042 sprintf(p, "tx_queue_%u_packets", i);
2012 p += ETH_GSTRING_LEN; 2043 p += ETH_GSTRING_LEN;
2013 sprintf(p, "tx_queue_%u_bytes", i); 2044 sprintf(p, "tx_queue_%u_bytes", i);
2014 p += ETH_GSTRING_LEN; 2045 p += ETH_GSTRING_LEN;
2046 sprintf(p, "tx_queue_%u_restart", i);
2047 p += ETH_GSTRING_LEN;
2015 } 2048 }
2016 for (i = 0; i < adapter->num_rx_queues; i++) { 2049 for (i = 0; i < adapter->num_rx_queues; i++) {
2017 sprintf(p, "rx_queue_%u_packets", i); 2050 sprintf(p, "rx_queue_%u_packets", i);
@@ -2020,6 +2053,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2020 p += ETH_GSTRING_LEN; 2053 p += ETH_GSTRING_LEN;
2021 sprintf(p, "rx_queue_%u_drops", i); 2054 sprintf(p, "rx_queue_%u_drops", i);
2022 p += ETH_GSTRING_LEN; 2055 p += ETH_GSTRING_LEN;
2056 sprintf(p, "rx_queue_%u_csum_err", i);
2057 p += ETH_GSTRING_LEN;
2058 sprintf(p, "rx_queue_%u_alloc_failed", i);
2059 p += ETH_GSTRING_LEN;
2023 } 2060 }
2024/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2061/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2025 break; 2062 break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 714c3a4a44ef..bb1a6eeade06 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -49,7 +49,7 @@
49#endif 49#endif
50#include "igb.h" 50#include "igb.h"
51 51
52#define DRV_VERSION "1.3.16-k2" 52#define DRV_VERSION "2.1.0-k2"
53char igb_driver_name[] = "igb"; 53char igb_driver_name[] = "igb";
54char igb_driver_version[] = DRV_VERSION; 54char igb_driver_version[] = DRV_VERSION;
55static const char igb_driver_string[] = 55static const char igb_driver_string[] =
@@ -61,8 +61,14 @@ static const struct e1000_info *igb_info_tbl[] = {
61}; 61};
62 62
63static struct pci_device_id igb_pci_tbl[] = { 63static struct pci_device_id igb_pci_tbl[] = {
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, 70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 72 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, 74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
@@ -81,6 +87,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
81static int igb_setup_all_rx_resources(struct igb_adapter *); 87static int igb_setup_all_rx_resources(struct igb_adapter *);
82static void igb_free_all_tx_resources(struct igb_adapter *); 88static void igb_free_all_tx_resources(struct igb_adapter *);
83static void igb_free_all_rx_resources(struct igb_adapter *); 89static void igb_free_all_rx_resources(struct igb_adapter *);
90static void igb_setup_mrqc(struct igb_adapter *);
84void igb_update_stats(struct igb_adapter *); 91void igb_update_stats(struct igb_adapter *);
85static int igb_probe(struct pci_dev *, const struct pci_device_id *); 92static int igb_probe(struct pci_dev *, const struct pci_device_id *);
86static void __devexit igb_remove(struct pci_dev *pdev); 93static void __devexit igb_remove(struct pci_dev *pdev);
@@ -89,7 +96,6 @@ static int igb_open(struct net_device *);
89static int igb_close(struct net_device *); 96static int igb_close(struct net_device *);
90static void igb_configure_tx(struct igb_adapter *); 97static void igb_configure_tx(struct igb_adapter *);
91static void igb_configure_rx(struct igb_adapter *); 98static void igb_configure_rx(struct igb_adapter *);
92static void igb_setup_rctl(struct igb_adapter *);
93static void igb_clean_all_tx_rings(struct igb_adapter *); 99static void igb_clean_all_tx_rings(struct igb_adapter *);
94static void igb_clean_all_rx_rings(struct igb_adapter *); 100static void igb_clean_all_rx_rings(struct igb_adapter *);
95static void igb_clean_tx_ring(struct igb_ring *); 101static void igb_clean_tx_ring(struct igb_ring *);
@@ -98,28 +104,22 @@ static void igb_set_rx_mode(struct net_device *);
98static void igb_update_phy_info(unsigned long); 104static void igb_update_phy_info(unsigned long);
99static void igb_watchdog(unsigned long); 105static void igb_watchdog(unsigned long);
100static void igb_watchdog_task(struct work_struct *); 106static void igb_watchdog_task(struct work_struct *);
101static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, 107static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
102 struct net_device *,
103 struct igb_ring *);
104static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
105 struct net_device *);
106static struct net_device_stats *igb_get_stats(struct net_device *); 108static struct net_device_stats *igb_get_stats(struct net_device *);
107static int igb_change_mtu(struct net_device *, int); 109static int igb_change_mtu(struct net_device *, int);
108static int igb_set_mac(struct net_device *, void *); 110static int igb_set_mac(struct net_device *, void *);
111static void igb_set_uta(struct igb_adapter *adapter);
109static irqreturn_t igb_intr(int irq, void *); 112static irqreturn_t igb_intr(int irq, void *);
110static irqreturn_t igb_intr_msi(int irq, void *); 113static irqreturn_t igb_intr_msi(int irq, void *);
111static irqreturn_t igb_msix_other(int irq, void *); 114static irqreturn_t igb_msix_other(int irq, void *);
112static irqreturn_t igb_msix_rx(int irq, void *); 115static irqreturn_t igb_msix_ring(int irq, void *);
113static irqreturn_t igb_msix_tx(int irq, void *);
114#ifdef CONFIG_IGB_DCA 116#ifdef CONFIG_IGB_DCA
115static void igb_update_rx_dca(struct igb_ring *); 117static void igb_update_dca(struct igb_q_vector *);
116static void igb_update_tx_dca(struct igb_ring *);
117static void igb_setup_dca(struct igb_adapter *); 118static void igb_setup_dca(struct igb_adapter *);
118#endif /* CONFIG_IGB_DCA */ 119#endif /* CONFIG_IGB_DCA */
119static bool igb_clean_tx_irq(struct igb_ring *); 120static bool igb_clean_tx_irq(struct igb_q_vector *);
120static int igb_poll(struct napi_struct *, int); 121static int igb_poll(struct napi_struct *, int);
121static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 122static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
122static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
123static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 123static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
124static void igb_tx_timeout(struct net_device *); 124static void igb_tx_timeout(struct net_device *);
125static void igb_reset_task(struct work_struct *); 125static void igb_reset_task(struct work_struct *);
@@ -127,57 +127,13 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
127static void igb_vlan_rx_add_vid(struct net_device *, u16); 127static void igb_vlan_rx_add_vid(struct net_device *, u16);
128static void igb_vlan_rx_kill_vid(struct net_device *, u16); 128static void igb_vlan_rx_kill_vid(struct net_device *, u16);
129static void igb_restore_vlan(struct igb_adapter *); 129static void igb_restore_vlan(struct igb_adapter *);
130static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
130static void igb_ping_all_vfs(struct igb_adapter *); 131static void igb_ping_all_vfs(struct igb_adapter *);
131static void igb_msg_task(struct igb_adapter *); 132static void igb_msg_task(struct igb_adapter *);
132static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
133static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
134static void igb_vmm_control(struct igb_adapter *); 133static void igb_vmm_control(struct igb_adapter *);
135static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); 134static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
136static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 135static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
137 136
138static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
139{
140 u32 reg_data;
141
142 reg_data = rd32(E1000_VMOLR(vfn));
143 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
144 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
145 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
146 E1000_VMOLR_AUPE | /* Accept untagged packets */
147 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
148 wr32(E1000_VMOLR(vfn), reg_data);
149}
150
151static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
152 int vfn)
153{
154 struct e1000_hw *hw = &adapter->hw;
155 u32 vmolr;
156
157 /* if it isn't the PF check to see if VFs are enabled and
158 * increase the size to support vlan tags */
159 if (vfn < adapter->vfs_allocated_count &&
160 adapter->vf_data[vfn].vlans_enabled)
161 size += VLAN_TAG_SIZE;
162
163 vmolr = rd32(E1000_VMOLR(vfn));
164 vmolr &= ~E1000_VMOLR_RLPML_MASK;
165 vmolr |= size | E1000_VMOLR_LPE;
166 wr32(E1000_VMOLR(vfn), vmolr);
167
168 return 0;
169}
170
171static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
172{
173 u32 reg_data;
174
175 reg_data = rd32(E1000_RAH(entry));
176 reg_data &= ~E1000_RAH_POOL_MASK;
177 reg_data |= E1000_RAH_POOL_1 << pool;;
178 wr32(E1000_RAH(entry), reg_data);
179}
180
181#ifdef CONFIG_PM 137#ifdef CONFIG_PM
182static int igb_suspend(struct pci_dev *, pm_message_t); 138static int igb_suspend(struct pci_dev *, pm_message_t);
183static int igb_resume(struct pci_dev *); 139static int igb_resume(struct pci_dev *);
@@ -228,46 +184,12 @@ static struct pci_driver igb_driver = {
228 .err_handler = &igb_err_handler 184 .err_handler = &igb_err_handler
229}; 185};
230 186
231static int global_quad_port_a; /* global quad port a indication */
232
233MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 187MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
234MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); 188MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
235MODULE_LICENSE("GPL"); 189MODULE_LICENSE("GPL");
236MODULE_VERSION(DRV_VERSION); 190MODULE_VERSION(DRV_VERSION);
237 191
238/** 192/**
239 * Scale the NIC clock cycle by a large factor so that
240 * relatively small clock corrections can be added or
241 * substracted at each clock tick. The drawbacks of a
242 * large factor are a) that the clock register overflows
243 * more quickly (not such a big deal) and b) that the
244 * increment per tick has to fit into 24 bits.
245 *
246 * Note that
247 * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
248 * IGB_TSYNC_SCALE
249 * TIMINCA += TIMINCA * adjustment [ppm] / 1e9
250 *
251 * The base scale factor is intentionally a power of two
252 * so that the division in %struct timecounter can be done with
253 * a shift.
254 */
255#define IGB_TSYNC_SHIFT (19)
256#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
257
258/**
259 * The duration of one clock cycle of the NIC.
260 *
261 * @todo This hard-coded value is part of the specification and might change
262 * in future hardware revisions. Add revision check.
263 */
264#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
265
266#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
267# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
268#endif
269
270/**
271 * igb_read_clock - read raw cycle counter (to be used by time counter) 193 * igb_read_clock - read raw cycle counter (to be used by time counter)
272 */ 194 */
273static cycle_t igb_read_clock(const struct cyclecounter *tc) 195static cycle_t igb_read_clock(const struct cyclecounter *tc)
@@ -275,11 +197,21 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
275 struct igb_adapter *adapter = 197 struct igb_adapter *adapter =
276 container_of(tc, struct igb_adapter, cycles); 198 container_of(tc, struct igb_adapter, cycles);
277 struct e1000_hw *hw = &adapter->hw; 199 struct e1000_hw *hw = &adapter->hw;
278 u64 stamp; 200 u64 stamp = 0;
201 int shift = 0;
279 202
280 stamp = rd32(E1000_SYSTIML); 203 /*
281 stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL; 204 * The timestamp latches on lowest register read. For the 82580
205 * the lowest register is SYSTIMR instead of SYSTIML. However we never
206 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
207 */
208 if (hw->mac.type == e1000_82580) {
209 stamp = rd32(E1000_SYSTIMR) >> 8;
210 shift = IGB_82580_TSYNC_SHIFT;
211 }
282 212
213 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
214 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
283 return stamp; 215 return stamp;
284} 216}
285 217
@@ -320,17 +252,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
320#endif 252#endif
321 253
322/** 254/**
323 * igb_desc_unused - calculate if we have unused descriptors
324 **/
325static int igb_desc_unused(struct igb_ring *ring)
326{
327 if (ring->next_to_clean > ring->next_to_use)
328 return ring->next_to_clean - ring->next_to_use - 1;
329
330 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
331}
332
333/**
334 * igb_init_module - Driver Registration Routine 255 * igb_init_module - Driver Registration Routine
335 * 256 *
336 * igb_init_module is the first routine called when the driver is 257 * igb_init_module is the first routine called when the driver is
@@ -344,12 +265,9 @@ static int __init igb_init_module(void)
344 265
345 printk(KERN_INFO "%s\n", igb_copyright); 266 printk(KERN_INFO "%s\n", igb_copyright);
346 267
347 global_quad_port_a = 0;
348
349#ifdef CONFIG_IGB_DCA 268#ifdef CONFIG_IGB_DCA
350 dca_register_notify(&dca_notifier); 269 dca_register_notify(&dca_notifier);
351#endif 270#endif
352
353 ret = pci_register_driver(&igb_driver); 271 ret = pci_register_driver(&igb_driver);
354 return ret; 272 return ret;
355} 273}
@@ -382,8 +300,8 @@ module_exit(igb_exit_module);
382 **/ 300 **/
383static void igb_cache_ring_register(struct igb_adapter *adapter) 301static void igb_cache_ring_register(struct igb_adapter *adapter)
384{ 302{
385 int i; 303 int i = 0, j = 0;
386 unsigned int rbase_offset = adapter->vfs_allocated_count; 304 u32 rbase_offset = adapter->vfs_allocated_count;
387 305
388 switch (adapter->hw.mac.type) { 306 switch (adapter->hw.mac.type) {
389 case e1000_82576: 307 case e1000_82576:
@@ -392,23 +310,37 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
392 * In order to avoid collision we start at the first free queue 310 * In order to avoid collision we start at the first free queue
393 * and continue consuming queues in the same sequence 311 * and continue consuming queues in the same sequence
394 */ 312 */
395 for (i = 0; i < adapter->num_rx_queues; i++) 313 if (adapter->vfs_allocated_count) {
396 adapter->rx_ring[i].reg_idx = rbase_offset + 314 for (; i < adapter->rss_queues; i++)
397 Q_IDX_82576(i); 315 adapter->rx_ring[i].reg_idx = rbase_offset +
398 for (i = 0; i < adapter->num_tx_queues; i++) 316 Q_IDX_82576(i);
399 adapter->tx_ring[i].reg_idx = rbase_offset + 317 for (; j < adapter->rss_queues; j++)
400 Q_IDX_82576(i); 318 adapter->tx_ring[j].reg_idx = rbase_offset +
401 break; 319 Q_IDX_82576(j);
320 }
402 case e1000_82575: 321 case e1000_82575:
322 case e1000_82580:
403 default: 323 default:
404 for (i = 0; i < adapter->num_rx_queues; i++) 324 for (; i < adapter->num_rx_queues; i++)
405 adapter->rx_ring[i].reg_idx = i; 325 adapter->rx_ring[i].reg_idx = rbase_offset + i;
406 for (i = 0; i < adapter->num_tx_queues; i++) 326 for (; j < adapter->num_tx_queues; j++)
407 adapter->tx_ring[i].reg_idx = i; 327 adapter->tx_ring[j].reg_idx = rbase_offset + j;
408 break; 328 break;
409 } 329 }
410} 330}
411 331
332static void igb_free_queues(struct igb_adapter *adapter)
333{
334 kfree(adapter->tx_ring);
335 kfree(adapter->rx_ring);
336
337 adapter->tx_ring = NULL;
338 adapter->rx_ring = NULL;
339
340 adapter->num_rx_queues = 0;
341 adapter->num_tx_queues = 0;
342}
343
412/** 344/**
413 * igb_alloc_queues - Allocate memory for all rings 345 * igb_alloc_queues - Allocate memory for all rings
414 * @adapter: board private structure to initialize 346 * @adapter: board private structure to initialize
@@ -423,59 +355,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
423 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 355 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
424 sizeof(struct igb_ring), GFP_KERNEL); 356 sizeof(struct igb_ring), GFP_KERNEL);
425 if (!adapter->tx_ring) 357 if (!adapter->tx_ring)
426 return -ENOMEM; 358 goto err;
427 359
428 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 360 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
429 sizeof(struct igb_ring), GFP_KERNEL); 361 sizeof(struct igb_ring), GFP_KERNEL);
430 if (!adapter->rx_ring) { 362 if (!adapter->rx_ring)
431 kfree(adapter->tx_ring); 363 goto err;
432 return -ENOMEM;
433 }
434
435 adapter->rx_ring->buddy = adapter->tx_ring;
436 364
437 for (i = 0; i < adapter->num_tx_queues; i++) { 365 for (i = 0; i < adapter->num_tx_queues; i++) {
438 struct igb_ring *ring = &(adapter->tx_ring[i]); 366 struct igb_ring *ring = &(adapter->tx_ring[i]);
439 ring->count = adapter->tx_ring_count; 367 ring->count = adapter->tx_ring_count;
440 ring->adapter = adapter;
441 ring->queue_index = i; 368 ring->queue_index = i;
369 ring->pdev = adapter->pdev;
370 ring->netdev = adapter->netdev;
371 /* For 82575, context index must be unique per ring. */
372 if (adapter->hw.mac.type == e1000_82575)
373 ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
442 } 374 }
375
443 for (i = 0; i < adapter->num_rx_queues; i++) { 376 for (i = 0; i < adapter->num_rx_queues; i++) {
444 struct igb_ring *ring = &(adapter->rx_ring[i]); 377 struct igb_ring *ring = &(adapter->rx_ring[i]);
445 ring->count = adapter->rx_ring_count; 378 ring->count = adapter->rx_ring_count;
446 ring->adapter = adapter;
447 ring->queue_index = i; 379 ring->queue_index = i;
448 ring->itr_register = E1000_ITR; 380 ring->pdev = adapter->pdev;
449 381 ring->netdev = adapter->netdev;
450 /* set a default napi handler for each rx_ring */ 382 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
451 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); 383 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
384 /* set flag indicating ring supports SCTP checksum offload */
385 if (adapter->hw.mac.type >= e1000_82576)
386 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
452 } 387 }
453 388
454 igb_cache_ring_register(adapter); 389 igb_cache_ring_register(adapter);
455 return 0;
456}
457
458static void igb_free_queues(struct igb_adapter *adapter)
459{
460 int i;
461 390
462 for (i = 0; i < adapter->num_rx_queues; i++) 391 return 0;
463 netif_napi_del(&adapter->rx_ring[i].napi);
464 392
465 adapter->num_rx_queues = 0; 393err:
466 adapter->num_tx_queues = 0; 394 igb_free_queues(adapter);
467 395
468 kfree(adapter->tx_ring); 396 return -ENOMEM;
469 kfree(adapter->rx_ring);
470} 397}
471 398
472#define IGB_N0_QUEUE -1 399#define IGB_N0_QUEUE -1
473static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, 400static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
474 int tx_queue, int msix_vector)
475{ 401{
476 u32 msixbm = 0; 402 u32 msixbm = 0;
403 struct igb_adapter *adapter = q_vector->adapter;
477 struct e1000_hw *hw = &adapter->hw; 404 struct e1000_hw *hw = &adapter->hw;
478 u32 ivar, index; 405 u32 ivar, index;
406 int rx_queue = IGB_N0_QUEUE;
407 int tx_queue = IGB_N0_QUEUE;
408
409 if (q_vector->rx_ring)
410 rx_queue = q_vector->rx_ring->reg_idx;
411 if (q_vector->tx_ring)
412 tx_queue = q_vector->tx_ring->reg_idx;
479 413
480 switch (hw->mac.type) { 414 switch (hw->mac.type) {
481 case e1000_82575: 415 case e1000_82575:
@@ -483,16 +417,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
483 bitmask for the EICR/EIMS/EIMC registers. To assign one 417 bitmask for the EICR/EIMS/EIMC registers. To assign one
484 or more queues to a vector, we write the appropriate bits 418 or more queues to a vector, we write the appropriate bits
485 into the MSIXBM register for that vector. */ 419 into the MSIXBM register for that vector. */
486 if (rx_queue > IGB_N0_QUEUE) { 420 if (rx_queue > IGB_N0_QUEUE)
487 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 421 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
488 adapter->rx_ring[rx_queue].eims_value = msixbm; 422 if (tx_queue > IGB_N0_QUEUE)
489 }
490 if (tx_queue > IGB_N0_QUEUE) {
491 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 423 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
492 adapter->tx_ring[tx_queue].eims_value =
493 E1000_EICR_TX_QUEUE0 << tx_queue;
494 }
495 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 424 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
425 q_vector->eims_value = msixbm;
496 break; 426 break;
497 case e1000_82576: 427 case e1000_82576:
498 /* 82576 uses a table-based method for assigning vectors. 428 /* 82576 uses a table-based method for assigning vectors.
@@ -500,7 +430,40 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
500 a vector number along with a "valid" bit. Sadly, the layout 430 a vector number along with a "valid" bit. Sadly, the layout
501 of the table is somewhat counterintuitive. */ 431 of the table is somewhat counterintuitive. */
502 if (rx_queue > IGB_N0_QUEUE) { 432 if (rx_queue > IGB_N0_QUEUE) {
503 index = (rx_queue >> 1) + adapter->vfs_allocated_count; 433 index = (rx_queue & 0x7);
434 ivar = array_rd32(E1000_IVAR0, index);
435 if (rx_queue < 8) {
436 /* vector goes into low byte of register */
437 ivar = ivar & 0xFFFFFF00;
438 ivar |= msix_vector | E1000_IVAR_VALID;
439 } else {
440 /* vector goes into third byte of register */
441 ivar = ivar & 0xFF00FFFF;
442 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
443 }
444 array_wr32(E1000_IVAR0, index, ivar);
445 }
446 if (tx_queue > IGB_N0_QUEUE) {
447 index = (tx_queue & 0x7);
448 ivar = array_rd32(E1000_IVAR0, index);
449 if (tx_queue < 8) {
450 /* vector goes into second byte of register */
451 ivar = ivar & 0xFFFF00FF;
452 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
453 } else {
454 /* vector goes into high byte of register */
455 ivar = ivar & 0x00FFFFFF;
456 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
457 }
458 array_wr32(E1000_IVAR0, index, ivar);
459 }
460 q_vector->eims_value = 1 << msix_vector;
461 break;
462 case e1000_82580:
463 /* 82580 uses the same table-based approach as 82576 but has fewer
464 entries as a result we carry over for queues greater than 4. */
465 if (rx_queue > IGB_N0_QUEUE) {
466 index = (rx_queue >> 1);
504 ivar = array_rd32(E1000_IVAR0, index); 467 ivar = array_rd32(E1000_IVAR0, index);
505 if (rx_queue & 0x1) { 468 if (rx_queue & 0x1) {
506 /* vector goes into third byte of register */ 469 /* vector goes into third byte of register */
@@ -511,11 +474,10 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
511 ivar = ivar & 0xFFFFFF00; 474 ivar = ivar & 0xFFFFFF00;
512 ivar |= msix_vector | E1000_IVAR_VALID; 475 ivar |= msix_vector | E1000_IVAR_VALID;
513 } 476 }
514 adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
515 array_wr32(E1000_IVAR0, index, ivar); 477 array_wr32(E1000_IVAR0, index, ivar);
516 } 478 }
517 if (tx_queue > IGB_N0_QUEUE) { 479 if (tx_queue > IGB_N0_QUEUE) {
518 index = (tx_queue >> 1) + adapter->vfs_allocated_count; 480 index = (tx_queue >> 1);
519 ivar = array_rd32(E1000_IVAR0, index); 481 ivar = array_rd32(E1000_IVAR0, index);
520 if (tx_queue & 0x1) { 482 if (tx_queue & 0x1) {
521 /* vector goes into high byte of register */ 483 /* vector goes into high byte of register */
@@ -526,9 +488,9 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
526 ivar = ivar & 0xFFFF00FF; 488 ivar = ivar & 0xFFFF00FF;
527 ivar |= (msix_vector | E1000_IVAR_VALID) << 8; 489 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
528 } 490 }
529 adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
530 array_wr32(E1000_IVAR0, index, ivar); 491 array_wr32(E1000_IVAR0, index, ivar);
531 } 492 }
493 q_vector->eims_value = 1 << msix_vector;
532 break; 494 break;
533 default: 495 default:
534 BUG(); 496 BUG();
@@ -549,43 +511,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
549 struct e1000_hw *hw = &adapter->hw; 511 struct e1000_hw *hw = &adapter->hw;
550 512
551 adapter->eims_enable_mask = 0; 513 adapter->eims_enable_mask = 0;
552 if (hw->mac.type == e1000_82576)
553 /* Turn on MSI-X capability first, or our settings
554 * won't stick. And it will take days to debug. */
555 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
556 E1000_GPIE_PBA | E1000_GPIE_EIAME |
557 E1000_GPIE_NSICR);
558
559 for (i = 0; i < adapter->num_tx_queues; i++) {
560 struct igb_ring *tx_ring = &adapter->tx_ring[i];
561 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
562 adapter->eims_enable_mask |= tx_ring->eims_value;
563 if (tx_ring->itr_val)
564 writel(tx_ring->itr_val,
565 hw->hw_addr + tx_ring->itr_register);
566 else
567 writel(1, hw->hw_addr + tx_ring->itr_register);
568 }
569
570 for (i = 0; i < adapter->num_rx_queues; i++) {
571 struct igb_ring *rx_ring = &adapter->rx_ring[i];
572 rx_ring->buddy = NULL;
573 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
574 adapter->eims_enable_mask |= rx_ring->eims_value;
575 if (rx_ring->itr_val)
576 writel(rx_ring->itr_val,
577 hw->hw_addr + rx_ring->itr_register);
578 else
579 writel(1, hw->hw_addr + rx_ring->itr_register);
580 }
581
582 514
583 /* set vector for other causes, i.e. link changes */ 515 /* set vector for other causes, i.e. link changes */
584 switch (hw->mac.type) { 516 switch (hw->mac.type) {
585 case e1000_82575: 517 case e1000_82575:
586 array_wr32(E1000_MSIXBM(0), vector++,
587 E1000_EIMS_OTHER);
588
589 tmp = rd32(E1000_CTRL_EXT); 518 tmp = rd32(E1000_CTRL_EXT);
590 /* enable MSI-X PBA support*/ 519 /* enable MSI-X PBA support*/
591 tmp |= E1000_CTRL_EXT_PBA_CLR; 520 tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -595,22 +524,41 @@ static void igb_configure_msix(struct igb_adapter *adapter)
595 tmp |= E1000_CTRL_EXT_IRCA; 524 tmp |= E1000_CTRL_EXT_IRCA;
596 525
597 wr32(E1000_CTRL_EXT, tmp); 526 wr32(E1000_CTRL_EXT, tmp);
598 adapter->eims_enable_mask |= E1000_EIMS_OTHER; 527
528 /* enable msix_other interrupt */
529 array_wr32(E1000_MSIXBM(0), vector++,
530 E1000_EIMS_OTHER);
599 adapter->eims_other = E1000_EIMS_OTHER; 531 adapter->eims_other = E1000_EIMS_OTHER;
600 532
601 break; 533 break;
602 534
603 case e1000_82576: 535 case e1000_82576:
536 case e1000_82580:
537 /* Turn on MSI-X capability first, or our settings
538 * won't stick. And it will take days to debug. */
539 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
540 E1000_GPIE_PBA | E1000_GPIE_EIAME |
541 E1000_GPIE_NSICR);
542
543 /* enable msix_other interrupt */
544 adapter->eims_other = 1 << vector;
604 tmp = (vector++ | E1000_IVAR_VALID) << 8; 545 tmp = (vector++ | E1000_IVAR_VALID) << 8;
605 wr32(E1000_IVAR_MISC, tmp);
606 546
607 adapter->eims_enable_mask = (1 << (vector)) - 1; 547 wr32(E1000_IVAR_MISC, tmp);
608 adapter->eims_other = 1 << (vector - 1);
609 break; 548 break;
610 default: 549 default:
611 /* do nothing, since nothing else supports MSI-X */ 550 /* do nothing, since nothing else supports MSI-X */
612 break; 551 break;
613 } /* switch (hw->mac.type) */ 552 } /* switch (hw->mac.type) */
553
554 adapter->eims_enable_mask |= adapter->eims_other;
555
556 for (i = 0; i < adapter->num_q_vectors; i++) {
557 struct igb_q_vector *q_vector = adapter->q_vector[i];
558 igb_assign_vector(q_vector, vector++);
559 adapter->eims_enable_mask |= q_vector->eims_value;
560 }
561
614 wrfl(); 562 wrfl();
615} 563}
616 564
@@ -623,43 +571,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
623static int igb_request_msix(struct igb_adapter *adapter) 571static int igb_request_msix(struct igb_adapter *adapter)
624{ 572{
625 struct net_device *netdev = adapter->netdev; 573 struct net_device *netdev = adapter->netdev;
574 struct e1000_hw *hw = &adapter->hw;
626 int i, err = 0, vector = 0; 575 int i, err = 0, vector = 0;
627 576
628 vector = 0; 577 err = request_irq(adapter->msix_entries[vector].vector,
629 578 igb_msix_other, 0, netdev->name, adapter);
630 for (i = 0; i < adapter->num_tx_queues; i++) { 579 if (err)
631 struct igb_ring *ring = &(adapter->tx_ring[i]); 580 goto out;
632 sprintf(ring->name, "%s-tx-%d", netdev->name, i); 581 vector++;
633 err = request_irq(adapter->msix_entries[vector].vector, 582
634 &igb_msix_tx, 0, ring->name, 583 for (i = 0; i < adapter->num_q_vectors; i++) {
635 &(adapter->tx_ring[i])); 584 struct igb_q_vector *q_vector = adapter->q_vector[i];
636 if (err) 585
637 goto out; 586 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
638 ring->itr_register = E1000_EITR(0) + (vector << 2); 587
639 ring->itr_val = 976; /* ~4000 ints/sec */ 588 if (q_vector->rx_ring && q_vector->tx_ring)
640 vector++; 589 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
641 } 590 q_vector->rx_ring->queue_index);
642 for (i = 0; i < adapter->num_rx_queues; i++) { 591 else if (q_vector->tx_ring)
643 struct igb_ring *ring = &(adapter->rx_ring[i]); 592 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
644 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 593 q_vector->tx_ring->queue_index);
645 sprintf(ring->name, "%s-rx-%d", netdev->name, i); 594 else if (q_vector->rx_ring)
595 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
596 q_vector->rx_ring->queue_index);
646 else 597 else
647 memcpy(ring->name, netdev->name, IFNAMSIZ); 598 sprintf(q_vector->name, "%s-unused", netdev->name);
599
648 err = request_irq(adapter->msix_entries[vector].vector, 600 err = request_irq(adapter->msix_entries[vector].vector,
649 &igb_msix_rx, 0, ring->name, 601 igb_msix_ring, 0, q_vector->name,
650 &(adapter->rx_ring[i])); 602 q_vector);
651 if (err) 603 if (err)
652 goto out; 604 goto out;
653 ring->itr_register = E1000_EITR(0) + (vector << 2);
654 ring->itr_val = adapter->itr;
655 vector++; 605 vector++;
656 } 606 }
657 607
658 err = request_irq(adapter->msix_entries[vector].vector,
659 &igb_msix_other, 0, netdev->name, netdev);
660 if (err)
661 goto out;
662
663 igb_configure_msix(adapter); 608 igb_configure_msix(adapter);
664 return 0; 609 return 0;
665out: 610out:
@@ -672,11 +617,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
672 pci_disable_msix(adapter->pdev); 617 pci_disable_msix(adapter->pdev);
673 kfree(adapter->msix_entries); 618 kfree(adapter->msix_entries);
674 adapter->msix_entries = NULL; 619 adapter->msix_entries = NULL;
675 } else if (adapter->flags & IGB_FLAG_HAS_MSI) 620 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
676 pci_disable_msi(adapter->pdev); 621 pci_disable_msi(adapter->pdev);
677 return; 622 }
678} 623}
679 624
625/**
626 * igb_free_q_vectors - Free memory allocated for interrupt vectors
627 * @adapter: board private structure to initialize
628 *
629 * This function frees the memory allocated to the q_vectors. In addition if
630 * NAPI is enabled it will delete any references to the NAPI struct prior
631 * to freeing the q_vector.
632 **/
633static void igb_free_q_vectors(struct igb_adapter *adapter)
634{
635 int v_idx;
636
637 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
638 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
639 adapter->q_vector[v_idx] = NULL;
640 netif_napi_del(&q_vector->napi);
641 kfree(q_vector);
642 }
643 adapter->num_q_vectors = 0;
644}
645
646/**
647 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
648 *
649 * This function resets the device so that it has 0 rx queues, tx queues, and
650 * MSI-X interrupts allocated.
651 */
652static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
653{
654 igb_free_queues(adapter);
655 igb_free_q_vectors(adapter);
656 igb_reset_interrupt_capability(adapter);
657}
680 658
681/** 659/**
682 * igb_set_interrupt_capability - set MSI or MSI-X if supported 660 * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -690,11 +668,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
690 int numvecs, i; 668 int numvecs, i;
691 669
692 /* Number of supported queues. */ 670 /* Number of supported queues. */
693 /* Having more queues than CPUs doesn't make sense. */ 671 adapter->num_rx_queues = adapter->rss_queues;
694 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 672 adapter->num_tx_queues = adapter->rss_queues;
695 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); 673
674 /* start with one vector for every rx queue */
675 numvecs = adapter->num_rx_queues;
696 676
697 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; 677 /* if tx handler is seperate add 1 for every tx queue */
678 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
679 numvecs += adapter->num_tx_queues;
680
681 /* store the number of vectors reserved for queues */
682 adapter->num_q_vectors = numvecs;
683
684 /* add 1 vector for link status interrupts */
685 numvecs++;
698 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 686 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
699 GFP_KERNEL); 687 GFP_KERNEL);
700 if (!adapter->msix_entries) 688 if (!adapter->msix_entries)
@@ -728,8 +716,12 @@ msi_only:
728 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); 716 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
729 } 717 }
730#endif 718#endif
719 adapter->vfs_allocated_count = 0;
720 adapter->rss_queues = 1;
721 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
731 adapter->num_rx_queues = 1; 722 adapter->num_rx_queues = 1;
732 adapter->num_tx_queues = 1; 723 adapter->num_tx_queues = 1;
724 adapter->num_q_vectors = 1;
733 if (!pci_enable_msi(adapter->pdev)) 725 if (!pci_enable_msi(adapter->pdev))
734 adapter->flags |= IGB_FLAG_HAS_MSI; 726 adapter->flags |= IGB_FLAG_HAS_MSI;
735out: 727out:
@@ -739,6 +731,143 @@ out:
739} 731}
740 732
741/** 733/**
734 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
735 * @adapter: board private structure to initialize
736 *
737 * We allocate one q_vector per queue interrupt. If allocation fails we
738 * return -ENOMEM.
739 **/
740static int igb_alloc_q_vectors(struct igb_adapter *adapter)
741{
742 struct igb_q_vector *q_vector;
743 struct e1000_hw *hw = &adapter->hw;
744 int v_idx;
745
746 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
747 q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
748 if (!q_vector)
749 goto err_out;
750 q_vector->adapter = adapter;
751 q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
752 q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
753 q_vector->itr_val = IGB_START_ITR;
754 q_vector->set_itr = 1;
755 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
756 adapter->q_vector[v_idx] = q_vector;
757 }
758 return 0;
759
760err_out:
761 while (v_idx) {
762 v_idx--;
763 q_vector = adapter->q_vector[v_idx];
764 netif_napi_del(&q_vector->napi);
765 kfree(q_vector);
766 adapter->q_vector[v_idx] = NULL;
767 }
768 return -ENOMEM;
769}
770
771static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
772 int ring_idx, int v_idx)
773{
774 struct igb_q_vector *q_vector;
775
776 q_vector = adapter->q_vector[v_idx];
777 q_vector->rx_ring = &adapter->rx_ring[ring_idx];
778 q_vector->rx_ring->q_vector = q_vector;
779 q_vector->itr_val = adapter->rx_itr_setting;
780 if (q_vector->itr_val && q_vector->itr_val <= 3)
781 q_vector->itr_val = IGB_START_ITR;
782}
783
784static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
785 int ring_idx, int v_idx)
786{
787 struct igb_q_vector *q_vector;
788
789 q_vector = adapter->q_vector[v_idx];
790 q_vector->tx_ring = &adapter->tx_ring[ring_idx];
791 q_vector->tx_ring->q_vector = q_vector;
792 q_vector->itr_val = adapter->tx_itr_setting;
793 if (q_vector->itr_val && q_vector->itr_val <= 3)
794 q_vector->itr_val = IGB_START_ITR;
795}
796
797/**
798 * igb_map_ring_to_vector - maps allocated queues to vectors
799 *
800 * This function maps the recently allocated queues to vectors.
801 **/
802static int igb_map_ring_to_vector(struct igb_adapter *adapter)
803{
804 int i;
805 int v_idx = 0;
806
807 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
808 (adapter->num_q_vectors < adapter->num_tx_queues))
809 return -ENOMEM;
810
811 if (adapter->num_q_vectors >=
812 (adapter->num_rx_queues + adapter->num_tx_queues)) {
813 for (i = 0; i < adapter->num_rx_queues; i++)
814 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
815 for (i = 0; i < adapter->num_tx_queues; i++)
816 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
817 } else {
818 for (i = 0; i < adapter->num_rx_queues; i++) {
819 if (i < adapter->num_tx_queues)
820 igb_map_tx_ring_to_vector(adapter, i, v_idx);
821 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
822 }
823 for (; i < adapter->num_tx_queues; i++)
824 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
825 }
826 return 0;
827}
828
829/**
830 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
831 *
832 * This function initializes the interrupts and allocates all of the queues.
833 **/
834static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
835{
836 struct pci_dev *pdev = adapter->pdev;
837 int err;
838
839 igb_set_interrupt_capability(adapter);
840
841 err = igb_alloc_q_vectors(adapter);
842 if (err) {
843 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
844 goto err_alloc_q_vectors;
845 }
846
847 err = igb_alloc_queues(adapter);
848 if (err) {
849 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
850 goto err_alloc_queues;
851 }
852
853 err = igb_map_ring_to_vector(adapter);
854 if (err) {
855 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
856 goto err_map_queues;
857 }
858
859
860 return 0;
861err_map_queues:
862 igb_free_queues(adapter);
863err_alloc_queues:
864 igb_free_q_vectors(adapter);
865err_alloc_q_vectors:
866 igb_reset_interrupt_capability(adapter);
867 return err;
868}
869
870/**
742 * igb_request_irq - initialize interrupts 871 * igb_request_irq - initialize interrupts
743 * 872 *
744 * Attempts to configure interrupts using the best available 873 * Attempts to configure interrupts using the best available
@@ -747,6 +876,7 @@ out:
747static int igb_request_irq(struct igb_adapter *adapter) 876static int igb_request_irq(struct igb_adapter *adapter)
748{ 877{
749 struct net_device *netdev = adapter->netdev; 878 struct net_device *netdev = adapter->netdev;
879 struct pci_dev *pdev = adapter->pdev;
750 struct e1000_hw *hw = &adapter->hw; 880 struct e1000_hw *hw = &adapter->hw;
751 int err = 0; 881 int err = 0;
752 882
@@ -755,19 +885,38 @@ static int igb_request_irq(struct igb_adapter *adapter)
755 if (!err) 885 if (!err)
756 goto request_done; 886 goto request_done;
757 /* fall back to MSI */ 887 /* fall back to MSI */
758 igb_reset_interrupt_capability(adapter); 888 igb_clear_interrupt_scheme(adapter);
759 if (!pci_enable_msi(adapter->pdev)) 889 if (!pci_enable_msi(adapter->pdev))
760 adapter->flags |= IGB_FLAG_HAS_MSI; 890 adapter->flags |= IGB_FLAG_HAS_MSI;
761 igb_free_all_tx_resources(adapter); 891 igb_free_all_tx_resources(adapter);
762 igb_free_all_rx_resources(adapter); 892 igb_free_all_rx_resources(adapter);
893 adapter->num_tx_queues = 1;
763 adapter->num_rx_queues = 1; 894 adapter->num_rx_queues = 1;
764 igb_alloc_queues(adapter); 895 adapter->num_q_vectors = 1;
896 err = igb_alloc_q_vectors(adapter);
897 if (err) {
898 dev_err(&pdev->dev,
899 "Unable to allocate memory for vectors\n");
900 goto request_done;
901 }
902 err = igb_alloc_queues(adapter);
903 if (err) {
904 dev_err(&pdev->dev,
905 "Unable to allocate memory for queues\n");
906 igb_free_q_vectors(adapter);
907 goto request_done;
908 }
909 igb_setup_all_tx_resources(adapter);
910 igb_setup_all_rx_resources(adapter);
765 } else { 911 } else {
766 switch (hw->mac.type) { 912 switch (hw->mac.type) {
767 case e1000_82575: 913 case e1000_82575:
768 wr32(E1000_MSIXBM(0), 914 wr32(E1000_MSIXBM(0),
769 (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); 915 (E1000_EICR_RX_QUEUE0 |
916 E1000_EICR_TX_QUEUE0 |
917 E1000_EIMS_OTHER));
770 break; 918 break;
919 case e1000_82580:
771 case e1000_82576: 920 case e1000_82576:
772 wr32(E1000_IVAR0, E1000_IVAR_VALID); 921 wr32(E1000_IVAR0, E1000_IVAR_VALID);
773 break; 922 break;
@@ -777,17 +926,18 @@ static int igb_request_irq(struct igb_adapter *adapter)
777 } 926 }
778 927
779 if (adapter->flags & IGB_FLAG_HAS_MSI) { 928 if (adapter->flags & IGB_FLAG_HAS_MSI) {
780 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, 929 err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
781 netdev->name, netdev); 930 netdev->name, adapter);
782 if (!err) 931 if (!err)
783 goto request_done; 932 goto request_done;
933
784 /* fall back to legacy interrupts */ 934 /* fall back to legacy interrupts */
785 igb_reset_interrupt_capability(adapter); 935 igb_reset_interrupt_capability(adapter);
786 adapter->flags &= ~IGB_FLAG_HAS_MSI; 936 adapter->flags &= ~IGB_FLAG_HAS_MSI;
787 } 937 }
788 938
789 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, 939 err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
790 netdev->name, netdev); 940 netdev->name, adapter);
791 941
792 if (err) 942 if (err)
793 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", 943 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -799,23 +949,19 @@ request_done:
799 949
800static void igb_free_irq(struct igb_adapter *adapter) 950static void igb_free_irq(struct igb_adapter *adapter)
801{ 951{
802 struct net_device *netdev = adapter->netdev;
803
804 if (adapter->msix_entries) { 952 if (adapter->msix_entries) {
805 int vector = 0, i; 953 int vector = 0, i;
806 954
807 for (i = 0; i < adapter->num_tx_queues; i++) 955 free_irq(adapter->msix_entries[vector++].vector, adapter);
808 free_irq(adapter->msix_entries[vector++].vector,
809 &(adapter->tx_ring[i]));
810 for (i = 0; i < adapter->num_rx_queues; i++)
811 free_irq(adapter->msix_entries[vector++].vector,
812 &(adapter->rx_ring[i]));
813 956
814 free_irq(adapter->msix_entries[vector++].vector, netdev); 957 for (i = 0; i < adapter->num_q_vectors; i++) {
815 return; 958 struct igb_q_vector *q_vector = adapter->q_vector[i];
959 free_irq(adapter->msix_entries[vector++].vector,
960 q_vector);
961 }
962 } else {
963 free_irq(adapter->pdev->irq, adapter);
816 } 964 }
817
818 free_irq(adapter->pdev->irq, netdev);
819} 965}
820 966
821/** 967/**
@@ -826,6 +972,11 @@ static void igb_irq_disable(struct igb_adapter *adapter)
826{ 972{
827 struct e1000_hw *hw = &adapter->hw; 973 struct e1000_hw *hw = &adapter->hw;
828 974
975 /*
976 * we need to be careful when disabling interrupts. The VFs are also
977 * mapped into these registers and so clearing the bits can cause
978 * issues on the VF drivers so we only need to clear what we set
979 */
829 if (adapter->msix_entries) { 980 if (adapter->msix_entries) {
830 u32 regval = rd32(E1000_EIAM); 981 u32 regval = rd32(E1000_EIAM);
831 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 982 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
@@ -849,41 +1000,47 @@ static void igb_irq_enable(struct igb_adapter *adapter)
849 struct e1000_hw *hw = &adapter->hw; 1000 struct e1000_hw *hw = &adapter->hw;
850 1001
851 if (adapter->msix_entries) { 1002 if (adapter->msix_entries) {
1003 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
852 u32 regval = rd32(E1000_EIAC); 1004 u32 regval = rd32(E1000_EIAC);
853 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 1005 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
854 regval = rd32(E1000_EIAM); 1006 regval = rd32(E1000_EIAM);
855 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); 1007 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
856 wr32(E1000_EIMS, adapter->eims_enable_mask); 1008 wr32(E1000_EIMS, adapter->eims_enable_mask);
857 if (adapter->vfs_allocated_count) 1009 if (adapter->vfs_allocated_count) {
858 wr32(E1000_MBVFIMR, 0xFF); 1010 wr32(E1000_MBVFIMR, 0xFF);
859 wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | 1011 ims |= E1000_IMS_VMMB;
860 E1000_IMS_DOUTSYNC)); 1012 }
1013 if (adapter->hw.mac.type == e1000_82580)
1014 ims |= E1000_IMS_DRSTA;
1015
1016 wr32(E1000_IMS, ims);
861 } else { 1017 } else {
862 wr32(E1000_IMS, IMS_ENABLE_MASK); 1018 wr32(E1000_IMS, IMS_ENABLE_MASK |
863 wr32(E1000_IAM, IMS_ENABLE_MASK); 1019 E1000_IMS_DRSTA);
1020 wr32(E1000_IAM, IMS_ENABLE_MASK |
1021 E1000_IMS_DRSTA);
864 } 1022 }
865} 1023}
866 1024
867static void igb_update_mng_vlan(struct igb_adapter *adapter) 1025static void igb_update_mng_vlan(struct igb_adapter *adapter)
868{ 1026{
869 struct net_device *netdev = adapter->netdev; 1027 struct e1000_hw *hw = &adapter->hw;
870 u16 vid = adapter->hw.mng_cookie.vlan_id; 1028 u16 vid = adapter->hw.mng_cookie.vlan_id;
871 u16 old_vid = adapter->mng_vlan_id; 1029 u16 old_vid = adapter->mng_vlan_id;
872 if (adapter->vlgrp) {
873 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
874 if (adapter->hw.mng_cookie.status &
875 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
876 igb_vlan_rx_add_vid(netdev, vid);
877 adapter->mng_vlan_id = vid;
878 } else
879 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
880 1030
881 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && 1031 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
882 (vid != old_vid) && 1032 /* add VID to filter table */
883 !vlan_group_get_device(adapter->vlgrp, old_vid)) 1033 igb_vfta_set(hw, vid, true);
884 igb_vlan_rx_kill_vid(netdev, old_vid); 1034 adapter->mng_vlan_id = vid;
885 } else 1035 } else {
886 adapter->mng_vlan_id = vid; 1036 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1037 }
1038
1039 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
1040 (vid != old_vid) &&
1041 !vlan_group_get_device(adapter->vlgrp, old_vid)) {
1042 /* remove VID from filter table */
1043 igb_vfta_set(hw, old_vid, false);
887 } 1044 }
888} 1045}
889 1046
@@ -907,7 +1064,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
907 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1064 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
908} 1065}
909 1066
910
911/** 1067/**
912 * igb_get_hw_control - get control of the h/w from f/w 1068 * igb_get_hw_control - get control of the h/w from f/w
913 * @adapter: address of board private structure 1069 * @adapter: address of board private structure
@@ -942,8 +1098,11 @@ static void igb_configure(struct igb_adapter *adapter)
942 1098
943 igb_restore_vlan(adapter); 1099 igb_restore_vlan(adapter);
944 1100
945 igb_configure_tx(adapter); 1101 igb_setup_tctl(adapter);
1102 igb_setup_mrqc(adapter);
946 igb_setup_rctl(adapter); 1103 igb_setup_rctl(adapter);
1104
1105 igb_configure_tx(adapter);
947 igb_configure_rx(adapter); 1106 igb_configure_rx(adapter);
948 1107
949 igb_rx_fifo_flush_82575(&adapter->hw); 1108 igb_rx_fifo_flush_82575(&adapter->hw);
@@ -965,7 +1124,6 @@ static void igb_configure(struct igb_adapter *adapter)
965 * igb_up - Open the interface and prepare it to handle traffic 1124 * igb_up - Open the interface and prepare it to handle traffic
966 * @adapter: board private structure 1125 * @adapter: board private structure
967 **/ 1126 **/
968
969int igb_up(struct igb_adapter *adapter) 1127int igb_up(struct igb_adapter *adapter)
970{ 1128{
971 struct e1000_hw *hw = &adapter->hw; 1129 struct e1000_hw *hw = &adapter->hw;
@@ -976,30 +1134,37 @@ int igb_up(struct igb_adapter *adapter)
976 1134
977 clear_bit(__IGB_DOWN, &adapter->state); 1135 clear_bit(__IGB_DOWN, &adapter->state);
978 1136
979 for (i = 0; i < adapter->num_rx_queues; i++) 1137 for (i = 0; i < adapter->num_q_vectors; i++) {
980 napi_enable(&adapter->rx_ring[i].napi); 1138 struct igb_q_vector *q_vector = adapter->q_vector[i];
1139 napi_enable(&q_vector->napi);
1140 }
981 if (adapter->msix_entries) 1141 if (adapter->msix_entries)
982 igb_configure_msix(adapter); 1142 igb_configure_msix(adapter);
983 1143
984 igb_vmm_control(adapter);
985 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
986 igb_set_vmolr(hw, adapter->vfs_allocated_count);
987
988 /* Clear any pending interrupts. */ 1144 /* Clear any pending interrupts. */
989 rd32(E1000_ICR); 1145 rd32(E1000_ICR);
990 igb_irq_enable(adapter); 1146 igb_irq_enable(adapter);
991 1147
1148 /* notify VFs that reset has been completed */
1149 if (adapter->vfs_allocated_count) {
1150 u32 reg_data = rd32(E1000_CTRL_EXT);
1151 reg_data |= E1000_CTRL_EXT_PFRSTD;
1152 wr32(E1000_CTRL_EXT, reg_data);
1153 }
1154
992 netif_tx_start_all_queues(adapter->netdev); 1155 netif_tx_start_all_queues(adapter->netdev);
993 1156
994 /* Fire a link change interrupt to start the watchdog. */ 1157 /* start the watchdog. */
995 wr32(E1000_ICS, E1000_ICS_LSC); 1158 hw->mac.get_link_status = 1;
1159 schedule_work(&adapter->watchdog_task);
1160
996 return 0; 1161 return 0;
997} 1162}
998 1163
999void igb_down(struct igb_adapter *adapter) 1164void igb_down(struct igb_adapter *adapter)
1000{ 1165{
1001 struct e1000_hw *hw = &adapter->hw;
1002 struct net_device *netdev = adapter->netdev; 1166 struct net_device *netdev = adapter->netdev;
1167 struct e1000_hw *hw = &adapter->hw;
1003 u32 tctl, rctl; 1168 u32 tctl, rctl;
1004 int i; 1169 int i;
1005 1170
@@ -1022,8 +1187,10 @@ void igb_down(struct igb_adapter *adapter)
1022 wrfl(); 1187 wrfl();
1023 msleep(10); 1188 msleep(10);
1024 1189
1025 for (i = 0; i < adapter->num_rx_queues; i++) 1190 for (i = 0; i < adapter->num_q_vectors; i++) {
1026 napi_disable(&adapter->rx_ring[i].napi); 1191 struct igb_q_vector *q_vector = adapter->q_vector[i];
1192 napi_disable(&q_vector->napi);
1193 }
1027 1194
1028 igb_irq_disable(adapter); 1195 igb_irq_disable(adapter);
1029 1196
@@ -1062,6 +1229,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
1062 1229
1063void igb_reset(struct igb_adapter *adapter) 1230void igb_reset(struct igb_adapter *adapter)
1064{ 1231{
1232 struct pci_dev *pdev = adapter->pdev;
1065 struct e1000_hw *hw = &adapter->hw; 1233 struct e1000_hw *hw = &adapter->hw;
1066 struct e1000_mac_info *mac = &hw->mac; 1234 struct e1000_mac_info *mac = &hw->mac;
1067 struct e1000_fc_info *fc = &hw->fc; 1235 struct e1000_fc_info *fc = &hw->fc;
@@ -1072,8 +1240,13 @@ void igb_reset(struct igb_adapter *adapter)
1072 * To take effect CTRL.RST is required. 1240 * To take effect CTRL.RST is required.
1073 */ 1241 */
1074 switch (mac->type) { 1242 switch (mac->type) {
1243 case e1000_82580:
1244 pba = rd32(E1000_RXPBS);
1245 pba = igb_rxpbs_adjust_82580(pba);
1246 break;
1075 case e1000_82576: 1247 case e1000_82576:
1076 pba = E1000_PBA_64K; 1248 pba = rd32(E1000_RXPBS);
1249 pba &= E1000_RXPBS_SIZE_MASK_82576;
1077 break; 1250 break;
1078 case e1000_82575: 1251 case e1000_82575:
1079 default: 1252 default:
@@ -1148,10 +1321,10 @@ void igb_reset(struct igb_adapter *adapter)
1148 if (adapter->vfs_allocated_count) { 1321 if (adapter->vfs_allocated_count) {
1149 int i; 1322 int i;
1150 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1323 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1151 adapter->vf_data[i].clear_to_send = false; 1324 adapter->vf_data[i].flags = 0;
1152 1325
1153 /* ping all the active vfs to let them know we are going down */ 1326 /* ping all the active vfs to let them know we are going down */
1154 igb_ping_all_vfs(adapter); 1327 igb_ping_all_vfs(adapter);
1155 1328
1156 /* disable transmits and receives */ 1329 /* disable transmits and receives */
1157 wr32(E1000_VFRE, 0); 1330 wr32(E1000_VFRE, 0);
@@ -1159,23 +1332,28 @@ void igb_reset(struct igb_adapter *adapter)
1159 } 1332 }
1160 1333
1161 /* Allow time for pending master requests to run */ 1334 /* Allow time for pending master requests to run */
1162 adapter->hw.mac.ops.reset_hw(&adapter->hw); 1335 hw->mac.ops.reset_hw(hw);
1163 wr32(E1000_WUC, 0); 1336 wr32(E1000_WUC, 0);
1164 1337
1165 if (adapter->hw.mac.ops.init_hw(&adapter->hw)) 1338 if (hw->mac.ops.init_hw(hw))
1166 dev_err(&adapter->pdev->dev, "Hardware Error\n"); 1339 dev_err(&pdev->dev, "Hardware Error\n");
1167 1340
1341 if (hw->mac.type == e1000_82580) {
1342 u32 reg = rd32(E1000_PCIEMISC);
1343 wr32(E1000_PCIEMISC,
1344 reg & ~E1000_PCIEMISC_LX_DECISION);
1345 }
1168 igb_update_mng_vlan(adapter); 1346 igb_update_mng_vlan(adapter);
1169 1347
1170 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1348 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1171 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1349 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1172 1350
1173 igb_reset_adaptive(&adapter->hw); 1351 igb_reset_adaptive(hw);
1174 igb_get_phy_info(&adapter->hw); 1352 igb_get_phy_info(hw);
1175} 1353}
1176 1354
1177static const struct net_device_ops igb_netdev_ops = { 1355static const struct net_device_ops igb_netdev_ops = {
1178 .ndo_open = igb_open, 1356 .ndo_open = igb_open,
1179 .ndo_stop = igb_close, 1357 .ndo_stop = igb_close,
1180 .ndo_start_xmit = igb_xmit_frame_adv, 1358 .ndo_start_xmit = igb_xmit_frame_adv,
1181 .ndo_get_stats = igb_get_stats, 1359 .ndo_get_stats = igb_get_stats,
@@ -1211,10 +1389,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1211 struct net_device *netdev; 1389 struct net_device *netdev;
1212 struct igb_adapter *adapter; 1390 struct igb_adapter *adapter;
1213 struct e1000_hw *hw; 1391 struct e1000_hw *hw;
1392 u16 eeprom_data = 0;
1393 static int global_quad_port_a; /* global quad port a indication */
1214 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1394 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1215 unsigned long mmio_start, mmio_len; 1395 unsigned long mmio_start, mmio_len;
1216 int err, pci_using_dac; 1396 int err, pci_using_dac;
1217 u16 eeprom_data = 0;
1218 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1397 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1219 u32 part_num; 1398 u32 part_num;
1220 1399
@@ -1291,8 +1470,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1291 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1470 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1292 hw->subsystem_device_id = pdev->subsystem_device; 1471 hw->subsystem_device_id = pdev->subsystem_device;
1293 1472
1294 /* setup the private structure */
1295 hw->back = adapter;
1296 /* Copy the default MAC, PHY and NVM function pointers */ 1473 /* Copy the default MAC, PHY and NVM function pointers */
1297 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 1474 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1298 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 1475 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1302,46 +1479,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1302 if (err) 1479 if (err)
1303 goto err_sw_init; 1480 goto err_sw_init;
1304 1481
1305#ifdef CONFIG_PCI_IOV
1306 /* since iov functionality isn't critical to base device function we
1307 * can accept failure. If it fails we don't allow iov to be enabled */
1308 if (hw->mac.type == e1000_82576) {
1309 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1310 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1311 int i;
1312 unsigned char mac_addr[ETH_ALEN];
1313
1314 if (num_vfs) {
1315 adapter->vf_data = kcalloc(num_vfs,
1316 sizeof(struct vf_data_storage),
1317 GFP_KERNEL);
1318 if (!adapter->vf_data) {
1319 dev_err(&pdev->dev,
1320 "Could not allocate VF private data - "
1321 "IOV enable failed\n");
1322 } else {
1323 err = pci_enable_sriov(pdev, num_vfs);
1324 if (!err) {
1325 adapter->vfs_allocated_count = num_vfs;
1326 dev_info(&pdev->dev,
1327 "%d vfs allocated\n",
1328 num_vfs);
1329 for (i = 0;
1330 i < adapter->vfs_allocated_count;
1331 i++) {
1332 random_ether_addr(mac_addr);
1333 igb_set_vf_mac(adapter, i,
1334 mac_addr);
1335 }
1336 } else {
1337 kfree(adapter->vf_data);
1338 adapter->vf_data = NULL;
1339 }
1340 }
1341 }
1342 }
1343
1344#endif
1345 /* setup the private structure */ 1482 /* setup the private structure */
1346 err = igb_sw_init(adapter); 1483 err = igb_sw_init(adapter);
1347 if (err) 1484 if (err)
@@ -1349,16 +1486,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1349 1486
1350 igb_get_bus_info_pcie(hw); 1487 igb_get_bus_info_pcie(hw);
1351 1488
1352 /* set flags */
1353 switch (hw->mac.type) {
1354 case e1000_82575:
1355 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1356 break;
1357 case e1000_82576:
1358 default:
1359 break;
1360 }
1361
1362 hw->phy.autoneg_wait_to_complete = false; 1489 hw->phy.autoneg_wait_to_complete = false;
1363 hw->mac.adaptive_ifs = true; 1490 hw->mac.adaptive_ifs = true;
1364 1491
@@ -1382,7 +1509,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1382 netdev->features |= NETIF_F_IPV6_CSUM; 1509 netdev->features |= NETIF_F_IPV6_CSUM;
1383 netdev->features |= NETIF_F_TSO; 1510 netdev->features |= NETIF_F_TSO;
1384 netdev->features |= NETIF_F_TSO6; 1511 netdev->features |= NETIF_F_TSO6;
1385
1386 netdev->features |= NETIF_F_GRO; 1512 netdev->features |= NETIF_F_GRO;
1387 1513
1388 netdev->vlan_features |= NETIF_F_TSO; 1514 netdev->vlan_features |= NETIF_F_TSO;
@@ -1394,10 +1520,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1394 if (pci_using_dac) 1520 if (pci_using_dac)
1395 netdev->features |= NETIF_F_HIGHDMA; 1521 netdev->features |= NETIF_F_HIGHDMA;
1396 1522
1397 if (adapter->hw.mac.type == e1000_82576) 1523 if (hw->mac.type >= e1000_82576)
1398 netdev->features |= NETIF_F_SCTP_CSUM; 1524 netdev->features |= NETIF_F_SCTP_CSUM;
1399 1525
1400 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1526 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
1401 1527
1402 /* before reading the NVM, reset the controller to put the device in a 1528 /* before reading the NVM, reset the controller to put the device in a
1403 * known good starting state */ 1529 * known good starting state */
@@ -1439,9 +1565,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1439 hw->fc.requested_mode = e1000_fc_default; 1565 hw->fc.requested_mode = e1000_fc_default;
1440 hw->fc.current_mode = e1000_fc_default; 1566 hw->fc.current_mode = e1000_fc_default;
1441 1567
1442 adapter->itr_setting = IGB_DEFAULT_ITR;
1443 adapter->itr = IGB_START_ITR;
1444
1445 igb_validate_mdi_setting(hw); 1568 igb_validate_mdi_setting(hw);
1446 1569
1447 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, 1570 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1450,6 +1573,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1450 1573
1451 if (hw->bus.func == 0) 1574 if (hw->bus.func == 0)
1452 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); 1575 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1576 else if (hw->mac.type == e1000_82580)
1577 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1578 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1579 &eeprom_data);
1453 else if (hw->bus.func == 1) 1580 else if (hw->bus.func == 1)
1454 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 1581 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1455 1582
@@ -1508,66 +1635,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1508 dev_info(&pdev->dev, "DCA enabled\n"); 1635 dev_info(&pdev->dev, "DCA enabled\n");
1509 igb_setup_dca(adapter); 1636 igb_setup_dca(adapter);
1510 } 1637 }
1511#endif
1512 1638
1513 /*
1514 * Initialize hardware timer: we keep it running just in case
1515 * that some program needs it later on.
1516 */
1517 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1518 adapter->cycles.read = igb_read_clock;
1519 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1520 adapter->cycles.mult = 1;
1521 adapter->cycles.shift = IGB_TSYNC_SHIFT;
1522 wr32(E1000_TIMINCA,
1523 (1<<24) |
1524 IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
1525#if 0
1526 /*
1527 * Avoid rollover while we initialize by resetting the time counter.
1528 */
1529 wr32(E1000_SYSTIML, 0x00000000);
1530 wr32(E1000_SYSTIMH, 0x00000000);
1531#else
1532 /*
1533 * Set registers so that rollover occurs soon to test this.
1534 */
1535 wr32(E1000_SYSTIML, 0x00000000);
1536 wr32(E1000_SYSTIMH, 0xFF800000);
1537#endif 1639#endif
1538 wrfl();
1539 timecounter_init(&adapter->clock,
1540 &adapter->cycles,
1541 ktime_to_ns(ktime_get_real()));
1542
1543 /*
1544 * Synchronize our NIC clock against system wall clock. NIC
1545 * time stamp reading requires ~3us per sample, each sample
1546 * was pretty stable even under load => only require 10
1547 * samples for each offset comparison.
1548 */
1549 memset(&adapter->compare, 0, sizeof(adapter->compare));
1550 adapter->compare.source = &adapter->clock;
1551 adapter->compare.target = ktime_get_real;
1552 adapter->compare.num_samples = 10;
1553 timecompare_update(&adapter->compare, 0);
1554
1555#ifdef DEBUG
1556 {
1557 char buffer[160];
1558 printk(KERN_DEBUG
1559 "igb: %s: hw %p initialized timer\n",
1560 igb_get_time_str(adapter, buffer),
1561 &adapter->hw);
1562 }
1563#endif
1564
1565 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1640 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1566 /* print bus type/speed/width info */ 1641 /* print bus type/speed/width info */
1567 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1642 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1568 netdev->name, 1643 netdev->name,
1569 ((hw->bus.speed == e1000_bus_speed_2500) 1644 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1570 ? "2.5Gb/s" : "unknown"), 1645 "unknown"),
1571 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 1646 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1572 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 1647 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1573 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 1648 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1594,15 +1669,14 @@ err_eeprom:
1594 1669
1595 if (hw->flash_address) 1670 if (hw->flash_address)
1596 iounmap(hw->flash_address); 1671 iounmap(hw->flash_address);
1597
1598 igb_free_queues(adapter);
1599err_sw_init: 1672err_sw_init:
1673 igb_clear_interrupt_scheme(adapter);
1600 iounmap(hw->hw_addr); 1674 iounmap(hw->hw_addr);
1601err_ioremap: 1675err_ioremap:
1602 free_netdev(netdev); 1676 free_netdev(netdev);
1603err_alloc_etherdev: 1677err_alloc_etherdev:
1604 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1678 pci_release_selected_regions(pdev,
1605 IORESOURCE_MEM)); 1679 pci_select_bars(pdev, IORESOURCE_MEM));
1606err_pci_reg: 1680err_pci_reg:
1607err_dma: 1681err_dma:
1608 pci_disable_device(pdev); 1682 pci_disable_device(pdev);
@@ -1647,12 +1721,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1647 1721
1648 unregister_netdev(netdev); 1722 unregister_netdev(netdev);
1649 1723
1650 if (!igb_check_reset_block(&adapter->hw)) 1724 if (!igb_check_reset_block(hw))
1651 igb_reset_phy(&adapter->hw); 1725 igb_reset_phy(hw);
1652
1653 igb_reset_interrupt_capability(adapter);
1654 1726
1655 igb_free_queues(adapter); 1727 igb_clear_interrupt_scheme(adapter);
1656 1728
1657#ifdef CONFIG_PCI_IOV 1729#ifdef CONFIG_PCI_IOV
1658 /* reclaim resources allocated to VFs */ 1730 /* reclaim resources allocated to VFs */
@@ -1668,11 +1740,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1668 dev_info(&pdev->dev, "IOV Disabled\n"); 1740 dev_info(&pdev->dev, "IOV Disabled\n");
1669 } 1741 }
1670#endif 1742#endif
1743
1671 iounmap(hw->hw_addr); 1744 iounmap(hw->hw_addr);
1672 if (hw->flash_address) 1745 if (hw->flash_address)
1673 iounmap(hw->flash_address); 1746 iounmap(hw->flash_address);
1674 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1747 pci_release_selected_regions(pdev,
1675 IORESOURCE_MEM)); 1748 pci_select_bars(pdev, IORESOURCE_MEM));
1676 1749
1677 free_netdev(netdev); 1750 free_netdev(netdev);
1678 1751
@@ -1682,6 +1755,160 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682} 1755}
1683 1756
1684/** 1757/**
1758 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1759 * @adapter: board private structure to initialize
1760 *
1761 * This function initializes the vf specific data storage and then attempts to
1762 * allocate the VFs. The reason for ordering it this way is because it is much
1763 * mor expensive time wise to disable SR-IOV than it is to allocate and free
1764 * the memory for the VFs.
1765 **/
1766static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1767{
1768#ifdef CONFIG_PCI_IOV
1769 struct pci_dev *pdev = adapter->pdev;
1770
1771 if (adapter->vfs_allocated_count > 7)
1772 adapter->vfs_allocated_count = 7;
1773
1774 if (adapter->vfs_allocated_count) {
1775 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1776 sizeof(struct vf_data_storage),
1777 GFP_KERNEL);
1778 /* if allocation failed then we do not support SR-IOV */
1779 if (!adapter->vf_data) {
1780 adapter->vfs_allocated_count = 0;
1781 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1782 "Data Storage\n");
1783 }
1784 }
1785
1786 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1787 kfree(adapter->vf_data);
1788 adapter->vf_data = NULL;
1789#endif /* CONFIG_PCI_IOV */
1790 adapter->vfs_allocated_count = 0;
1791#ifdef CONFIG_PCI_IOV
1792 } else {
1793 unsigned char mac_addr[ETH_ALEN];
1794 int i;
1795 dev_info(&pdev->dev, "%d vfs allocated\n",
1796 adapter->vfs_allocated_count);
1797 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1798 random_ether_addr(mac_addr);
1799 igb_set_vf_mac(adapter, i, mac_addr);
1800 }
1801 }
1802#endif /* CONFIG_PCI_IOV */
1803}
1804
1805
1806/**
1807 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1808 * @adapter: board private structure to initialize
1809 *
1810 * igb_init_hw_timer initializes the function pointer and values for the hw
1811 * timer found in hardware.
1812 **/
1813static void igb_init_hw_timer(struct igb_adapter *adapter)
1814{
1815 struct e1000_hw *hw = &adapter->hw;
1816
1817 switch (hw->mac.type) {
1818 case e1000_82580:
1819 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1820 adapter->cycles.read = igb_read_clock;
1821 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1822 adapter->cycles.mult = 1;
1823 /*
1824 * The 82580 timesync updates the system timer every 8ns by 8ns
1825 * and the value cannot be shifted. Instead we need to shift
1826 * the registers to generate a 64bit timer value. As a result
1827 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
1828 * 24 in order to generate a larger value for synchronization.
1829 */
1830 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
1831 /* disable system timer temporarily by setting bit 31 */
1832 wr32(E1000_TSAUXC, 0x80000000);
1833 wrfl();
1834
1835 /* Set registers so that rollover occurs soon to test this. */
1836 wr32(E1000_SYSTIMR, 0x00000000);
1837 wr32(E1000_SYSTIML, 0x80000000);
1838 wr32(E1000_SYSTIMH, 0x000000FF);
1839 wrfl();
1840
1841 /* enable system timer by clearing bit 31 */
1842 wr32(E1000_TSAUXC, 0x0);
1843 wrfl();
1844
1845 timecounter_init(&adapter->clock,
1846 &adapter->cycles,
1847 ktime_to_ns(ktime_get_real()));
1848 /*
1849 * Synchronize our NIC clock against system wall clock. NIC
1850 * time stamp reading requires ~3us per sample, each sample
1851 * was pretty stable even under load => only require 10
1852 * samples for each offset comparison.
1853 */
1854 memset(&adapter->compare, 0, sizeof(adapter->compare));
1855 adapter->compare.source = &adapter->clock;
1856 adapter->compare.target = ktime_get_real;
1857 adapter->compare.num_samples = 10;
1858 timecompare_update(&adapter->compare, 0);
1859 break;
1860 case e1000_82576:
1861 /*
1862 * Initialize hardware timer: we keep it running just in case
1863 * that some program needs it later on.
1864 */
1865 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1866 adapter->cycles.read = igb_read_clock;
1867 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1868 adapter->cycles.mult = 1;
1869 /**
1870 * Scale the NIC clock cycle by a large factor so that
1871 * relatively small clock corrections can be added or
1872 * substracted at each clock tick. The drawbacks of a large
1873 * factor are a) that the clock register overflows more quickly
1874 * (not such a big deal) and b) that the increment per tick has
1875 * to fit into 24 bits. As a result we need to use a shift of
1876 * 19 so we can fit a value of 16 into the TIMINCA register.
1877 */
1878 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1879 wr32(E1000_TIMINCA,
1880 (1 << E1000_TIMINCA_16NS_SHIFT) |
1881 (16 << IGB_82576_TSYNC_SHIFT));
1882
1883 /* Set registers so that rollover occurs soon to test this. */
1884 wr32(E1000_SYSTIML, 0x00000000);
1885 wr32(E1000_SYSTIMH, 0xFF800000);
1886 wrfl();
1887
1888 timecounter_init(&adapter->clock,
1889 &adapter->cycles,
1890 ktime_to_ns(ktime_get_real()));
1891 /*
1892 * Synchronize our NIC clock against system wall clock. NIC
1893 * time stamp reading requires ~3us per sample, each sample
1894 * was pretty stable even under load => only require 10
1895 * samples for each offset comparison.
1896 */
1897 memset(&adapter->compare, 0, sizeof(adapter->compare));
1898 adapter->compare.source = &adapter->clock;
1899 adapter->compare.target = ktime_get_real;
1900 adapter->compare.num_samples = 10;
1901 timecompare_update(&adapter->compare, 0);
1902 break;
1903 case e1000_82575:
1904 /* 82575 does not support timesync */
1905 default:
1906 break;
1907 }
1908
1909}
1910
1911/**
1685 * igb_sw_init - Initialize general software structures (struct igb_adapter) 1912 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1686 * @adapter: board private structure to initialize 1913 * @adapter: board private structure to initialize
1687 * 1914 *
@@ -1699,20 +1926,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1699 1926
1700 adapter->tx_ring_count = IGB_DEFAULT_TXD; 1927 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1701 adapter->rx_ring_count = IGB_DEFAULT_RXD; 1928 adapter->rx_ring_count = IGB_DEFAULT_RXD;
1702 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1929 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1703 adapter->rx_ps_hdr_size = 0; /* disable packet split */ 1930 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1931
1704 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1932 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1705 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1933 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1706 1934
1707 /* This call may decrease the number of queues depending on 1935#ifdef CONFIG_PCI_IOV
1708 * interrupt mode. */ 1936 if (hw->mac.type == e1000_82576)
1709 igb_set_interrupt_capability(adapter); 1937 adapter->vfs_allocated_count = max_vfs;
1710 1938
1711 if (igb_alloc_queues(adapter)) { 1939#endif /* CONFIG_PCI_IOV */
1940 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1941
1942 /*
1943 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
1944 * then we should combine the queues into a queue pair in order to
1945 * conserve interrupts due to limited supply
1946 */
1947 if ((adapter->rss_queues > 4) ||
1948 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1949 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1950
1951 /* This call may decrease the number of queues */
1952 if (igb_init_interrupt_scheme(adapter)) {
1712 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 1953 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1713 return -ENOMEM; 1954 return -ENOMEM;
1714 } 1955 }
1715 1956
1957 igb_init_hw_timer(adapter);
1958 igb_probe_vfs(adapter);
1959
1716 /* Explicitly disable IRQ since the NIC can be in any state. */ 1960 /* Explicitly disable IRQ since the NIC can be in any state. */
1717 igb_irq_disable(adapter); 1961 igb_irq_disable(adapter);
1718 1962
@@ -1757,21 +2001,12 @@ static int igb_open(struct net_device *netdev)
1757 2001
1758 /* e1000_power_up_phy(adapter); */ 2002 /* e1000_power_up_phy(adapter); */
1759 2003
1760 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1761 if ((adapter->hw.mng_cookie.status &
1762 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1763 igb_update_mng_vlan(adapter);
1764
1765 /* before we allocate an interrupt, we must be ready to handle it. 2004 /* before we allocate an interrupt, we must be ready to handle it.
1766 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2005 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1767 * as soon as we call pci_request_irq, so we have to setup our 2006 * as soon as we call pci_request_irq, so we have to setup our
1768 * clean_rx handler before we do so. */ 2007 * clean_rx handler before we do so. */
1769 igb_configure(adapter); 2008 igb_configure(adapter);
1770 2009
1771 igb_vmm_control(adapter);
1772 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
1773 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1774
1775 err = igb_request_irq(adapter); 2010 err = igb_request_irq(adapter);
1776 if (err) 2011 if (err)
1777 goto err_req_irq; 2012 goto err_req_irq;
@@ -1779,18 +2014,28 @@ static int igb_open(struct net_device *netdev)
1779 /* From here on the code is the same as igb_up() */ 2014 /* From here on the code is the same as igb_up() */
1780 clear_bit(__IGB_DOWN, &adapter->state); 2015 clear_bit(__IGB_DOWN, &adapter->state);
1781 2016
1782 for (i = 0; i < adapter->num_rx_queues; i++) 2017 for (i = 0; i < adapter->num_q_vectors; i++) {
1783 napi_enable(&adapter->rx_ring[i].napi); 2018 struct igb_q_vector *q_vector = adapter->q_vector[i];
2019 napi_enable(&q_vector->napi);
2020 }
1784 2021
1785 /* Clear any pending interrupts. */ 2022 /* Clear any pending interrupts. */
1786 rd32(E1000_ICR); 2023 rd32(E1000_ICR);
1787 2024
1788 igb_irq_enable(adapter); 2025 igb_irq_enable(adapter);
1789 2026
2027 /* notify VFs that reset has been completed */
2028 if (adapter->vfs_allocated_count) {
2029 u32 reg_data = rd32(E1000_CTRL_EXT);
2030 reg_data |= E1000_CTRL_EXT_PFRSTD;
2031 wr32(E1000_CTRL_EXT, reg_data);
2032 }
2033
1790 netif_tx_start_all_queues(netdev); 2034 netif_tx_start_all_queues(netdev);
1791 2035
1792 /* Fire a link status change interrupt to start the watchdog. */ 2036 /* start the watchdog. */
1793 wr32(E1000_ICS, E1000_ICS_LSC); 2037 hw->mac.get_link_status = 1;
2038 schedule_work(&adapter->watchdog_task);
1794 2039
1795 return 0; 2040 return 0;
1796 2041
@@ -1829,28 +2074,18 @@ static int igb_close(struct net_device *netdev)
1829 igb_free_all_tx_resources(adapter); 2074 igb_free_all_tx_resources(adapter);
1830 igb_free_all_rx_resources(adapter); 2075 igb_free_all_rx_resources(adapter);
1831 2076
1832 /* kill manageability vlan ID if supported, but not if a vlan with
1833 * the same ID is registered on the host OS (let 8021q kill it) */
1834 if ((adapter->hw.mng_cookie.status &
1835 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1836 !(adapter->vlgrp &&
1837 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1838 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1839
1840 return 0; 2077 return 0;
1841} 2078}
1842 2079
1843/** 2080/**
1844 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 2081 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1845 * @adapter: board private structure
1846 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2082 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1847 * 2083 *
1848 * Return 0 on success, negative on failure 2084 * Return 0 on success, negative on failure
1849 **/ 2085 **/
1850int igb_setup_tx_resources(struct igb_adapter *adapter, 2086int igb_setup_tx_resources(struct igb_ring *tx_ring)
1851 struct igb_ring *tx_ring)
1852{ 2087{
1853 struct pci_dev *pdev = adapter->pdev; 2088 struct pci_dev *pdev = tx_ring->pdev;
1854 int size; 2089 int size;
1855 2090
1856 size = sizeof(struct igb_buffer) * tx_ring->count; 2091 size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1863,20 +2098,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1863 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2098 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1864 tx_ring->size = ALIGN(tx_ring->size, 4096); 2099 tx_ring->size = ALIGN(tx_ring->size, 4096);
1865 2100
1866 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 2101 tx_ring->desc = pci_alloc_consistent(pdev,
2102 tx_ring->size,
1867 &tx_ring->dma); 2103 &tx_ring->dma);
1868 2104
1869 if (!tx_ring->desc) 2105 if (!tx_ring->desc)
1870 goto err; 2106 goto err;
1871 2107
1872 tx_ring->adapter = adapter;
1873 tx_ring->next_to_use = 0; 2108 tx_ring->next_to_use = 0;
1874 tx_ring->next_to_clean = 0; 2109 tx_ring->next_to_clean = 0;
1875 return 0; 2110 return 0;
1876 2111
1877err: 2112err:
1878 vfree(tx_ring->buffer_info); 2113 vfree(tx_ring->buffer_info);
1879 dev_err(&adapter->pdev->dev, 2114 dev_err(&pdev->dev,
1880 "Unable to allocate memory for the transmit descriptor ring\n"); 2115 "Unable to allocate memory for the transmit descriptor ring\n");
1881 return -ENOMEM; 2116 return -ENOMEM;
1882} 2117}
@@ -1890,13 +2125,13 @@ err:
1890 **/ 2125 **/
1891static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 2126static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1892{ 2127{
2128 struct pci_dev *pdev = adapter->pdev;
1893 int i, err = 0; 2129 int i, err = 0;
1894 int r_idx;
1895 2130
1896 for (i = 0; i < adapter->num_tx_queues; i++) { 2131 for (i = 0; i < adapter->num_tx_queues; i++) {
1897 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2132 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
1898 if (err) { 2133 if (err) {
1899 dev_err(&adapter->pdev->dev, 2134 dev_err(&pdev->dev,
1900 "Allocation for Tx Queue %u failed\n", i); 2135 "Allocation for Tx Queue %u failed\n", i);
1901 for (i--; i >= 0; i--) 2136 for (i--; i >= 0; i--)
1902 igb_free_tx_resources(&adapter->tx_ring[i]); 2137 igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1904,57 +2139,24 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1904 } 2139 }
1905 } 2140 }
1906 2141
1907 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 2142 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
1908 r_idx = i % adapter->num_tx_queues; 2143 int r_idx = i % adapter->num_tx_queues;
1909 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2144 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1910 } 2145 }
1911 return err; 2146 return err;
1912} 2147}
1913 2148
1914/** 2149/**
1915 * igb_configure_tx - Configure transmit Unit after Reset 2150 * igb_setup_tctl - configure the transmit control registers
1916 * @adapter: board private structure 2151 * @adapter: Board private structure
1917 *
1918 * Configure the Tx unit of the MAC after a reset.
1919 **/ 2152 **/
1920static void igb_configure_tx(struct igb_adapter *adapter) 2153void igb_setup_tctl(struct igb_adapter *adapter)
1921{ 2154{
1922 u64 tdba;
1923 struct e1000_hw *hw = &adapter->hw; 2155 struct e1000_hw *hw = &adapter->hw;
1924 u32 tctl; 2156 u32 tctl;
1925 u32 txdctl, txctrl;
1926 int i, j;
1927
1928 for (i = 0; i < adapter->num_tx_queues; i++) {
1929 struct igb_ring *ring = &adapter->tx_ring[i];
1930 j = ring->reg_idx;
1931 wr32(E1000_TDLEN(j),
1932 ring->count * sizeof(union e1000_adv_tx_desc));
1933 tdba = ring->dma;
1934 wr32(E1000_TDBAL(j),
1935 tdba & 0x00000000ffffffffULL);
1936 wr32(E1000_TDBAH(j), tdba >> 32);
1937
1938 ring->head = E1000_TDH(j);
1939 ring->tail = E1000_TDT(j);
1940 writel(0, hw->hw_addr + ring->tail);
1941 writel(0, hw->hw_addr + ring->head);
1942 txdctl = rd32(E1000_TXDCTL(j));
1943 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1944 wr32(E1000_TXDCTL(j), txdctl);
1945
1946 /* Turn off Relaxed Ordering on head write-backs. The
1947 * writebacks MUST be delivered in order or it will
1948 * completely screw up our bookeeping.
1949 */
1950 txctrl = rd32(E1000_DCA_TXCTRL(j));
1951 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1952 wr32(E1000_DCA_TXCTRL(j), txctrl);
1953 }
1954 2157
1955 /* disable queue 0 to prevent tail bump w/o re-configuration */ 2158 /* disable queue 0 which is enabled by default on 82575 and 82576 */
1956 if (adapter->vfs_allocated_count) 2159 wr32(E1000_TXDCTL(0), 0);
1957 wr32(E1000_TXDCTL(0), 0);
1958 2160
1959 /* Program the Transmit Control Register */ 2161 /* Program the Transmit Control Register */
1960 tctl = rd32(E1000_TCTL); 2162 tctl = rd32(E1000_TCTL);
@@ -1964,9 +2166,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1964 2166
1965 igb_config_collision_dist(hw); 2167 igb_config_collision_dist(hw);
1966 2168
1967 /* Setup Transmit Descriptor Settings for eop descriptor */
1968 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1969
1970 /* Enable transmits */ 2169 /* Enable transmits */
1971 tctl |= E1000_TCTL_EN; 2170 tctl |= E1000_TCTL_EN;
1972 2171
@@ -1974,16 +2173,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1974} 2173}
1975 2174
1976/** 2175/**
1977 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 2176 * igb_configure_tx_ring - Configure transmit ring after Reset
2177 * @adapter: board private structure
2178 * @ring: tx ring to configure
2179 *
2180 * Configure a transmit ring after a reset.
2181 **/
2182void igb_configure_tx_ring(struct igb_adapter *adapter,
2183 struct igb_ring *ring)
2184{
2185 struct e1000_hw *hw = &adapter->hw;
2186 u32 txdctl;
2187 u64 tdba = ring->dma;
2188 int reg_idx = ring->reg_idx;
2189
2190 /* disable the queue */
2191 txdctl = rd32(E1000_TXDCTL(reg_idx));
2192 wr32(E1000_TXDCTL(reg_idx),
2193 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2194 wrfl();
2195 mdelay(10);
2196
2197 wr32(E1000_TDLEN(reg_idx),
2198 ring->count * sizeof(union e1000_adv_tx_desc));
2199 wr32(E1000_TDBAL(reg_idx),
2200 tdba & 0x00000000ffffffffULL);
2201 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2202
2203 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2204 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2205 writel(0, ring->head);
2206 writel(0, ring->tail);
2207
2208 txdctl |= IGB_TX_PTHRESH;
2209 txdctl |= IGB_TX_HTHRESH << 8;
2210 txdctl |= IGB_TX_WTHRESH << 16;
2211
2212 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2213 wr32(E1000_TXDCTL(reg_idx), txdctl);
2214}
2215
2216/**
2217 * igb_configure_tx - Configure transmit Unit after Reset
1978 * @adapter: board private structure 2218 * @adapter: board private structure
2219 *
2220 * Configure the Tx unit of the MAC after a reset.
2221 **/
2222static void igb_configure_tx(struct igb_adapter *adapter)
2223{
2224 int i;
2225
2226 for (i = 0; i < adapter->num_tx_queues; i++)
2227 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2228}
2229
2230/**
2231 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1979 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2232 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1980 * 2233 *
1981 * Returns 0 on success, negative on failure 2234 * Returns 0 on success, negative on failure
1982 **/ 2235 **/
1983int igb_setup_rx_resources(struct igb_adapter *adapter, 2236int igb_setup_rx_resources(struct igb_ring *rx_ring)
1984 struct igb_ring *rx_ring)
1985{ 2237{
1986 struct pci_dev *pdev = adapter->pdev; 2238 struct pci_dev *pdev = rx_ring->pdev;
1987 int size, desc_len; 2239 int size, desc_len;
1988 2240
1989 size = sizeof(struct igb_buffer) * rx_ring->count; 2241 size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2007,13 +2259,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
2007 rx_ring->next_to_clean = 0; 2259 rx_ring->next_to_clean = 0;
2008 rx_ring->next_to_use = 0; 2260 rx_ring->next_to_use = 0;
2009 2261
2010 rx_ring->adapter = adapter;
2011
2012 return 0; 2262 return 0;
2013 2263
2014err: 2264err:
2015 vfree(rx_ring->buffer_info); 2265 vfree(rx_ring->buffer_info);
2016 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 2266 rx_ring->buffer_info = NULL;
2267 dev_err(&pdev->dev, "Unable to allocate memory for "
2017 "the receive descriptor ring\n"); 2268 "the receive descriptor ring\n");
2018 return -ENOMEM; 2269 return -ENOMEM;
2019} 2270}
@@ -2027,12 +2278,13 @@ err:
2027 **/ 2278 **/
2028static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 2279static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2029{ 2280{
2281 struct pci_dev *pdev = adapter->pdev;
2030 int i, err = 0; 2282 int i, err = 0;
2031 2283
2032 for (i = 0; i < adapter->num_rx_queues; i++) { 2284 for (i = 0; i < adapter->num_rx_queues; i++) {
2033 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2285 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2034 if (err) { 2286 if (err) {
2035 dev_err(&adapter->pdev->dev, 2287 dev_err(&pdev->dev,
2036 "Allocation for Rx Queue %u failed\n", i); 2288 "Allocation for Rx Queue %u failed\n", i);
2037 for (i--; i >= 0; i--) 2289 for (i--; i >= 0; i--)
2038 igb_free_rx_resources(&adapter->rx_ring[i]); 2290 igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2044,15 +2296,122 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2044} 2296}
2045 2297
2046/** 2298/**
2299 * igb_setup_mrqc - configure the multiple receive queue control registers
2300 * @adapter: Board private structure
2301 **/
2302static void igb_setup_mrqc(struct igb_adapter *adapter)
2303{
2304 struct e1000_hw *hw = &adapter->hw;
2305 u32 mrqc, rxcsum;
2306 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2307 union e1000_reta {
2308 u32 dword;
2309 u8 bytes[4];
2310 } reta;
2311 static const u8 rsshash[40] = {
2312 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2313 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2314 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2315 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2316
2317 /* Fill out hash function seeds */
2318 for (j = 0; j < 10; j++) {
2319 u32 rsskey = rsshash[(j * 4)];
2320 rsskey |= rsshash[(j * 4) + 1] << 8;
2321 rsskey |= rsshash[(j * 4) + 2] << 16;
2322 rsskey |= rsshash[(j * 4) + 3] << 24;
2323 array_wr32(E1000_RSSRK(0), j, rsskey);
2324 }
2325
2326 num_rx_queues = adapter->rss_queues;
2327
2328 if (adapter->vfs_allocated_count) {
2329 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2330 switch (hw->mac.type) {
2331 case e1000_82580:
2332 num_rx_queues = 1;
2333 shift = 0;
2334 break;
2335 case e1000_82576:
2336 shift = 3;
2337 num_rx_queues = 2;
2338 break;
2339 case e1000_82575:
2340 shift = 2;
2341 shift2 = 6;
2342 default:
2343 break;
2344 }
2345 } else {
2346 if (hw->mac.type == e1000_82575)
2347 shift = 6;
2348 }
2349
2350 for (j = 0; j < (32 * 4); j++) {
2351 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2352 if (shift2)
2353 reta.bytes[j & 3] |= num_rx_queues << shift2;
2354 if ((j & 3) == 3)
2355 wr32(E1000_RETA(j >> 2), reta.dword);
2356 }
2357
2358 /*
2359 * Disable raw packet checksumming so that RSS hash is placed in
2360 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2361 * offloads as they are enabled by default
2362 */
2363 rxcsum = rd32(E1000_RXCSUM);
2364 rxcsum |= E1000_RXCSUM_PCSD;
2365
2366 if (adapter->hw.mac.type >= e1000_82576)
2367 /* Enable Receive Checksum Offload for SCTP */
2368 rxcsum |= E1000_RXCSUM_CRCOFL;
2369
2370 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2371 wr32(E1000_RXCSUM, rxcsum);
2372
2373 /* If VMDq is enabled then we set the appropriate mode for that, else
2374 * we default to RSS so that an RSS hash is calculated per packet even
2375 * if we are only using one queue */
2376 if (adapter->vfs_allocated_count) {
2377 if (hw->mac.type > e1000_82575) {
2378 /* Set the default pool for the PF's first queue */
2379 u32 vtctl = rd32(E1000_VT_CTL);
2380 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2381 E1000_VT_CTL_DISABLE_DEF_POOL);
2382 vtctl |= adapter->vfs_allocated_count <<
2383 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2384 wr32(E1000_VT_CTL, vtctl);
2385 }
2386 if (adapter->rss_queues > 1)
2387 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2388 else
2389 mrqc = E1000_MRQC_ENABLE_VMDQ;
2390 } else {
2391 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2392 }
2393 igb_vmm_control(adapter);
2394
2395 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2396 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2397 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2398 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2399 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2400 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2401 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2402 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2403
2404 wr32(E1000_MRQC, mrqc);
2405}
2406
2407/**
2047 * igb_setup_rctl - configure the receive control registers 2408 * igb_setup_rctl - configure the receive control registers
2048 * @adapter: Board private structure 2409 * @adapter: Board private structure
2049 **/ 2410 **/
2050static void igb_setup_rctl(struct igb_adapter *adapter) 2411void igb_setup_rctl(struct igb_adapter *adapter)
2051{ 2412{
2052 struct e1000_hw *hw = &adapter->hw; 2413 struct e1000_hw *hw = &adapter->hw;
2053 u32 rctl; 2414 u32 rctl;
2054 u32 srrctl = 0;
2055 int i;
2056 2415
2057 rctl = rd32(E1000_RCTL); 2416 rctl = rd32(E1000_RCTL);
2058 2417
@@ -2069,75 +2428,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
2069 */ 2428 */
2070 rctl |= E1000_RCTL_SECRC; 2429 rctl |= E1000_RCTL_SECRC;
2071 2430
2072 /* 2431 /* disable store bad packets and clear size bits. */
2073 * disable store bad packets and clear size bits.
2074 */
2075 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); 2432 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2076 2433
2077 /* enable LPE when to prevent packets larger than max_frame_size */ 2434 /* enable LPE to prevent packets larger than max_frame_size */
2078 rctl |= E1000_RCTL_LPE; 2435 rctl |= E1000_RCTL_LPE;
2079
2080 /* Setup buffer sizes */
2081 switch (adapter->rx_buffer_len) {
2082 case IGB_RXBUFFER_256:
2083 rctl |= E1000_RCTL_SZ_256;
2084 break;
2085 case IGB_RXBUFFER_512:
2086 rctl |= E1000_RCTL_SZ_512;
2087 break;
2088 default:
2089 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
2090 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2091 break;
2092 }
2093 2436
2094 /* 82575 and greater support packet-split where the protocol 2437 /* disable queue 0 to prevent tail write w/o re-config */
2095 * header is placed in skb->data and the packet data is 2438 wr32(E1000_RXDCTL(0), 0);
2096 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2097 * In the case of a non-split, skb->data is linearly filled,
2098 * followed by the page buffers. Therefore, skb->data is
2099 * sized to hold the largest protocol header.
2100 */
2101 /* allocations using alloc_page take too long for regular MTU
2102 * so only enable packet split for jumbo frames */
2103 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2104 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
2105 srrctl |= adapter->rx_ps_hdr_size <<
2106 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2107 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2108 } else {
2109 adapter->rx_ps_hdr_size = 0;
2110 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 }
2112 2439
2113 /* Attention!!! For SR-IOV PF driver operations you must enable 2440 /* Attention!!! For SR-IOV PF driver operations you must enable
2114 * queue drop for all VF and PF queues to prevent head of line blocking 2441 * queue drop for all VF and PF queues to prevent head of line blocking
2115 * if an un-trusted VF does not provide descriptors to hardware. 2442 * if an un-trusted VF does not provide descriptors to hardware.
2116 */ 2443 */
2117 if (adapter->vfs_allocated_count) { 2444 if (adapter->vfs_allocated_count) {
2118 u32 vmolr;
2119
2120 /* set all queue drop enable bits */ 2445 /* set all queue drop enable bits */
2121 wr32(E1000_QDE, ALL_QUEUES); 2446 wr32(E1000_QDE, ALL_QUEUES);
2122 srrctl |= E1000_SRRCTL_DROP_EN; 2447 }
2123 2448
2124 /* disable queue 0 to prevent tail write w/o re-config */ 2449 wr32(E1000_RCTL, rctl);
2125 wr32(E1000_RXDCTL(0), 0); 2450}
2126 2451
2127 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); 2452static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2128 if (rctl & E1000_RCTL_LPE) 2453 int vfn)
2129 vmolr |= E1000_VMOLR_LPE; 2454{
2130 if (adapter->num_rx_queues > 1) 2455 struct e1000_hw *hw = &adapter->hw;
2131 vmolr |= E1000_VMOLR_RSSE; 2456 u32 vmolr;
2132 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
2133 }
2134 2457
2135 for (i = 0; i < adapter->num_rx_queues; i++) { 2458 /* if it isn't the PF check to see if VFs are enabled and
2136 int j = adapter->rx_ring[i].reg_idx; 2459 * increase the size to support vlan tags */
2137 wr32(E1000_SRRCTL(j), srrctl); 2460 if (vfn < adapter->vfs_allocated_count &&
2138 } 2461 adapter->vf_data[vfn].vlans_enabled)
2462 size += VLAN_TAG_SIZE;
2139 2463
2140 wr32(E1000_RCTL, rctl); 2464 vmolr = rd32(E1000_VMOLR(vfn));
2465 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2466 vmolr |= size | E1000_VMOLR_LPE;
2467 wr32(E1000_VMOLR(vfn), vmolr);
2468
2469 return 0;
2141} 2470}
2142 2471
2143/** 2472/**
@@ -2159,33 +2488,107 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
2159 * size and set the VMOLR RLPML to the size we need */ 2488 * size and set the VMOLR RLPML to the size we need */
2160 if (pf_id) { 2489 if (pf_id) {
2161 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 2490 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2162 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE; 2491 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2163 } 2492 }
2164 2493
2165 wr32(E1000_RLPML, max_frame_size); 2494 wr32(E1000_RLPML, max_frame_size);
2166} 2495}
2167 2496
2497static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2498{
2499 struct e1000_hw *hw = &adapter->hw;
2500 u32 vmolr;
2501
2502 /*
2503 * This register exists only on 82576 and newer so if we are older then
2504 * we should exit and do nothing
2505 */
2506 if (hw->mac.type < e1000_82576)
2507 return;
2508
2509 vmolr = rd32(E1000_VMOLR(vfn));
2510 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
2511 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2512
2513 /* clear all bits that might not be set */
2514 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2515
2516 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2517 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2518 /*
2519 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2520 * multicast packets
2521 */
2522 if (vfn <= adapter->vfs_allocated_count)
2523 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2524
2525 wr32(E1000_VMOLR(vfn), vmolr);
2526}
2527
2168/** 2528/**
2169 * igb_configure_vt_default_pool - Configure VT default pool 2529 * igb_configure_rx_ring - Configure a receive ring after Reset
2170 * @adapter: board private structure 2530 * @adapter: board private structure
2531 * @ring: receive ring to be configured
2171 * 2532 *
2172 * Configure the default pool 2533 * Configure the Rx unit of the MAC after a reset.
2173 **/ 2534 **/
2174static void igb_configure_vt_default_pool(struct igb_adapter *adapter) 2535void igb_configure_rx_ring(struct igb_adapter *adapter,
2536 struct igb_ring *ring)
2175{ 2537{
2176 struct e1000_hw *hw = &adapter->hw; 2538 struct e1000_hw *hw = &adapter->hw;
2177 u16 pf_id = adapter->vfs_allocated_count; 2539 u64 rdba = ring->dma;
2178 u32 vtctl; 2540 int reg_idx = ring->reg_idx;
2541 u32 srrctl, rxdctl;
2542
2543 /* disable the queue */
2544 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2545 wr32(E1000_RXDCTL(reg_idx),
2546 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2547
2548 /* Set DMA base address registers */
2549 wr32(E1000_RDBAL(reg_idx),
2550 rdba & 0x00000000ffffffffULL);
2551 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2552 wr32(E1000_RDLEN(reg_idx),
2553 ring->count * sizeof(union e1000_adv_rx_desc));
2554
2555 /* initialize head and tail */
2556 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2557 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2558 writel(0, ring->head);
2559 writel(0, ring->tail);
2560
2561 /* set descriptor configuration */
2562 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2563 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2564 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2565#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2566 srrctl |= IGB_RXBUFFER_16384 >>
2567 E1000_SRRCTL_BSIZEPKT_SHIFT;
2568#else
2569 srrctl |= (PAGE_SIZE / 2) >>
2570 E1000_SRRCTL_BSIZEPKT_SHIFT;
2571#endif
2572 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2573 } else {
2574 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2575 E1000_SRRCTL_BSIZEPKT_SHIFT;
2576 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2577 }
2179 2578
2180 /* not in sr-iov mode - do nothing */ 2579 wr32(E1000_SRRCTL(reg_idx), srrctl);
2181 if (!pf_id) 2580
2182 return; 2581 /* set filtering for VMDQ pools */
2582 igb_set_vmolr(adapter, reg_idx & 0x7);
2183 2583
2184 vtctl = rd32(E1000_VT_CTL); 2584 /* enable receive descriptor fetching */
2185 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 2585 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2186 E1000_VT_CTL_DISABLE_DEF_POOL); 2586 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2187 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT; 2587 rxdctl &= 0xFFF00000;
2188 wr32(E1000_VT_CTL, vtctl); 2588 rxdctl |= IGB_RX_PTHRESH;
2589 rxdctl |= IGB_RX_HTHRESH << 8;
2590 rxdctl |= IGB_RX_WTHRESH << 16;
2591 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2189} 2592}
2190 2593
2191/** 2594/**
@@ -2196,112 +2599,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2196 **/ 2599 **/
2197static void igb_configure_rx(struct igb_adapter *adapter) 2600static void igb_configure_rx(struct igb_adapter *adapter)
2198{ 2601{
2199 u64 rdba;
2200 struct e1000_hw *hw = &adapter->hw;
2201 u32 rctl, rxcsum;
2202 u32 rxdctl;
2203 int i; 2602 int i;
2204 2603
2205 /* disable receives while setting up the descriptors */ 2604 /* set UTA to appropriate mode */
2206 rctl = rd32(E1000_RCTL); 2605 igb_set_uta(adapter);
2207 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2208 wrfl();
2209 mdelay(10);
2210 2606
2211 if (adapter->itr_setting > 3) 2607 /* set the correct pool for the PF default MAC address in entry 0 */
2212 wr32(E1000_ITR, adapter->itr); 2608 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2609 adapter->vfs_allocated_count);
2213 2610
2214 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2611 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2215 * the Base and Length of the Rx Descriptor Ring */ 2612 * the Base and Length of the Rx Descriptor Ring */
2216 for (i = 0; i < adapter->num_rx_queues; i++) { 2613 for (i = 0; i < adapter->num_rx_queues; i++)
2217 struct igb_ring *ring = &adapter->rx_ring[i]; 2614 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2218 int j = ring->reg_idx;
2219 rdba = ring->dma;
2220 wr32(E1000_RDBAL(j),
2221 rdba & 0x00000000ffffffffULL);
2222 wr32(E1000_RDBAH(j), rdba >> 32);
2223 wr32(E1000_RDLEN(j),
2224 ring->count * sizeof(union e1000_adv_rx_desc));
2225
2226 ring->head = E1000_RDH(j);
2227 ring->tail = E1000_RDT(j);
2228 writel(0, hw->hw_addr + ring->tail);
2229 writel(0, hw->hw_addr + ring->head);
2230
2231 rxdctl = rd32(E1000_RXDCTL(j));
2232 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2233 rxdctl &= 0xFFF00000;
2234 rxdctl |= IGB_RX_PTHRESH;
2235 rxdctl |= IGB_RX_HTHRESH << 8;
2236 rxdctl |= IGB_RX_WTHRESH << 16;
2237 wr32(E1000_RXDCTL(j), rxdctl);
2238 }
2239
2240 if (adapter->num_rx_queues > 1) {
2241 u32 random[10];
2242 u32 mrqc;
2243 u32 j, shift;
2244 union e1000_reta {
2245 u32 dword;
2246 u8 bytes[4];
2247 } reta;
2248
2249 get_random_bytes(&random[0], 40);
2250
2251 if (hw->mac.type >= e1000_82576)
2252 shift = 0;
2253 else
2254 shift = 6;
2255 for (j = 0; j < (32 * 4); j++) {
2256 reta.bytes[j & 3] =
2257 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2258 if ((j & 3) == 3)
2259 writel(reta.dword,
2260 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2261 }
2262 if (adapter->vfs_allocated_count)
2263 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2264 else
2265 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2266
2267 /* Fill out hash function seeds */
2268 for (j = 0; j < 10; j++)
2269 array_wr32(E1000_RSSRK(0), j, random[j]);
2270
2271 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2272 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2273 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2274 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2275 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2276 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2277 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2278 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2279
2280 wr32(E1000_MRQC, mrqc);
2281 } else if (adapter->vfs_allocated_count) {
2282 /* Enable multi-queue for sr-iov */
2283 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2284 }
2285
2286 /* Enable Receive Checksum Offload for TCP and UDP */
2287 rxcsum = rd32(E1000_RXCSUM);
2288 /* Disable raw packet checksumming */
2289 rxcsum |= E1000_RXCSUM_PCSD;
2290
2291 if (adapter->hw.mac.type == e1000_82576)
2292 /* Enable Receive Checksum Offload for SCTP */
2293 rxcsum |= E1000_RXCSUM_CRCOFL;
2294
2295 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2296 wr32(E1000_RXCSUM, rxcsum);
2297
2298 /* Set the default pool for the PF's first queue */
2299 igb_configure_vt_default_pool(adapter);
2300
2301 igb_rlpml_set(adapter);
2302
2303 /* Enable Receives */
2304 wr32(E1000_RCTL, rctl);
2305} 2615}
2306 2616
2307/** 2617/**
@@ -2312,14 +2622,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
2312 **/ 2622 **/
2313void igb_free_tx_resources(struct igb_ring *tx_ring) 2623void igb_free_tx_resources(struct igb_ring *tx_ring)
2314{ 2624{
2315 struct pci_dev *pdev = tx_ring->adapter->pdev;
2316
2317 igb_clean_tx_ring(tx_ring); 2625 igb_clean_tx_ring(tx_ring);
2318 2626
2319 vfree(tx_ring->buffer_info); 2627 vfree(tx_ring->buffer_info);
2320 tx_ring->buffer_info = NULL; 2628 tx_ring->buffer_info = NULL;
2321 2629
2322 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2630 /* if not set, then don't free */
2631 if (!tx_ring->desc)
2632 return;
2633
2634 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2635 tx_ring->desc, tx_ring->dma);
2323 2636
2324 tx_ring->desc = NULL; 2637 tx_ring->desc = NULL;
2325} 2638}
@@ -2338,12 +2651,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2338 igb_free_tx_resources(&adapter->tx_ring[i]); 2651 igb_free_tx_resources(&adapter->tx_ring[i]);
2339} 2652}
2340 2653
2341static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, 2654void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2342 struct igb_buffer *buffer_info) 2655 struct igb_buffer *buffer_info)
2343{ 2656{
2344 buffer_info->dma = 0; 2657 buffer_info->dma = 0;
2345 if (buffer_info->skb) { 2658 if (buffer_info->skb) {
2346 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, 2659 skb_dma_unmap(&tx_ring->pdev->dev,
2660 buffer_info->skb,
2347 DMA_TO_DEVICE); 2661 DMA_TO_DEVICE);
2348 dev_kfree_skb_any(buffer_info->skb); 2662 dev_kfree_skb_any(buffer_info->skb);
2349 buffer_info->skb = NULL; 2663 buffer_info->skb = NULL;
@@ -2358,7 +2672,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2358 **/ 2672 **/
2359static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2673static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2360{ 2674{
2361 struct igb_adapter *adapter = tx_ring->adapter;
2362 struct igb_buffer *buffer_info; 2675 struct igb_buffer *buffer_info;
2363 unsigned long size; 2676 unsigned long size;
2364 unsigned int i; 2677 unsigned int i;
@@ -2369,21 +2682,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2369 2682
2370 for (i = 0; i < tx_ring->count; i++) { 2683 for (i = 0; i < tx_ring->count; i++) {
2371 buffer_info = &tx_ring->buffer_info[i]; 2684 buffer_info = &tx_ring->buffer_info[i];
2372 igb_unmap_and_free_tx_resource(adapter, buffer_info); 2685 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2373 } 2686 }
2374 2687
2375 size = sizeof(struct igb_buffer) * tx_ring->count; 2688 size = sizeof(struct igb_buffer) * tx_ring->count;
2376 memset(tx_ring->buffer_info, 0, size); 2689 memset(tx_ring->buffer_info, 0, size);
2377 2690
2378 /* Zero out the descriptor ring */ 2691 /* Zero out the descriptor ring */
2379
2380 memset(tx_ring->desc, 0, tx_ring->size); 2692 memset(tx_ring->desc, 0, tx_ring->size);
2381 2693
2382 tx_ring->next_to_use = 0; 2694 tx_ring->next_to_use = 0;
2383 tx_ring->next_to_clean = 0; 2695 tx_ring->next_to_clean = 0;
2384
2385 writel(0, adapter->hw.hw_addr + tx_ring->head);
2386 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2387} 2696}
2388 2697
2389/** 2698/**
@@ -2406,14 +2715,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2406 **/ 2715 **/
2407void igb_free_rx_resources(struct igb_ring *rx_ring) 2716void igb_free_rx_resources(struct igb_ring *rx_ring)
2408{ 2717{
2409 struct pci_dev *pdev = rx_ring->adapter->pdev;
2410
2411 igb_clean_rx_ring(rx_ring); 2718 igb_clean_rx_ring(rx_ring);
2412 2719
2413 vfree(rx_ring->buffer_info); 2720 vfree(rx_ring->buffer_info);
2414 rx_ring->buffer_info = NULL; 2721 rx_ring->buffer_info = NULL;
2415 2722
2416 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2723 /* if not set, then don't free */
2724 if (!rx_ring->desc)
2725 return;
2726
2727 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2728 rx_ring->desc, rx_ring->dma);
2417 2729
2418 rx_ring->desc = NULL; 2730 rx_ring->desc = NULL;
2419} 2731}
@@ -2438,26 +2750,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2438 **/ 2750 **/
2439static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2751static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2440{ 2752{
2441 struct igb_adapter *adapter = rx_ring->adapter;
2442 struct igb_buffer *buffer_info; 2753 struct igb_buffer *buffer_info;
2443 struct pci_dev *pdev = adapter->pdev;
2444 unsigned long size; 2754 unsigned long size;
2445 unsigned int i; 2755 unsigned int i;
2446 2756
2447 if (!rx_ring->buffer_info) 2757 if (!rx_ring->buffer_info)
2448 return; 2758 return;
2759
2449 /* Free all the Rx ring sk_buffs */ 2760 /* Free all the Rx ring sk_buffs */
2450 for (i = 0; i < rx_ring->count; i++) { 2761 for (i = 0; i < rx_ring->count; i++) {
2451 buffer_info = &rx_ring->buffer_info[i]; 2762 buffer_info = &rx_ring->buffer_info[i];
2452 if (buffer_info->dma) { 2763 if (buffer_info->dma) {
2453 if (adapter->rx_ps_hdr_size) 2764 pci_unmap_single(rx_ring->pdev,
2454 pci_unmap_single(pdev, buffer_info->dma, 2765 buffer_info->dma,
2455 adapter->rx_ps_hdr_size, 2766 rx_ring->rx_buffer_len,
2456 PCI_DMA_FROMDEVICE); 2767 PCI_DMA_FROMDEVICE);
2457 else
2458 pci_unmap_single(pdev, buffer_info->dma,
2459 adapter->rx_buffer_len,
2460 PCI_DMA_FROMDEVICE);
2461 buffer_info->dma = 0; 2768 buffer_info->dma = 0;
2462 } 2769 }
2463 2770
@@ -2465,14 +2772,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2465 dev_kfree_skb(buffer_info->skb); 2772 dev_kfree_skb(buffer_info->skb);
2466 buffer_info->skb = NULL; 2773 buffer_info->skb = NULL;
2467 } 2774 }
2775 if (buffer_info->page_dma) {
2776 pci_unmap_page(rx_ring->pdev,
2777 buffer_info->page_dma,
2778 PAGE_SIZE / 2,
2779 PCI_DMA_FROMDEVICE);
2780 buffer_info->page_dma = 0;
2781 }
2468 if (buffer_info->page) { 2782 if (buffer_info->page) {
2469 if (buffer_info->page_dma)
2470 pci_unmap_page(pdev, buffer_info->page_dma,
2471 PAGE_SIZE / 2,
2472 PCI_DMA_FROMDEVICE);
2473 put_page(buffer_info->page); 2783 put_page(buffer_info->page);
2474 buffer_info->page = NULL; 2784 buffer_info->page = NULL;
2475 buffer_info->page_dma = 0;
2476 buffer_info->page_offset = 0; 2785 buffer_info->page_offset = 0;
2477 } 2786 }
2478 } 2787 }
@@ -2485,9 +2794,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2485 2794
2486 rx_ring->next_to_clean = 0; 2795 rx_ring->next_to_clean = 0;
2487 rx_ring->next_to_use = 0; 2796 rx_ring->next_to_use = 0;
2488
2489 writel(0, adapter->hw.hw_addr + rx_ring->head);
2490 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2491} 2797}
2492 2798
2493/** 2799/**
@@ -2521,61 +2827,90 @@ static int igb_set_mac(struct net_device *netdev, void *p)
2521 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2827 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2522 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 2828 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2523 2829
2524 igb_rar_set(hw, hw->mac.addr, 0); 2830 /* set the correct pool for the new PF MAC address in entry 0 */
2525 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); 2831 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2832 adapter->vfs_allocated_count);
2526 2833
2527 return 0; 2834 return 0;
2528} 2835}
2529 2836
2530/** 2837/**
2531 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2838 * igb_write_mc_addr_list - write multicast addresses to MTA
2532 * @netdev: network interface device structure 2839 * @netdev: network interface device structure
2533 * 2840 *
2534 * The set_rx_mode entry point is called whenever the unicast or multicast 2841 * Writes multicast address list to the MTA hash table.
2535 * address lists or the network interface flags are updated. This routine is 2842 * Returns: -ENOMEM on failure
2536 * responsible for configuring the hardware for proper unicast, multicast, 2843 * 0 on no addresses written
2537 * promiscuous mode, and all-multi behavior. 2844 * X on writing X addresses to MTA
2538 **/ 2845 **/
2539static void igb_set_rx_mode(struct net_device *netdev) 2846static int igb_write_mc_addr_list(struct net_device *netdev)
2540{ 2847{
2541 struct igb_adapter *adapter = netdev_priv(netdev); 2848 struct igb_adapter *adapter = netdev_priv(netdev);
2542 struct e1000_hw *hw = &adapter->hw; 2849 struct e1000_hw *hw = &adapter->hw;
2543 unsigned int rar_entries = hw->mac.rar_entry_count -
2544 (adapter->vfs_allocated_count + 1);
2545 struct dev_mc_list *mc_ptr = netdev->mc_list; 2850 struct dev_mc_list *mc_ptr = netdev->mc_list;
2546 u8 *mta_list = NULL; 2851 u8 *mta_list;
2547 u32 rctl; 2852 u32 vmolr = 0;
2548 int i; 2853 int i;
2549 2854
2550 /* Check for Promiscuous and All Multicast modes */ 2855 if (!netdev->mc_count) {
2551 rctl = rd32(E1000_RCTL); 2856 /* nothing to program, so clear mc list */
2857 igb_update_mc_addr_list(hw, NULL, 0);
2858 igb_restore_vf_multicasts(adapter);
2859 return 0;
2860 }
2552 2861
2553 if (netdev->flags & IFF_PROMISC) { 2862 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2554 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2863 if (!mta_list)
2555 rctl &= ~E1000_RCTL_VFE; 2864 return -ENOMEM;
2556 } else {
2557 if (netdev->flags & IFF_ALLMULTI)
2558 rctl |= E1000_RCTL_MPE;
2559 else
2560 rctl &= ~E1000_RCTL_MPE;
2561 2865
2562 if (netdev->uc.count > rar_entries) 2866 /* set vmolr receive overflow multicast bit */
2563 rctl |= E1000_RCTL_UPE; 2867 vmolr |= E1000_VMOLR_ROMPE;
2564 else 2868
2565 rctl &= ~E1000_RCTL_UPE; 2869 /* The shared function expects a packed array of only addresses. */
2566 rctl |= E1000_RCTL_VFE; 2870 mc_ptr = netdev->mc_list;
2871
2872 for (i = 0; i < netdev->mc_count; i++) {
2873 if (!mc_ptr)
2874 break;
2875 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2876 mc_ptr = mc_ptr->next;
2567 } 2877 }
2568 wr32(E1000_RCTL, rctl); 2878 igb_update_mc_addr_list(hw, mta_list, i);
2879 kfree(mta_list);
2880
2881 return netdev->mc_count;
2882}
2883
2884/**
2885 * igb_write_uc_addr_list - write unicast addresses to RAR table
2886 * @netdev: network interface device structure
2887 *
2888 * Writes unicast address list to the RAR table.
2889 * Returns: -ENOMEM on failure/insufficient address space
2890 * 0 on no addresses written
2891 * X on writing X addresses to the RAR table
2892 **/
2893static int igb_write_uc_addr_list(struct net_device *netdev)
2894{
2895 struct igb_adapter *adapter = netdev_priv(netdev);
2896 struct e1000_hw *hw = &adapter->hw;
2897 unsigned int vfn = adapter->vfs_allocated_count;
2898 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2899 int count = 0;
2900
2901 /* return ENOMEM indicating insufficient memory for addresses */
2902 if (netdev->uc.count > rar_entries)
2903 return -ENOMEM;
2569 2904
2570 if (netdev->uc.count && rar_entries) { 2905 if (netdev->uc.count && rar_entries) {
2571 struct netdev_hw_addr *ha; 2906 struct netdev_hw_addr *ha;
2572 list_for_each_entry(ha, &netdev->uc.list, list) { 2907 list_for_each_entry(ha, &netdev->uc.list, list) {
2573 if (!rar_entries) 2908 if (!rar_entries)
2574 break; 2909 break;
2575 igb_rar_set(hw, ha->addr, rar_entries); 2910 igb_rar_set_qsel(adapter, ha->addr,
2576 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 2911 rar_entries--,
2577 rar_entries); 2912 vfn);
2578 rar_entries--; 2913 count++;
2579 } 2914 }
2580 } 2915 }
2581 /* write the addresses in reverse order to avoid write combining */ 2916 /* write the addresses in reverse order to avoid write combining */
@@ -2585,29 +2920,79 @@ static void igb_set_rx_mode(struct net_device *netdev)
2585 } 2920 }
2586 wrfl(); 2921 wrfl();
2587 2922
2588 if (!netdev->mc_count) { 2923 return count;
2589 /* nothing to program, so clear mc list */ 2924}
2590 igb_update_mc_addr_list(hw, NULL, 0); 2925
2591 igb_restore_vf_multicasts(adapter); 2926/**
2592 return; 2927 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2928 * @netdev: network interface device structure
2929 *
2930 * The set_rx_mode entry point is called whenever the unicast or multicast
2931 * address lists or the network interface flags are updated. This routine is
2932 * responsible for configuring the hardware for proper unicast, multicast,
2933 * promiscuous mode, and all-multi behavior.
2934 **/
2935static void igb_set_rx_mode(struct net_device *netdev)
2936{
2937 struct igb_adapter *adapter = netdev_priv(netdev);
2938 struct e1000_hw *hw = &adapter->hw;
2939 unsigned int vfn = adapter->vfs_allocated_count;
2940 u32 rctl, vmolr = 0;
2941 int count;
2942
2943 /* Check for Promiscuous and All Multicast modes */
2944 rctl = rd32(E1000_RCTL);
2945
2946 /* clear the effected bits */
2947 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2948
2949 if (netdev->flags & IFF_PROMISC) {
2950 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2951 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2952 } else {
2953 if (netdev->flags & IFF_ALLMULTI) {
2954 rctl |= E1000_RCTL_MPE;
2955 vmolr |= E1000_VMOLR_MPME;
2956 } else {
2957 /*
2958 * Write addresses to the MTA, if the attempt fails
2959 * then we should just turn on promiscous mode so
2960 * that we can at least receive multicast traffic
2961 */
2962 count = igb_write_mc_addr_list(netdev);
2963 if (count < 0) {
2964 rctl |= E1000_RCTL_MPE;
2965 vmolr |= E1000_VMOLR_MPME;
2966 } else if (count) {
2967 vmolr |= E1000_VMOLR_ROMPE;
2968 }
2969 }
2970 /*
2971 * Write addresses to available RAR registers, if there is not
2972 * sufficient space to store all the addresses then enable
2973 * unicast promiscous mode
2974 */
2975 count = igb_write_uc_addr_list(netdev);
2976 if (count < 0) {
2977 rctl |= E1000_RCTL_UPE;
2978 vmolr |= E1000_VMOLR_ROPE;
2979 }
2980 rctl |= E1000_RCTL_VFE;
2593 } 2981 }
2982 wr32(E1000_RCTL, rctl);
2594 2983
2595 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); 2984 /*
2596 if (!mta_list) { 2985 * In order to support SR-IOV and eventually VMDq it is necessary to set
2597 dev_err(&adapter->pdev->dev, 2986 * the VMOLR to enable the appropriate modes. Without this workaround
2598 "failed to allocate multicast filter list\n"); 2987 * we will have issues with VLAN tag stripping not being done for frames
2988 * that are only arriving because we are the default pool
2989 */
2990 if (hw->mac.type < e1000_82576)
2599 return; 2991 return;
2600 }
2601 2992
2602 /* The shared function expects a packed array of only addresses. */ 2993 vmolr |= rd32(E1000_VMOLR(vfn)) &
2603 for (i = 0; i < netdev->mc_count; i++) { 2994 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2604 if (!mc_ptr) 2995 wr32(E1000_VMOLR(vfn), vmolr);
2605 break;
2606 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2607 mc_ptr = mc_ptr->next;
2608 }
2609 igb_update_mc_addr_list(hw, mta_list, i);
2610 kfree(mta_list);
2611 igb_restore_vf_multicasts(adapter); 2996 igb_restore_vf_multicasts(adapter);
2612} 2997}
2613 2998
@@ -2669,37 +3054,33 @@ static void igb_watchdog(unsigned long data)
2669static void igb_watchdog_task(struct work_struct *work) 3054static void igb_watchdog_task(struct work_struct *work)
2670{ 3055{
2671 struct igb_adapter *adapter = container_of(work, 3056 struct igb_adapter *adapter = container_of(work,
2672 struct igb_adapter, watchdog_task); 3057 struct igb_adapter,
3058 watchdog_task);
2673 struct e1000_hw *hw = &adapter->hw; 3059 struct e1000_hw *hw = &adapter->hw;
2674 struct net_device *netdev = adapter->netdev; 3060 struct net_device *netdev = adapter->netdev;
2675 struct igb_ring *tx_ring = adapter->tx_ring;
2676 u32 link; 3061 u32 link;
2677 u32 eics = 0;
2678 int i; 3062 int i;
2679 3063
2680 link = igb_has_link(adapter); 3064 link = igb_has_link(adapter);
2681 if ((netif_carrier_ok(netdev)) && link)
2682 goto link_up;
2683
2684 if (link) { 3065 if (link) {
2685 if (!netif_carrier_ok(netdev)) { 3066 if (!netif_carrier_ok(netdev)) {
2686 u32 ctrl; 3067 u32 ctrl;
2687 hw->mac.ops.get_speed_and_duplex(&adapter->hw, 3068 hw->mac.ops.get_speed_and_duplex(hw,
2688 &adapter->link_speed, 3069 &adapter->link_speed,
2689 &adapter->link_duplex); 3070 &adapter->link_duplex);
2690 3071
2691 ctrl = rd32(E1000_CTRL); 3072 ctrl = rd32(E1000_CTRL);
2692 /* Links status message must follow this format */ 3073 /* Links status message must follow this format */
2693 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " 3074 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2694 "Flow Control: %s\n", 3075 "Flow Control: %s\n",
2695 netdev->name, 3076 netdev->name,
2696 adapter->link_speed, 3077 adapter->link_speed,
2697 adapter->link_duplex == FULL_DUPLEX ? 3078 adapter->link_duplex == FULL_DUPLEX ?
2698 "Full Duplex" : "Half Duplex", 3079 "Full Duplex" : "Half Duplex",
2699 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 3080 ((ctrl & E1000_CTRL_TFCE) &&
2700 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 3081 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
2701 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 3082 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2702 E1000_CTRL_TFCE) ? "TX" : "None"))); 3083 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
2703 3084
2704 /* tweak tx_queue_len according to speed/duplex and 3085 /* tweak tx_queue_len according to speed/duplex and
2705 * adjust the timeout factor */ 3086 * adjust the timeout factor */
@@ -2743,46 +3124,40 @@ static void igb_watchdog_task(struct work_struct *work)
2743 } 3124 }
2744 } 3125 }
2745 3126
2746link_up:
2747 igb_update_stats(adapter); 3127 igb_update_stats(adapter);
3128 igb_update_adaptive(hw);
2748 3129
2749 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 3130 for (i = 0; i < adapter->num_tx_queues; i++) {
2750 adapter->tpt_old = adapter->stats.tpt; 3131 struct igb_ring *tx_ring = &adapter->tx_ring[i];
2751 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old; 3132 if (!netif_carrier_ok(netdev)) {
2752 adapter->colc_old = adapter->stats.colc;
2753
2754 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2755 adapter->gorc_old = adapter->stats.gorc;
2756 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2757 adapter->gotc_old = adapter->stats.gotc;
2758
2759 igb_update_adaptive(&adapter->hw);
2760
2761 if (!netif_carrier_ok(netdev)) {
2762 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2763 /* We've lost link, so the controller stops DMA, 3133 /* We've lost link, so the controller stops DMA,
2764 * but we've got queued Tx work that's never going 3134 * but we've got queued Tx work that's never going
2765 * to get done, so reset controller to flush Tx. 3135 * to get done, so reset controller to flush Tx.
2766 * (Do the reset outside of interrupt context). */ 3136 * (Do the reset outside of interrupt context). */
2767 adapter->tx_timeout_count++; 3137 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2768 schedule_work(&adapter->reset_task); 3138 adapter->tx_timeout_count++;
2769 /* return immediately since reset is imminent */ 3139 schedule_work(&adapter->reset_task);
2770 return; 3140 /* return immediately since reset is imminent */
3141 return;
3142 }
2771 } 3143 }
3144
3145 /* Force detection of hung controller every watchdog period */
3146 tx_ring->detect_tx_hung = true;
2772 } 3147 }
2773 3148
2774 /* Cause software interrupt to ensure rx ring is cleaned */ 3149 /* Cause software interrupt to ensure rx ring is cleaned */
2775 if (adapter->msix_entries) { 3150 if (adapter->msix_entries) {
2776 for (i = 0; i < adapter->num_rx_queues; i++) 3151 u32 eics = 0;
2777 eics |= adapter->rx_ring[i].eims_value; 3152 for (i = 0; i < adapter->num_q_vectors; i++) {
3153 struct igb_q_vector *q_vector = adapter->q_vector[i];
3154 eics |= q_vector->eims_value;
3155 }
2778 wr32(E1000_EICS, eics); 3156 wr32(E1000_EICS, eics);
2779 } else { 3157 } else {
2780 wr32(E1000_ICS, E1000_ICS_RXDMT0); 3158 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2781 } 3159 }
2782 3160
2783 /* Force detection of hung controller every watchdog period */
2784 tx_ring->detect_tx_hung = true;
2785
2786 /* Reset the timer */ 3161 /* Reset the timer */
2787 if (!test_bit(__IGB_DOWN, &adapter->state)) 3162 if (!test_bit(__IGB_DOWN, &adapter->state))
2788 mod_timer(&adapter->watchdog_timer, 3163 mod_timer(&adapter->watchdog_timer,
@@ -2796,7 +3171,6 @@ enum latency_range {
2796 latency_invalid = 255 3171 latency_invalid = 255
2797}; 3172};
2798 3173
2799
2800/** 3174/**
2801 * igb_update_ring_itr - update the dynamic ITR value based on packet size 3175 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2802 * 3176 *
@@ -2811,25 +3185,37 @@ enum latency_range {
2811 * parameter (see igb_param.c) 3185 * parameter (see igb_param.c)
2812 * NOTE: This function is called only when operating in a multiqueue 3186 * NOTE: This function is called only when operating in a multiqueue
2813 * receive environment. 3187 * receive environment.
2814 * @rx_ring: pointer to ring 3188 * @q_vector: pointer to q_vector
2815 **/ 3189 **/
2816static void igb_update_ring_itr(struct igb_ring *rx_ring) 3190static void igb_update_ring_itr(struct igb_q_vector *q_vector)
2817{ 3191{
2818 int new_val = rx_ring->itr_val; 3192 int new_val = q_vector->itr_val;
2819 int avg_wire_size = 0; 3193 int avg_wire_size = 0;
2820 struct igb_adapter *adapter = rx_ring->adapter; 3194 struct igb_adapter *adapter = q_vector->adapter;
2821
2822 if (!rx_ring->total_packets)
2823 goto clear_counts; /* no packets, so don't do anything */
2824 3195
2825 /* For non-gigabit speeds, just fix the interrupt rate at 4000 3196 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2826 * ints/sec - ITR timer value of 120 ticks. 3197 * ints/sec - ITR timer value of 120 ticks.
2827 */ 3198 */
2828 if (adapter->link_speed != SPEED_1000) { 3199 if (adapter->link_speed != SPEED_1000) {
2829 new_val = 120; 3200 new_val = 976;
2830 goto set_itr_val; 3201 goto set_itr_val;
2831 } 3202 }
2832 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; 3203
3204 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3205 struct igb_ring *ring = q_vector->rx_ring;
3206 avg_wire_size = ring->total_bytes / ring->total_packets;
3207 }
3208
3209 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3210 struct igb_ring *ring = q_vector->tx_ring;
3211 avg_wire_size = max_t(u32, avg_wire_size,
3212 (ring->total_bytes /
3213 ring->total_packets));
3214 }
3215
3216 /* if avg_wire_size isn't set no work was done */
3217 if (!avg_wire_size)
3218 goto clear_counts;
2833 3219
2834 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 3220 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2835 avg_wire_size += 24; 3221 avg_wire_size += 24;
@@ -2844,13 +3230,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
2844 new_val = avg_wire_size / 2; 3230 new_val = avg_wire_size / 2;
2845 3231
2846set_itr_val: 3232set_itr_val:
2847 if (new_val != rx_ring->itr_val) { 3233 if (new_val != q_vector->itr_val) {
2848 rx_ring->itr_val = new_val; 3234 q_vector->itr_val = new_val;
2849 rx_ring->set_itr = 1; 3235 q_vector->set_itr = 1;
2850 } 3236 }
2851clear_counts: 3237clear_counts:
2852 rx_ring->total_bytes = 0; 3238 if (q_vector->rx_ring) {
2853 rx_ring->total_packets = 0; 3239 q_vector->rx_ring->total_bytes = 0;
3240 q_vector->rx_ring->total_packets = 0;
3241 }
3242 if (q_vector->tx_ring) {
3243 q_vector->tx_ring->total_bytes = 0;
3244 q_vector->tx_ring->total_packets = 0;
3245 }
2854} 3246}
2855 3247
2856/** 3248/**
@@ -2867,7 +3259,7 @@ clear_counts:
2867 * NOTE: These calculations are only valid when operating in a single- 3259 * NOTE: These calculations are only valid when operating in a single-
2868 * queue environment. 3260 * queue environment.
2869 * @adapter: pointer to adapter 3261 * @adapter: pointer to adapter
2870 * @itr_setting: current adapter->itr 3262 * @itr_setting: current q_vector->itr_val
2871 * @packets: the number of packets during this measurement interval 3263 * @packets: the number of packets during this measurement interval
2872 * @bytes: the number of bytes during this measurement interval 3264 * @bytes: the number of bytes during this measurement interval
2873 **/ 3265 **/
@@ -2919,8 +3311,9 @@ update_itr_done:
2919 3311
2920static void igb_set_itr(struct igb_adapter *adapter) 3312static void igb_set_itr(struct igb_adapter *adapter)
2921{ 3313{
3314 struct igb_q_vector *q_vector = adapter->q_vector[0];
2922 u16 current_itr; 3315 u16 current_itr;
2923 u32 new_itr = adapter->itr; 3316 u32 new_itr = q_vector->itr_val;
2924 3317
2925 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3318 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2926 if (adapter->link_speed != SPEED_1000) { 3319 if (adapter->link_speed != SPEED_1000) {
@@ -2934,18 +3327,14 @@ static void igb_set_itr(struct igb_adapter *adapter)
2934 adapter->rx_ring->total_packets, 3327 adapter->rx_ring->total_packets,
2935 adapter->rx_ring->total_bytes); 3328 adapter->rx_ring->total_bytes);
2936 3329
2937 if (adapter->rx_ring->buddy) { 3330 adapter->tx_itr = igb_update_itr(adapter,
2938 adapter->tx_itr = igb_update_itr(adapter, 3331 adapter->tx_itr,
2939 adapter->tx_itr, 3332 adapter->tx_ring->total_packets,
2940 adapter->tx_ring->total_packets, 3333 adapter->tx_ring->total_bytes);
2941 adapter->tx_ring->total_bytes); 3334 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2942 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2943 } else {
2944 current_itr = adapter->rx_itr;
2945 }
2946 3335
2947 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3336 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2948 if (adapter->itr_setting == 3 && current_itr == lowest_latency) 3337 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
2949 current_itr = low_latency; 3338 current_itr = low_latency;
2950 3339
2951 switch (current_itr) { 3340 switch (current_itr) {
@@ -2966,18 +3355,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
2966set_itr_now: 3355set_itr_now:
2967 adapter->rx_ring->total_bytes = 0; 3356 adapter->rx_ring->total_bytes = 0;
2968 adapter->rx_ring->total_packets = 0; 3357 adapter->rx_ring->total_packets = 0;
2969 if (adapter->rx_ring->buddy) { 3358 adapter->tx_ring->total_bytes = 0;
2970 adapter->rx_ring->buddy->total_bytes = 0; 3359 adapter->tx_ring->total_packets = 0;
2971 adapter->rx_ring->buddy->total_packets = 0;
2972 }
2973 3360
2974 if (new_itr != adapter->itr) { 3361 if (new_itr != q_vector->itr_val) {
2975 /* this attempts to bias the interrupt rate towards Bulk 3362 /* this attempts to bias the interrupt rate towards Bulk
2976 * by adding intermediate steps when interrupt rate is 3363 * by adding intermediate steps when interrupt rate is
2977 * increasing */ 3364 * increasing */
2978 new_itr = new_itr > adapter->itr ? 3365 new_itr = new_itr > q_vector->itr_val ?
2979 max((new_itr * adapter->itr) / 3366 max((new_itr * q_vector->itr_val) /
2980 (new_itr + (adapter->itr >> 2)), new_itr) : 3367 (new_itr + (q_vector->itr_val >> 2)),
3368 new_itr) :
2981 new_itr; 3369 new_itr;
2982 /* Don't write the value here; it resets the adapter's 3370 /* Don't write the value here; it resets the adapter's
2983 * internal timer, and causes us to delay far longer than 3371 * internal timer, and causes us to delay far longer than
@@ -2985,25 +3373,22 @@ set_itr_now:
2985 * value at the beginning of the next interrupt so the timing 3373 * value at the beginning of the next interrupt so the timing
2986 * ends up being correct. 3374 * ends up being correct.
2987 */ 3375 */
2988 adapter->itr = new_itr; 3376 q_vector->itr_val = new_itr;
2989 adapter->rx_ring->itr_val = new_itr; 3377 q_vector->set_itr = 1;
2990 adapter->rx_ring->set_itr = 1;
2991 } 3378 }
2992 3379
2993 return; 3380 return;
2994} 3381}
2995 3382
2996
2997#define IGB_TX_FLAGS_CSUM 0x00000001 3383#define IGB_TX_FLAGS_CSUM 0x00000001
2998#define IGB_TX_FLAGS_VLAN 0x00000002 3384#define IGB_TX_FLAGS_VLAN 0x00000002
2999#define IGB_TX_FLAGS_TSO 0x00000004 3385#define IGB_TX_FLAGS_TSO 0x00000004
3000#define IGB_TX_FLAGS_IPV4 0x00000008 3386#define IGB_TX_FLAGS_IPV4 0x00000008
3001#define IGB_TX_FLAGS_TSTAMP 0x00000010 3387#define IGB_TX_FLAGS_TSTAMP 0x00000010
3002#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 3388#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3003#define IGB_TX_FLAGS_VLAN_SHIFT 16 3389#define IGB_TX_FLAGS_VLAN_SHIFT 16
3004 3390
3005static inline int igb_tso_adv(struct igb_adapter *adapter, 3391static inline int igb_tso_adv(struct igb_ring *tx_ring,
3006 struct igb_ring *tx_ring,
3007 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 3392 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3008{ 3393{
3009 struct e1000_adv_tx_context_desc *context_desc; 3394 struct e1000_adv_tx_context_desc *context_desc;
@@ -3065,8 +3450,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3065 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 3450 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3066 3451
3067 /* For 82575, context index must be unique per ring. */ 3452 /* For 82575, context index must be unique per ring. */
3068 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3453 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3069 mss_l4len_idx |= tx_ring->queue_index << 4; 3454 mss_l4len_idx |= tx_ring->reg_idx << 4;
3070 3455
3071 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3456 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3072 context_desc->seqnum_seed = 0; 3457 context_desc->seqnum_seed = 0;
@@ -3083,14 +3468,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3083 return true; 3468 return true;
3084} 3469}
3085 3470
3086static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, 3471static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3087 struct igb_ring *tx_ring, 3472 struct sk_buff *skb, u32 tx_flags)
3088 struct sk_buff *skb, u32 tx_flags)
3089{ 3473{
3090 struct e1000_adv_tx_context_desc *context_desc; 3474 struct e1000_adv_tx_context_desc *context_desc;
3091 unsigned int i; 3475 struct pci_dev *pdev = tx_ring->pdev;
3092 struct igb_buffer *buffer_info; 3476 struct igb_buffer *buffer_info;
3093 u32 info = 0, tu_cmd = 0; 3477 u32 info = 0, tu_cmd = 0;
3478 unsigned int i;
3094 3479
3095 if ((skb->ip_summed == CHECKSUM_PARTIAL) || 3480 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3096 (tx_flags & IGB_TX_FLAGS_VLAN)) { 3481 (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3100,6 +3485,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3100 3485
3101 if (tx_flags & IGB_TX_FLAGS_VLAN) 3486 if (tx_flags & IGB_TX_FLAGS_VLAN)
3102 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); 3487 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3488
3103 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 3489 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3104 if (skb->ip_summed == CHECKSUM_PARTIAL) 3490 if (skb->ip_summed == CHECKSUM_PARTIAL)
3105 info |= skb_network_header_len(skb); 3491 info |= skb_network_header_len(skb);
@@ -3137,7 +3523,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3137 break; 3523 break;
3138 default: 3524 default:
3139 if (unlikely(net_ratelimit())) 3525 if (unlikely(net_ratelimit()))
3140 dev_warn(&adapter->pdev->dev, 3526 dev_warn(&pdev->dev,
3141 "partial checksum but proto=%x!\n", 3527 "partial checksum but proto=%x!\n",
3142 skb->protocol); 3528 skb->protocol);
3143 break; 3529 break;
@@ -3146,11 +3532,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3146 3532
3147 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 3533 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3148 context_desc->seqnum_seed = 0; 3534 context_desc->seqnum_seed = 0;
3149 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3535 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3150 context_desc->mss_l4len_idx = 3536 context_desc->mss_l4len_idx =
3151 cpu_to_le32(tx_ring->queue_index << 4); 3537 cpu_to_le32(tx_ring->reg_idx << 4);
3152 else
3153 context_desc->mss_l4len_idx = 0;
3154 3538
3155 buffer_info->time_stamp = jiffies; 3539 buffer_info->time_stamp = jiffies;
3156 buffer_info->next_to_watch = i; 3540 buffer_info->next_to_watch = i;
@@ -3169,11 +3553,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3169#define IGB_MAX_TXD_PWR 16 3553#define IGB_MAX_TXD_PWR 16
3170#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) 3554#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3171 3555
3172static inline int igb_tx_map_adv(struct igb_adapter *adapter, 3556static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3173 struct igb_ring *tx_ring, struct sk_buff *skb,
3174 unsigned int first) 3557 unsigned int first)
3175{ 3558{
3176 struct igb_buffer *buffer_info; 3559 struct igb_buffer *buffer_info;
3560 struct pci_dev *pdev = tx_ring->pdev;
3177 unsigned int len = skb_headlen(skb); 3561 unsigned int len = skb_headlen(skb);
3178 unsigned int count = 0, i; 3562 unsigned int count = 0, i;
3179 unsigned int f; 3563 unsigned int f;
@@ -3181,8 +3565,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3181 3565
3182 i = tx_ring->next_to_use; 3566 i = tx_ring->next_to_use;
3183 3567
3184 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { 3568 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3185 dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); 3569 dev_err(&pdev->dev, "TX DMA map failed\n");
3186 return 0; 3570 return 0;
3187 } 3571 }
3188 3572
@@ -3218,18 +3602,17 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3218 tx_ring->buffer_info[i].skb = skb; 3602 tx_ring->buffer_info[i].skb = skb;
3219 tx_ring->buffer_info[first].next_to_watch = i; 3603 tx_ring->buffer_info[first].next_to_watch = i;
3220 3604
3221 return count + 1; 3605 return ++count;
3222} 3606}
3223 3607
3224static inline void igb_tx_queue_adv(struct igb_adapter *adapter, 3608static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3225 struct igb_ring *tx_ring,
3226 int tx_flags, int count, u32 paylen, 3609 int tx_flags, int count, u32 paylen,
3227 u8 hdr_len) 3610 u8 hdr_len)
3228{ 3611{
3229 union e1000_adv_tx_desc *tx_desc = NULL; 3612 union e1000_adv_tx_desc *tx_desc;
3230 struct igb_buffer *buffer_info; 3613 struct igb_buffer *buffer_info;
3231 u32 olinfo_status = 0, cmd_type_len; 3614 u32 olinfo_status = 0, cmd_type_len;
3232 unsigned int i; 3615 unsigned int i = tx_ring->next_to_use;
3233 3616
3234 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | 3617 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3235 E1000_ADVTXD_DCMD_DEXT); 3618 E1000_ADVTXD_DCMD_DEXT);
@@ -3254,27 +3637,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3254 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3637 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3255 } 3638 }
3256 3639
3257 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && 3640 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3258 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | 3641 (tx_flags & (IGB_TX_FLAGS_CSUM |
3642 IGB_TX_FLAGS_TSO |
3259 IGB_TX_FLAGS_VLAN))) 3643 IGB_TX_FLAGS_VLAN)))
3260 olinfo_status |= tx_ring->queue_index << 4; 3644 olinfo_status |= tx_ring->reg_idx << 4;
3261 3645
3262 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); 3646 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3263 3647
3264 i = tx_ring->next_to_use; 3648 do {
3265 while (count--) {
3266 buffer_info = &tx_ring->buffer_info[i]; 3649 buffer_info = &tx_ring->buffer_info[i];
3267 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 3650 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3268 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 3651 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3269 tx_desc->read.cmd_type_len = 3652 tx_desc->read.cmd_type_len =
3270 cpu_to_le32(cmd_type_len | buffer_info->length); 3653 cpu_to_le32(cmd_type_len | buffer_info->length);
3271 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3654 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3655 count--;
3272 i++; 3656 i++;
3273 if (i == tx_ring->count) 3657 if (i == tx_ring->count)
3274 i = 0; 3658 i = 0;
3275 } 3659 } while (count > 0);
3276 3660
3277 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); 3661 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3278 /* Force memory writes to complete before letting h/w 3662 /* Force memory writes to complete before letting h/w
3279 * know there are new descriptors to fetch. (Only 3663 * know there are new descriptors to fetch. (Only
3280 * applicable for weak-ordered memory model archs, 3664 * applicable for weak-ordered memory model archs,
@@ -3282,16 +3666,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3282 wmb(); 3666 wmb();
3283 3667
3284 tx_ring->next_to_use = i; 3668 tx_ring->next_to_use = i;
3285 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3669 writel(i, tx_ring->tail);
3286 /* we need this if more than one processor can write to our tail 3670 /* we need this if more than one processor can write to our tail
3287 * at a time, it syncronizes IO on IA64/Altix systems */ 3671 * at a time, it syncronizes IO on IA64/Altix systems */
3288 mmiowb(); 3672 mmiowb();
3289} 3673}
3290 3674
3291static int __igb_maybe_stop_tx(struct net_device *netdev, 3675static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3292 struct igb_ring *tx_ring, int size)
3293{ 3676{
3294 struct igb_adapter *adapter = netdev_priv(netdev); 3677 struct net_device *netdev = tx_ring->netdev;
3295 3678
3296 netif_stop_subqueue(netdev, tx_ring->queue_index); 3679 netif_stop_subqueue(netdev, tx_ring->queue_index);
3297 3680
@@ -3307,66 +3690,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
3307 3690
3308 /* A reprieve! */ 3691 /* A reprieve! */
3309 netif_wake_subqueue(netdev, tx_ring->queue_index); 3692 netif_wake_subqueue(netdev, tx_ring->queue_index);
3310 ++adapter->restart_queue; 3693 tx_ring->tx_stats.restart_queue++;
3311 return 0; 3694 return 0;
3312} 3695}
3313 3696
3314static int igb_maybe_stop_tx(struct net_device *netdev, 3697static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3315 struct igb_ring *tx_ring, int size)
3316{ 3698{
3317 if (igb_desc_unused(tx_ring) >= size) 3699 if (igb_desc_unused(tx_ring) >= size)
3318 return 0; 3700 return 0;
3319 return __igb_maybe_stop_tx(netdev, tx_ring, size); 3701 return __igb_maybe_stop_tx(tx_ring, size);
3320} 3702}
3321 3703
3322static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, 3704netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3323 struct net_device *netdev, 3705 struct igb_ring *tx_ring)
3324 struct igb_ring *tx_ring)
3325{ 3706{
3326 struct igb_adapter *adapter = netdev_priv(netdev); 3707 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3327 unsigned int first; 3708 unsigned int first;
3328 unsigned int tx_flags = 0; 3709 unsigned int tx_flags = 0;
3329 u8 hdr_len = 0; 3710 u8 hdr_len = 0;
3330 int count = 0; 3711 int tso = 0, count;
3331 int tso = 0; 3712 union skb_shared_tx *shtx = skb_tx(skb);
3332 union skb_shared_tx *shtx;
3333
3334 if (test_bit(__IGB_DOWN, &adapter->state)) {
3335 dev_kfree_skb_any(skb);
3336 return NETDEV_TX_OK;
3337 }
3338
3339 if (skb->len <= 0) {
3340 dev_kfree_skb_any(skb);
3341 return NETDEV_TX_OK;
3342 }
3343 3713
3344 /* need: 1 descriptor per page, 3714 /* need: 1 descriptor per page,
3345 * + 2 desc gap to keep tail from touching head, 3715 * + 2 desc gap to keep tail from touching head,
3346 * + 1 desc for skb->data, 3716 * + 1 desc for skb->data,
3347 * + 1 desc for context descriptor, 3717 * + 1 desc for context descriptor,
3348 * otherwise try next time */ 3718 * otherwise try next time */
3349 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { 3719 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3350 /* this is a hard error */ 3720 /* this is a hard error */
3351 return NETDEV_TX_BUSY; 3721 return NETDEV_TX_BUSY;
3352 } 3722 }
3353 3723
3354 /*
3355 * TODO: check that there currently is no other packet with
3356 * time stamping in the queue
3357 *
3358 * When doing time stamping, keep the connection to the socket
3359 * a while longer: it is still needed by skb_hwtstamp_tx(),
3360 * called either in igb_tx_hwtstamp() or by our caller when
3361 * doing software time stamping.
3362 */
3363 shtx = skb_tx(skb);
3364 if (unlikely(shtx->hardware)) { 3724 if (unlikely(shtx->hardware)) {
3365 shtx->in_progress = 1; 3725 shtx->in_progress = 1;
3366 tx_flags |= IGB_TX_FLAGS_TSTAMP; 3726 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3367 } 3727 }
3368 3728
3369 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3729 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
3370 tx_flags |= IGB_TX_FLAGS_VLAN; 3730 tx_flags |= IGB_TX_FLAGS_VLAN;
3371 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 3731 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3372 } 3732 }
@@ -3375,37 +3735,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3375 tx_flags |= IGB_TX_FLAGS_IPV4; 3735 tx_flags |= IGB_TX_FLAGS_IPV4;
3376 3736
3377 first = tx_ring->next_to_use; 3737 first = tx_ring->next_to_use;
3378 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3738 if (skb_is_gso(skb)) {
3379 &hdr_len) : 0; 3739 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3380 3740
3381 if (tso < 0) { 3741 if (tso < 0) {
3382 dev_kfree_skb_any(skb); 3742 dev_kfree_skb_any(skb);
3383 return NETDEV_TX_OK; 3743 return NETDEV_TX_OK;
3744 }
3384 } 3745 }
3385 3746
3386 if (tso) 3747 if (tso)
3387 tx_flags |= IGB_TX_FLAGS_TSO; 3748 tx_flags |= IGB_TX_FLAGS_TSO;
3388 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) && 3749 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3389 (skb->ip_summed == CHECKSUM_PARTIAL)) 3750 (skb->ip_summed == CHECKSUM_PARTIAL))
3390 tx_flags |= IGB_TX_FLAGS_CSUM; 3751 tx_flags |= IGB_TX_FLAGS_CSUM;
3391 3752
3392 /* 3753 /*
3393 * count reflects descriptors mapped, if 0 then mapping error 3754 * count reflects descriptors mapped, if 0 or less then mapping error
3394 * has occured and we need to rewind the descriptor queue 3755 * has occured and we need to rewind the descriptor queue
3395 */ 3756 */
3396 count = igb_tx_map_adv(adapter, tx_ring, skb, first); 3757 count = igb_tx_map_adv(tx_ring, skb, first);
3397 3758 if (count <= 0) {
3398 if (count) {
3399 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3400 skb->len, hdr_len);
3401 /* Make sure there is space in the ring for the next send. */
3402 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3403 } else {
3404 dev_kfree_skb_any(skb); 3759 dev_kfree_skb_any(skb);
3405 tx_ring->buffer_info[first].time_stamp = 0; 3760 tx_ring->buffer_info[first].time_stamp = 0;
3406 tx_ring->next_to_use = first; 3761 tx_ring->next_to_use = first;
3762 return NETDEV_TX_OK;
3407 } 3763 }
3408 3764
3765 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3766
3767 /* Make sure there is space in the ring for the next send. */
3768 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3769
3409 return NETDEV_TX_OK; 3770 return NETDEV_TX_OK;
3410} 3771}
3411 3772
@@ -3414,8 +3775,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3414{ 3775{
3415 struct igb_adapter *adapter = netdev_priv(netdev); 3776 struct igb_adapter *adapter = netdev_priv(netdev);
3416 struct igb_ring *tx_ring; 3777 struct igb_ring *tx_ring;
3417
3418 int r_idx = 0; 3778 int r_idx = 0;
3779
3780 if (test_bit(__IGB_DOWN, &adapter->state)) {
3781 dev_kfree_skb_any(skb);
3782 return NETDEV_TX_OK;
3783 }
3784
3785 if (skb->len <= 0) {
3786 dev_kfree_skb_any(skb);
3787 return NETDEV_TX_OK;
3788 }
3789
3419 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); 3790 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3420 tx_ring = adapter->multi_tx_table[r_idx]; 3791 tx_ring = adapter->multi_tx_table[r_idx];
3421 3792
@@ -3423,7 +3794,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3423 * to a flow. Right now, performance is impacted slightly negatively 3794 * to a flow. Right now, performance is impacted slightly negatively
3424 * if using multiple tx queues. If the stack breaks away from a 3795 * if using multiple tx queues. If the stack breaks away from a
3425 * single qdisc implementation, we can look at this again. */ 3796 * single qdisc implementation, we can look at this again. */
3426 return igb_xmit_frame_ring_adv(skb, netdev, tx_ring); 3797 return igb_xmit_frame_ring_adv(skb, tx_ring);
3427} 3798}
3428 3799
3429/** 3800/**
@@ -3437,6 +3808,10 @@ static void igb_tx_timeout(struct net_device *netdev)
3437 3808
3438 /* Do the reset outside of interrupt context */ 3809 /* Do the reset outside of interrupt context */
3439 adapter->tx_timeout_count++; 3810 adapter->tx_timeout_count++;
3811
3812 if (hw->mac.type == e1000_82580)
3813 hw->dev_spec._82575.global_device_reset = true;
3814
3440 schedule_work(&adapter->reset_task); 3815 schedule_work(&adapter->reset_task);
3441 wr32(E1000_EICS, 3816 wr32(E1000_EICS,
3442 (adapter->eims_enable_mask & ~adapter->eims_other)); 3817 (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3459,10 +3834,8 @@ static void igb_reset_task(struct work_struct *work)
3459 **/ 3834 **/
3460static struct net_device_stats *igb_get_stats(struct net_device *netdev) 3835static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3461{ 3836{
3462 struct igb_adapter *adapter = netdev_priv(netdev);
3463
3464 /* only return the current stats */ 3837 /* only return the current stats */
3465 return &adapter->net_stats; 3838 return &netdev->stats;
3466} 3839}
3467 3840
3468/** 3841/**
@@ -3475,16 +3848,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3475static int igb_change_mtu(struct net_device *netdev, int new_mtu) 3848static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3476{ 3849{
3477 struct igb_adapter *adapter = netdev_priv(netdev); 3850 struct igb_adapter *adapter = netdev_priv(netdev);
3851 struct pci_dev *pdev = adapter->pdev;
3478 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3852 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3853 u32 rx_buffer_len, i;
3479 3854
3480 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3855 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3481 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3856 dev_err(&pdev->dev, "Invalid MTU setting\n");
3482 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3483 return -EINVAL; 3857 return -EINVAL;
3484 } 3858 }
3485 3859
3486 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3860 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3487 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); 3861 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
3488 return -EINVAL; 3862 return -EINVAL;
3489 } 3863 }
3490 3864
@@ -3493,8 +3867,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3493 3867
3494 /* igb_down has a dependency on max_frame_size */ 3868 /* igb_down has a dependency on max_frame_size */
3495 adapter->max_frame_size = max_frame; 3869 adapter->max_frame_size = max_frame;
3496 if (netif_running(netdev))
3497 igb_down(adapter);
3498 3870
3499 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3871 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3500 * means we reserve 2 more, this pushes us to allocate from the next 3872 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3502,35 +3874,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3502 * i.e. RXBUFFER_2048 --> size-4096 slab 3874 * i.e. RXBUFFER_2048 --> size-4096 slab
3503 */ 3875 */
3504 3876
3505 if (max_frame <= IGB_RXBUFFER_256) 3877 if (max_frame <= IGB_RXBUFFER_1024)
3506 adapter->rx_buffer_len = IGB_RXBUFFER_256; 3878 rx_buffer_len = IGB_RXBUFFER_1024;
3507 else if (max_frame <= IGB_RXBUFFER_512) 3879 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3508 adapter->rx_buffer_len = IGB_RXBUFFER_512; 3880 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3509 else if (max_frame <= IGB_RXBUFFER_1024)
3510 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3511 else if (max_frame <= IGB_RXBUFFER_2048)
3512 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3513 else 3881 else
3514#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 3882 rx_buffer_len = IGB_RXBUFFER_128;
3515 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3516#else
3517 adapter->rx_buffer_len = PAGE_SIZE / 2;
3518#endif
3519
3520 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3521 if (adapter->vfs_allocated_count &&
3522 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3523 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3524 3883
3525 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3884 if (netif_running(netdev))
3526 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3885 igb_down(adapter);
3527 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3528 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3529 3886
3530 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", 3887 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
3531 netdev->mtu, new_mtu); 3888 netdev->mtu, new_mtu);
3532 netdev->mtu = new_mtu; 3889 netdev->mtu = new_mtu;
3533 3890
3891 for (i = 0; i < adapter->num_rx_queues; i++)
3892 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3893
3534 if (netif_running(netdev)) 3894 if (netif_running(netdev))
3535 igb_up(adapter); 3895 igb_up(adapter);
3536 else 3896 else
@@ -3548,9 +3908,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3548 3908
3549void igb_update_stats(struct igb_adapter *adapter) 3909void igb_update_stats(struct igb_adapter *adapter)
3550{ 3910{
3911 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3551 struct e1000_hw *hw = &adapter->hw; 3912 struct e1000_hw *hw = &adapter->hw;
3552 struct pci_dev *pdev = adapter->pdev; 3913 struct pci_dev *pdev = adapter->pdev;
3914 u32 rnbc;
3553 u16 phy_tmp; 3915 u16 phy_tmp;
3916 int i;
3917 u64 bytes, packets;
3554 3918
3555#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3919#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3556 3920
@@ -3563,6 +3927,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3563 if (pci_channel_offline(pdev)) 3927 if (pci_channel_offline(pdev))
3564 return; 3928 return;
3565 3929
3930 bytes = 0;
3931 packets = 0;
3932 for (i = 0; i < adapter->num_rx_queues; i++) {
3933 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3934 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3935 net_stats->rx_fifo_errors += rqdpc_tmp;
3936 bytes += adapter->rx_ring[i].rx_stats.bytes;
3937 packets += adapter->rx_ring[i].rx_stats.packets;
3938 }
3939
3940 net_stats->rx_bytes = bytes;
3941 net_stats->rx_packets = packets;
3942
3943 bytes = 0;
3944 packets = 0;
3945 for (i = 0; i < adapter->num_tx_queues; i++) {
3946 bytes += adapter->tx_ring[i].tx_stats.bytes;
3947 packets += adapter->tx_ring[i].tx_stats.packets;
3948 }
3949 net_stats->tx_bytes = bytes;
3950 net_stats->tx_packets = packets;
3951
3952 /* read stats registers */
3566 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 3953 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3567 adapter->stats.gprc += rd32(E1000_GPRC); 3954 adapter->stats.gprc += rd32(E1000_GPRC);
3568 adapter->stats.gorc += rd32(E1000_GORCL); 3955 adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3595,7 +3982,9 @@ void igb_update_stats(struct igb_adapter *adapter)
3595 adapter->stats.gptc += rd32(E1000_GPTC); 3982 adapter->stats.gptc += rd32(E1000_GPTC);
3596 adapter->stats.gotc += rd32(E1000_GOTCL); 3983 adapter->stats.gotc += rd32(E1000_GOTCL);
3597 rd32(E1000_GOTCH); /* clear GOTCL */ 3984 rd32(E1000_GOTCH); /* clear GOTCL */
3598 adapter->stats.rnbc += rd32(E1000_RNBC); 3985 rnbc = rd32(E1000_RNBC);
3986 adapter->stats.rnbc += rnbc;
3987 net_stats->rx_fifo_errors += rnbc;
3599 adapter->stats.ruc += rd32(E1000_RUC); 3988 adapter->stats.ruc += rd32(E1000_RUC);
3600 adapter->stats.rfc += rd32(E1000_RFC); 3989 adapter->stats.rfc += rd32(E1000_RFC);
3601 adapter->stats.rjc += rd32(E1000_RJC); 3990 adapter->stats.rjc += rd32(E1000_RJC);
@@ -3614,7 +4003,6 @@ void igb_update_stats(struct igb_adapter *adapter)
3614 adapter->stats.bptc += rd32(E1000_BPTC); 4003 adapter->stats.bptc += rd32(E1000_BPTC);
3615 4004
3616 /* used for adaptive IFS */ 4005 /* used for adaptive IFS */
3617
3618 hw->mac.tx_packet_delta = rd32(E1000_TPT); 4006 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3619 adapter->stats.tpt += hw->mac.tx_packet_delta; 4007 adapter->stats.tpt += hw->mac.tx_packet_delta;
3620 hw->mac.collision_delta = rd32(E1000_COLC); 4008 hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3637,56 +4025,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3637 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); 4025 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3638 4026
3639 /* Fill out the OS statistics structure */ 4027 /* Fill out the OS statistics structure */
3640 adapter->net_stats.multicast = adapter->stats.mprc; 4028 net_stats->multicast = adapter->stats.mprc;
3641 adapter->net_stats.collisions = adapter->stats.colc; 4029 net_stats->collisions = adapter->stats.colc;
3642 4030
3643 /* Rx Errors */ 4031 /* Rx Errors */
3644 4032
3645 if (hw->mac.type != e1000_82575) {
3646 u32 rqdpc_tmp;
3647 u64 rqdpc_total = 0;
3648 int i;
3649 /* Read out drops stats per RX queue. Notice RQDPC (Receive
3650 * Queue Drop Packet Count) stats only gets incremented, if
3651 * the DROP_EN but it set (in the SRRCTL register for that
3652 * queue). If DROP_EN bit is NOT set, then the some what
3653 * equivalent count is stored in RNBC (not per queue basis).
3654 * Also note the drop count is due to lack of available
3655 * descriptors.
3656 */
3657 for (i = 0; i < adapter->num_rx_queues; i++) {
3658 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3659 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3660 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
3661 }
3662 adapter->net_stats.rx_fifo_errors = rqdpc_total;
3663 }
3664
3665 /* Note RNBC (Receive No Buffers Count) is an not an exact
3666 * drop count as the hardware FIFO might save the day. Thats
3667 * one of the reason for saving it in rx_fifo_errors, as its
3668 * potentially not a true drop.
3669 */
3670 adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc;
3671
3672 /* RLEC on some newer hardware can be incorrect so build 4033 /* RLEC on some newer hardware can be incorrect so build
3673 * our own version based on RUC and ROC */ 4034 * our own version based on RUC and ROC */
3674 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 4035 net_stats->rx_errors = adapter->stats.rxerrc +
3675 adapter->stats.crcerrs + adapter->stats.algnerrc + 4036 adapter->stats.crcerrs + adapter->stats.algnerrc +
3676 adapter->stats.ruc + adapter->stats.roc + 4037 adapter->stats.ruc + adapter->stats.roc +
3677 adapter->stats.cexterr; 4038 adapter->stats.cexterr;
3678 adapter->net_stats.rx_length_errors = adapter->stats.ruc + 4039 net_stats->rx_length_errors = adapter->stats.ruc +
3679 adapter->stats.roc; 4040 adapter->stats.roc;
3680 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 4041 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3681 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 4042 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3682 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 4043 net_stats->rx_missed_errors = adapter->stats.mpc;
3683 4044
3684 /* Tx Errors */ 4045 /* Tx Errors */
3685 adapter->net_stats.tx_errors = adapter->stats.ecol + 4046 net_stats->tx_errors = adapter->stats.ecol +
3686 adapter->stats.latecol; 4047 adapter->stats.latecol;
3687 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 4048 net_stats->tx_aborted_errors = adapter->stats.ecol;
3688 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 4049 net_stats->tx_window_errors = adapter->stats.latecol;
3689 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 4050 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3690 4051
3691 /* Tx Dropped needs to be maintained elsewhere */ 4052 /* Tx Dropped needs to be maintained elsewhere */
3692 4053
@@ -3707,14 +4068,12 @@ void igb_update_stats(struct igb_adapter *adapter)
3707 4068
3708static irqreturn_t igb_msix_other(int irq, void *data) 4069static irqreturn_t igb_msix_other(int irq, void *data)
3709{ 4070{
3710 struct net_device *netdev = data; 4071 struct igb_adapter *adapter = data;
3711 struct igb_adapter *adapter = netdev_priv(netdev);
3712 struct e1000_hw *hw = &adapter->hw; 4072 struct e1000_hw *hw = &adapter->hw;
3713 u32 icr = rd32(E1000_ICR); 4073 u32 icr = rd32(E1000_ICR);
3714
3715 /* reading ICR causes bit 31 of EICR to be cleared */ 4074 /* reading ICR causes bit 31 of EICR to be cleared */
3716 4075
3717 if(icr & E1000_ICR_DOUTSYNC) { 4076 if (icr & E1000_ICR_DOUTSYNC) {
3718 /* HW is reporting DMA is out of sync */ 4077 /* HW is reporting DMA is out of sync */
3719 adapter->stats.doosync++; 4078 adapter->stats.doosync++;
3720 } 4079 }
@@ -3730,125 +4089,90 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3730 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4089 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3731 } 4090 }
3732 4091
3733 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); 4092 if (adapter->vfs_allocated_count)
4093 wr32(E1000_IMS, E1000_IMS_LSC |
4094 E1000_IMS_VMMB |
4095 E1000_IMS_DOUTSYNC);
4096 else
4097 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
3734 wr32(E1000_EIMS, adapter->eims_other); 4098 wr32(E1000_EIMS, adapter->eims_other);
3735 4099
3736 return IRQ_HANDLED; 4100 return IRQ_HANDLED;
3737} 4101}
3738 4102
3739static irqreturn_t igb_msix_tx(int irq, void *data) 4103static void igb_write_itr(struct igb_q_vector *q_vector)
3740{ 4104{
3741 struct igb_ring *tx_ring = data; 4105 u32 itr_val = q_vector->itr_val & 0x7FFC;
3742 struct igb_adapter *adapter = tx_ring->adapter;
3743 struct e1000_hw *hw = &adapter->hw;
3744 4106
3745#ifdef CONFIG_IGB_DCA 4107 if (!q_vector->set_itr)
3746 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4108 return;
3747 igb_update_tx_dca(tx_ring);
3748#endif
3749 4109
3750 tx_ring->total_bytes = 0; 4110 if (!itr_val)
3751 tx_ring->total_packets = 0; 4111 itr_val = 0x4;
3752 4112
3753 /* auto mask will automatically reenable the interrupt when we write 4113 if (q_vector->itr_shift)
3754 * EICS */ 4114 itr_val |= itr_val << q_vector->itr_shift;
3755 if (!igb_clean_tx_irq(tx_ring))
3756 /* Ring was not completely cleaned, so fire another interrupt */
3757 wr32(E1000_EICS, tx_ring->eims_value);
3758 else 4115 else
3759 wr32(E1000_EIMS, tx_ring->eims_value); 4116 itr_val |= 0x8000000;
3760 4117
3761 return IRQ_HANDLED; 4118 writel(itr_val, q_vector->itr_register);
4119 q_vector->set_itr = 0;
3762} 4120}
3763 4121
3764static void igb_write_itr(struct igb_ring *ring) 4122static irqreturn_t igb_msix_ring(int irq, void *data)
3765{ 4123{
3766 struct e1000_hw *hw = &ring->adapter->hw; 4124 struct igb_q_vector *q_vector = data;
3767 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3768 switch (hw->mac.type) {
3769 case e1000_82576:
3770 wr32(ring->itr_register, ring->itr_val |
3771 0x80000000);
3772 break;
3773 default:
3774 wr32(ring->itr_register, ring->itr_val |
3775 (ring->itr_val << 16));
3776 break;
3777 }
3778 ring->set_itr = 0;
3779 }
3780}
3781
3782static irqreturn_t igb_msix_rx(int irq, void *data)
3783{
3784 struct igb_ring *rx_ring = data;
3785
3786 /* Write the ITR value calculated at the end of the
3787 * previous interrupt.
3788 */
3789 4125
3790 igb_write_itr(rx_ring); 4126 /* Write the ITR value calculated from the previous interrupt. */
4127 igb_write_itr(q_vector);
3791 4128
3792 if (napi_schedule_prep(&rx_ring->napi)) 4129 napi_schedule(&q_vector->napi);
3793 __napi_schedule(&rx_ring->napi);
3794 4130
3795#ifdef CONFIG_IGB_DCA 4131 return IRQ_HANDLED;
3796 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3797 igb_update_rx_dca(rx_ring);
3798#endif
3799 return IRQ_HANDLED;
3800} 4132}
3801 4133
3802#ifdef CONFIG_IGB_DCA 4134#ifdef CONFIG_IGB_DCA
3803static void igb_update_rx_dca(struct igb_ring *rx_ring) 4135static void igb_update_dca(struct igb_q_vector *q_vector)
3804{ 4136{
3805 u32 dca_rxctrl; 4137 struct igb_adapter *adapter = q_vector->adapter;
3806 struct igb_adapter *adapter = rx_ring->adapter;
3807 struct e1000_hw *hw = &adapter->hw; 4138 struct e1000_hw *hw = &adapter->hw;
3808 int cpu = get_cpu(); 4139 int cpu = get_cpu();
3809 int q = rx_ring->reg_idx;
3810 4140
3811 if (rx_ring->cpu != cpu) { 4141 if (q_vector->cpu == cpu)
3812 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); 4142 goto out_no_update;
3813 if (hw->mac.type == e1000_82576) { 4143
3814 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; 4144 if (q_vector->tx_ring) {
3815 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << 4145 int q = q_vector->tx_ring->reg_idx;
3816 E1000_DCA_RXCTRL_CPUID_SHIFT; 4146 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4147 if (hw->mac.type == e1000_82575) {
4148 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4149 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3817 } else { 4150 } else {
4151 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4152 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4153 E1000_DCA_TXCTRL_CPUID_SHIFT;
4154 }
4155 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4156 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4157 }
4158 if (q_vector->rx_ring) {
4159 int q = q_vector->rx_ring->reg_idx;
4160 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4161 if (hw->mac.type == e1000_82575) {
3818 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; 4162 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3819 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 4163 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4164 } else {
4165 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4166 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4167 E1000_DCA_RXCTRL_CPUID_SHIFT;
3820 } 4168 }
3821 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; 4169 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3822 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; 4170 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3823 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; 4171 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3824 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); 4172 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3825 rx_ring->cpu = cpu;
3826 }
3827 put_cpu();
3828}
3829
3830static void igb_update_tx_dca(struct igb_ring *tx_ring)
3831{
3832 u32 dca_txctrl;
3833 struct igb_adapter *adapter = tx_ring->adapter;
3834 struct e1000_hw *hw = &adapter->hw;
3835 int cpu = get_cpu();
3836 int q = tx_ring->reg_idx;
3837
3838 if (tx_ring->cpu != cpu) {
3839 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3840 if (hw->mac.type == e1000_82576) {
3841 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3842 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3843 E1000_DCA_TXCTRL_CPUID_SHIFT;
3844 } else {
3845 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3846 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3847 }
3848 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3849 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3850 tx_ring->cpu = cpu;
3851 } 4173 }
4174 q_vector->cpu = cpu;
4175out_no_update:
3852 put_cpu(); 4176 put_cpu();
3853} 4177}
3854 4178
@@ -3863,13 +4187,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
3863 /* Always use CB2 mode, difference is masked in the CB driver. */ 4187 /* Always use CB2 mode, difference is masked in the CB driver. */
3864 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); 4188 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3865 4189
3866 for (i = 0; i < adapter->num_tx_queues; i++) { 4190 for (i = 0; i < adapter->num_q_vectors; i++) {
3867 adapter->tx_ring[i].cpu = -1; 4191 struct igb_q_vector *q_vector = adapter->q_vector[i];
3868 igb_update_tx_dca(&adapter->tx_ring[i]); 4192 q_vector->cpu = -1;
3869 } 4193 igb_update_dca(q_vector);
3870 for (i = 0; i < adapter->num_rx_queues; i++) {
3871 adapter->rx_ring[i].cpu = -1;
3872 igb_update_rx_dca(&adapter->rx_ring[i]);
3873 } 4194 }
3874} 4195}
3875 4196
@@ -3877,6 +4198,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3877{ 4198{
3878 struct net_device *netdev = dev_get_drvdata(dev); 4199 struct net_device *netdev = dev_get_drvdata(dev);
3879 struct igb_adapter *adapter = netdev_priv(netdev); 4200 struct igb_adapter *adapter = netdev_priv(netdev);
4201 struct pci_dev *pdev = adapter->pdev;
3880 struct e1000_hw *hw = &adapter->hw; 4202 struct e1000_hw *hw = &adapter->hw;
3881 unsigned long event = *(unsigned long *)data; 4203 unsigned long event = *(unsigned long *)data;
3882 4204
@@ -3885,12 +4207,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3885 /* if already enabled, don't do it again */ 4207 /* if already enabled, don't do it again */
3886 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4208 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3887 break; 4209 break;
3888 /* Always use CB2 mode, difference is masked
3889 * in the CB driver. */
3890 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3891 if (dca_add_requester(dev) == 0) { 4210 if (dca_add_requester(dev) == 0) {
3892 adapter->flags |= IGB_FLAG_DCA_ENABLED; 4211 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3893 dev_info(&adapter->pdev->dev, "DCA enabled\n"); 4212 dev_info(&pdev->dev, "DCA enabled\n");
3894 igb_setup_dca(adapter); 4213 igb_setup_dca(adapter);
3895 break; 4214 break;
3896 } 4215 }
@@ -3898,9 +4217,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3898 case DCA_PROVIDER_REMOVE: 4217 case DCA_PROVIDER_REMOVE:
3899 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 4218 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3900 /* without this a class_device is left 4219 /* without this a class_device is left
3901 * hanging around in the sysfs model */ 4220 * hanging around in the sysfs model */
3902 dca_remove_requester(dev); 4221 dca_remove_requester(dev);
3903 dev_info(&adapter->pdev->dev, "DCA disabled\n"); 4222 dev_info(&pdev->dev, "DCA disabled\n");
3904 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 4223 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3905 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); 4224 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3906 } 4225 }
@@ -3930,12 +4249,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
3930 4249
3931 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { 4250 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3932 ping = E1000_PF_CONTROL_MSG; 4251 ping = E1000_PF_CONTROL_MSG;
3933 if (adapter->vf_data[i].clear_to_send) 4252 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
3934 ping |= E1000_VT_MSGTYPE_CTS; 4253 ping |= E1000_VT_MSGTYPE_CTS;
3935 igb_write_mbx(hw, &ping, 1, i); 4254 igb_write_mbx(hw, &ping, 1, i);
3936 } 4255 }
3937} 4256}
3938 4257
4258static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4259{
4260 struct e1000_hw *hw = &adapter->hw;
4261 u32 vmolr = rd32(E1000_VMOLR(vf));
4262 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4263
4264 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
4265 IGB_VF_FLAG_MULTI_PROMISC);
4266 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4267
4268 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4269 vmolr |= E1000_VMOLR_MPME;
4270 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4271 } else {
4272 /*
4273 * if we have hashes and we are clearing a multicast promisc
4274 * flag we need to write the hashes to the MTA as this step
4275 * was previously skipped
4276 */
4277 if (vf_data->num_vf_mc_hashes > 30) {
4278 vmolr |= E1000_VMOLR_MPME;
4279 } else if (vf_data->num_vf_mc_hashes) {
4280 int j;
4281 vmolr |= E1000_VMOLR_ROMPE;
4282 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4283 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4284 }
4285 }
4286
4287 wr32(E1000_VMOLR(vf), vmolr);
4288
4289 /* there are flags left unprocessed, likely not supported */
4290 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4291 return -EINVAL;
4292
4293 return 0;
4294
4295}
4296
3939static int igb_set_vf_multicasts(struct igb_adapter *adapter, 4297static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3940 u32 *msgbuf, u32 vf) 4298 u32 *msgbuf, u32 vf)
3941{ 4299{
@@ -3944,18 +4302,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3944 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 4302 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3945 int i; 4303 int i;
3946 4304
3947 /* only up to 30 hash values supported */ 4305 /* salt away the number of multicast addresses assigned
3948 if (n > 30)
3949 n = 30;
3950
3951 /* salt away the number of multi cast addresses assigned
3952 * to this VF for later use to restore when the PF multi cast 4306 * to this VF for later use to restore when the PF multi cast
3953 * list changes 4307 * list changes
3954 */ 4308 */
3955 vf_data->num_vf_mc_hashes = n; 4309 vf_data->num_vf_mc_hashes = n;
3956 4310
3957 /* VFs are limited to using the MTA hash table for their multicast 4311 /* only up to 30 hash values supported */
3958 * addresses */ 4312 if (n > 30)
4313 n = 30;
4314
4315 /* store the hashes for later use */
3959 for (i = 0; i < n; i++) 4316 for (i = 0; i < n; i++)
3960 vf_data->vf_mc_hashes[i] = hash_list[i]; 4317 vf_data->vf_mc_hashes[i] = hash_list[i];
3961 4318
@@ -3972,9 +4329,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3972 int i, j; 4329 int i, j;
3973 4330
3974 for (i = 0; i < adapter->vfs_allocated_count; i++) { 4331 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4332 u32 vmolr = rd32(E1000_VMOLR(i));
4333 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4334
3975 vf_data = &adapter->vf_data[i]; 4335 vf_data = &adapter->vf_data[i];
3976 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 4336
3977 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 4337 if ((vf_data->num_vf_mc_hashes > 30) ||
4338 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4339 vmolr |= E1000_VMOLR_MPME;
4340 } else if (vf_data->num_vf_mc_hashes) {
4341 vmolr |= E1000_VMOLR_ROMPE;
4342 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4343 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4344 }
4345 wr32(E1000_VMOLR(i), vmolr);
3978 } 4346 }
3979} 4347}
3980 4348
@@ -4012,7 +4380,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4012 struct e1000_hw *hw = &adapter->hw; 4380 struct e1000_hw *hw = &adapter->hw;
4013 u32 reg, i; 4381 u32 reg, i;
4014 4382
4015 /* It is an error to call this function when VFs are not enabled */ 4383 /* The vlvf table only exists on 82576 hardware and newer */
4384 if (hw->mac.type < e1000_82576)
4385 return -1;
4386
4387 /* we only need to do this if VMDq is enabled */
4016 if (!adapter->vfs_allocated_count) 4388 if (!adapter->vfs_allocated_count)
4017 return -1; 4389 return -1;
4018 4390
@@ -4042,16 +4414,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4042 4414
4043 /* if !enabled we need to set this up in vfta */ 4415 /* if !enabled we need to set this up in vfta */
4044 if (!(reg & E1000_VLVF_VLANID_ENABLE)) { 4416 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4045 /* add VID to filter table, if bit already set 4417 /* add VID to filter table */
4046 * PF must have added it outside of table */ 4418 igb_vfta_set(hw, vid, true);
4047 if (igb_vfta_set(hw, vid, true))
4048 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
4049 adapter->vfs_allocated_count);
4050 reg |= E1000_VLVF_VLANID_ENABLE; 4419 reg |= E1000_VLVF_VLANID_ENABLE;
4051 } 4420 }
4052 reg &= ~E1000_VLVF_VLANID_MASK; 4421 reg &= ~E1000_VLVF_VLANID_MASK;
4053 reg |= vid; 4422 reg |= vid;
4054
4055 wr32(E1000_VLVF(i), reg); 4423 wr32(E1000_VLVF(i), reg);
4056 4424
4057 /* do not modify RLPML for PF devices */ 4425 /* do not modify RLPML for PF devices */
@@ -4067,8 +4435,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4067 reg |= size; 4435 reg |= size;
4068 wr32(E1000_VMOLR(vf), reg); 4436 wr32(E1000_VMOLR(vf), reg);
4069 } 4437 }
4070 adapter->vf_data[vf].vlans_enabled++;
4071 4438
4439 adapter->vf_data[vf].vlans_enabled++;
4072 return 0; 4440 return 0;
4073 } 4441 }
4074 } else { 4442 } else {
@@ -4110,15 +4478,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4110 return igb_vlvf_set(adapter, vid, add, vf); 4478 return igb_vlvf_set(adapter, vid, add, vf);
4111} 4479}
4112 4480
4113static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) 4481static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4114{ 4482{
4115 struct e1000_hw *hw = &adapter->hw; 4483 /* clear all flags */
4116 4484 adapter->vf_data[vf].flags = 0;
4117 /* disable mailbox functionality for vf */ 4485 adapter->vf_data[vf].last_nack = jiffies;
4118 adapter->vf_data[vf].clear_to_send = false;
4119 4486
4120 /* reset offloads to defaults */ 4487 /* reset offloads to defaults */
4121 igb_set_vmolr(hw, vf); 4488 igb_set_vmolr(adapter, vf);
4122 4489
4123 /* reset vlans for device */ 4490 /* reset vlans for device */
4124 igb_clear_vf_vfta(adapter, vf); 4491 igb_clear_vf_vfta(adapter, vf);
@@ -4130,7 +4497,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4130 igb_set_rx_mode(adapter->netdev); 4497 igb_set_rx_mode(adapter->netdev);
4131} 4498}
4132 4499
4133static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) 4500static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4501{
4502 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4503
4504 /* generate a new mac address as we were hotplug removed/added */
4505 random_ether_addr(vf_mac);
4506
4507 /* process remaining reset events */
4508 igb_vf_reset(adapter, vf);
4509}
4510
4511static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4134{ 4512{
4135 struct e1000_hw *hw = &adapter->hw; 4513 struct e1000_hw *hw = &adapter->hw;
4136 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 4514 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4139,11 +4517,10 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4139 u8 *addr = (u8 *)(&msgbuf[1]); 4517 u8 *addr = (u8 *)(&msgbuf[1]);
4140 4518
4141 /* process all the same items cleared in a function level reset */ 4519 /* process all the same items cleared in a function level reset */
4142 igb_vf_reset_event(adapter, vf); 4520 igb_vf_reset(adapter, vf);
4143 4521
4144 /* set vf mac address */ 4522 /* set vf mac address */
4145 igb_rar_set(hw, vf_mac, rar_entry); 4523 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4146 igb_set_rah_pool(hw, vf, rar_entry);
4147 4524
4148 /* enable transmit and receive for vf */ 4525 /* enable transmit and receive for vf */
4149 reg = rd32(E1000_VFTE); 4526 reg = rd32(E1000_VFTE);
@@ -4151,8 +4528,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4151 reg = rd32(E1000_VFRE); 4528 reg = rd32(E1000_VFRE);
4152 wr32(E1000_VFRE, reg | (1 << vf)); 4529 wr32(E1000_VFRE, reg | (1 << vf));
4153 4530
4154 /* enable mailbox functionality for vf */ 4531 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4155 adapter->vf_data[vf].clear_to_send = true;
4156 4532
4157 /* reply to reset with ack and vf mac address */ 4533 /* reply to reset with ack and vf mac address */
4158 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 4534 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4162,66 +4538,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4162 4538
4163static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 4539static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4164{ 4540{
4165 unsigned char *addr = (char *)&msg[1]; 4541 unsigned char *addr = (char *)&msg[1];
4166 int err = -1; 4542 int err = -1;
4167 4543
4168 if (is_valid_ether_addr(addr)) 4544 if (is_valid_ether_addr(addr))
4169 err = igb_set_vf_mac(adapter, vf, addr); 4545 err = igb_set_vf_mac(adapter, vf, addr);
4170
4171 return err;
4172 4546
4547 return err;
4173} 4548}
4174 4549
4175static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) 4550static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4176{ 4551{
4177 struct e1000_hw *hw = &adapter->hw; 4552 struct e1000_hw *hw = &adapter->hw;
4553 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4178 u32 msg = E1000_VT_MSGTYPE_NACK; 4554 u32 msg = E1000_VT_MSGTYPE_NACK;
4179 4555
4180 /* if device isn't clear to send it shouldn't be reading either */ 4556 /* if device isn't clear to send it shouldn't be reading either */
4181 if (!adapter->vf_data[vf].clear_to_send) 4557 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4558 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4182 igb_write_mbx(hw, &msg, 1, vf); 4559 igb_write_mbx(hw, &msg, 1, vf);
4183} 4560 vf_data->last_nack = jiffies;
4184
4185
4186static void igb_msg_task(struct igb_adapter *adapter)
4187{
4188 struct e1000_hw *hw = &adapter->hw;
4189 u32 vf;
4190
4191 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4192 /* process any reset requests */
4193 if (!igb_check_for_rst(hw, vf)) {
4194 adapter->vf_data[vf].clear_to_send = false;
4195 igb_vf_reset_event(adapter, vf);
4196 }
4197
4198 /* process any messages pending */
4199 if (!igb_check_for_msg(hw, vf))
4200 igb_rcv_msg_from_vf(adapter, vf);
4201
4202 /* process any acks */
4203 if (!igb_check_for_ack(hw, vf))
4204 igb_rcv_ack_from_vf(adapter, vf);
4205
4206 } 4561 }
4207} 4562}
4208 4563
4209static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) 4564static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4210{ 4565{
4211 u32 mbx_size = E1000_VFMAILBOX_SIZE; 4566 struct pci_dev *pdev = adapter->pdev;
4212 u32 msgbuf[mbx_size]; 4567 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4213 struct e1000_hw *hw = &adapter->hw; 4568 struct e1000_hw *hw = &adapter->hw;
4569 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4214 s32 retval; 4570 s32 retval;
4215 4571
4216 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); 4572 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4217 4573
4218 if (retval) 4574 if (retval)
4219 dev_err(&adapter->pdev->dev, 4575 dev_err(&pdev->dev, "Error receiving message from VF\n");
4220 "Error receiving message from VF\n");
4221 4576
4222 /* this is a message we already processed, do nothing */ 4577 /* this is a message we already processed, do nothing */
4223 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 4578 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4224 return retval; 4579 return;
4225 4580
4226 /* 4581 /*
4227 * until the vf completes a reset it should not be 4582 * until the vf completes a reset it should not be
@@ -4230,20 +4585,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4230 4585
4231 if (msgbuf[0] == E1000_VF_RESET) { 4586 if (msgbuf[0] == E1000_VF_RESET) {
4232 igb_vf_reset_msg(adapter, vf); 4587 igb_vf_reset_msg(adapter, vf);
4233 4588 return;
4234 return retval;
4235 } 4589 }
4236 4590
4237 if (!adapter->vf_data[vf].clear_to_send) { 4591 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
4238 msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 4592 msgbuf[0] = E1000_VT_MSGTYPE_NACK;
4239 igb_write_mbx(hw, msgbuf, 1, vf); 4593 if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4240 return retval; 4594 igb_write_mbx(hw, msgbuf, 1, vf);
4595 vf_data->last_nack = jiffies;
4596 }
4597 return;
4241 } 4598 }
4242 4599
4243 switch ((msgbuf[0] & 0xFFFF)) { 4600 switch ((msgbuf[0] & 0xFFFF)) {
4244 case E1000_VF_SET_MAC_ADDR: 4601 case E1000_VF_SET_MAC_ADDR:
4245 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 4602 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4246 break; 4603 break;
4604 case E1000_VF_SET_PROMISC:
4605 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4606 break;
4247 case E1000_VF_SET_MULTICAST: 4607 case E1000_VF_SET_MULTICAST:
4248 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); 4608 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4249 break; 4609 break;
@@ -4254,7 +4614,7 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4254 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 4614 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4255 break; 4615 break;
4256 default: 4616 default:
4257 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 4617 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4258 retval = -1; 4618 retval = -1;
4259 break; 4619 break;
4260 } 4620 }
@@ -4268,8 +4628,53 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4268 msgbuf[0] |= E1000_VT_MSGTYPE_CTS; 4628 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4269 4629
4270 igb_write_mbx(hw, msgbuf, 1, vf); 4630 igb_write_mbx(hw, msgbuf, 1, vf);
4631}
4271 4632
4272 return retval; 4633static void igb_msg_task(struct igb_adapter *adapter)
4634{
4635 struct e1000_hw *hw = &adapter->hw;
4636 u32 vf;
4637
4638 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4639 /* process any reset requests */
4640 if (!igb_check_for_rst(hw, vf))
4641 igb_vf_reset_event(adapter, vf);
4642
4643 /* process any messages pending */
4644 if (!igb_check_for_msg(hw, vf))
4645 igb_rcv_msg_from_vf(adapter, vf);
4646
4647 /* process any acks */
4648 if (!igb_check_for_ack(hw, vf))
4649 igb_rcv_ack_from_vf(adapter, vf);
4650 }
4651}
4652
4653/**
4654 * igb_set_uta - Set unicast filter table address
4655 * @adapter: board private structure
4656 *
4657 * The unicast table address is a register array of 32-bit registers.
4658 * The table is meant to be used in a way similar to how the MTA is used
4659 * however due to certain limitations in the hardware it is necessary to
4660 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscous
4661 * enable bit to allow vlan tag stripping when promiscous mode is enabled
4662 **/
4663static void igb_set_uta(struct igb_adapter *adapter)
4664{
4665 struct e1000_hw *hw = &adapter->hw;
4666 int i;
4667
4668 /* The UTA table only exists on 82576 hardware and newer */
4669 if (hw->mac.type < e1000_82576)
4670 return;
4671
4672 /* we only need to do this if VMDq is enabled */
4673 if (!adapter->vfs_allocated_count)
4674 return;
4675
4676 for (i = 0; i < hw->mac.uta_reg_count; i++)
4677 array_wr32(E1000_UTA, i, ~0);
4273} 4678}
4274 4679
4275/** 4680/**
@@ -4279,15 +4684,15 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4279 **/ 4684 **/
4280static irqreturn_t igb_intr_msi(int irq, void *data) 4685static irqreturn_t igb_intr_msi(int irq, void *data)
4281{ 4686{
4282 struct net_device *netdev = data; 4687 struct igb_adapter *adapter = data;
4283 struct igb_adapter *adapter = netdev_priv(netdev); 4688 struct igb_q_vector *q_vector = adapter->q_vector[0];
4284 struct e1000_hw *hw = &adapter->hw; 4689 struct e1000_hw *hw = &adapter->hw;
4285 /* read ICR disables interrupts using IAM */ 4690 /* read ICR disables interrupts using IAM */
4286 u32 icr = rd32(E1000_ICR); 4691 u32 icr = rd32(E1000_ICR);
4287 4692
4288 igb_write_itr(adapter->rx_ring); 4693 igb_write_itr(q_vector);
4289 4694
4290 if(icr & E1000_ICR_DOUTSYNC) { 4695 if (icr & E1000_ICR_DOUTSYNC) {
4291 /* HW is reporting DMA is out of sync */ 4696 /* HW is reporting DMA is out of sync */
4292 adapter->stats.doosync++; 4697 adapter->stats.doosync++;
4293 } 4698 }
@@ -4298,7 +4703,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4298 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4703 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4299 } 4704 }
4300 4705
4301 napi_schedule(&adapter->rx_ring[0].napi); 4706 napi_schedule(&q_vector->napi);
4302 4707
4303 return IRQ_HANDLED; 4708 return IRQ_HANDLED;
4304} 4709}
@@ -4310,8 +4715,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4310 **/ 4715 **/
4311static irqreturn_t igb_intr(int irq, void *data) 4716static irqreturn_t igb_intr(int irq, void *data)
4312{ 4717{
4313 struct net_device *netdev = data; 4718 struct igb_adapter *adapter = data;
4314 struct igb_adapter *adapter = netdev_priv(netdev); 4719 struct igb_q_vector *q_vector = adapter->q_vector[0];
4315 struct e1000_hw *hw = &adapter->hw; 4720 struct e1000_hw *hw = &adapter->hw;
4316 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 4721 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4317 * need for the IMC write */ 4722 * need for the IMC write */
@@ -4319,14 +4724,14 @@ static irqreturn_t igb_intr(int irq, void *data)
4319 if (!icr) 4724 if (!icr)
4320 return IRQ_NONE; /* Not our interrupt */ 4725 return IRQ_NONE; /* Not our interrupt */
4321 4726
4322 igb_write_itr(adapter->rx_ring); 4727 igb_write_itr(q_vector);
4323 4728
4324 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 4729 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4325 * not set, then the adapter didn't send an interrupt */ 4730 * not set, then the adapter didn't send an interrupt */
4326 if (!(icr & E1000_ICR_INT_ASSERTED)) 4731 if (!(icr & E1000_ICR_INT_ASSERTED))
4327 return IRQ_NONE; 4732 return IRQ_NONE;
4328 4733
4329 if(icr & E1000_ICR_DOUTSYNC) { 4734 if (icr & E1000_ICR_DOUTSYNC) {
4330 /* HW is reporting DMA is out of sync */ 4735 /* HW is reporting DMA is out of sync */
4331 adapter->stats.doosync++; 4736 adapter->stats.doosync++;
4332 } 4737 }
@@ -4338,26 +4743,27 @@ static irqreturn_t igb_intr(int irq, void *data)
4338 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4743 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4339 } 4744 }
4340 4745
4341 napi_schedule(&adapter->rx_ring[0].napi); 4746 napi_schedule(&q_vector->napi);
4342 4747
4343 return IRQ_HANDLED; 4748 return IRQ_HANDLED;
4344} 4749}
4345 4750
4346static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) 4751static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4347{ 4752{
4348 struct igb_adapter *adapter = rx_ring->adapter; 4753 struct igb_adapter *adapter = q_vector->adapter;
4349 struct e1000_hw *hw = &adapter->hw; 4754 struct e1000_hw *hw = &adapter->hw;
4350 4755
4351 if (adapter->itr_setting & 3) { 4756 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4352 if (adapter->num_rx_queues == 1) 4757 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
4758 if (!adapter->msix_entries)
4353 igb_set_itr(adapter); 4759 igb_set_itr(adapter);
4354 else 4760 else
4355 igb_update_ring_itr(rx_ring); 4761 igb_update_ring_itr(q_vector);
4356 } 4762 }
4357 4763
4358 if (!test_bit(__IGB_DOWN, &adapter->state)) { 4764 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4359 if (adapter->msix_entries) 4765 if (adapter->msix_entries)
4360 wr32(E1000_EIMS, rx_ring->eims_value); 4766 wr32(E1000_EIMS, q_vector->eims_value);
4361 else 4767 else
4362 igb_irq_enable(adapter); 4768 igb_irq_enable(adapter);
4363 } 4769 }
@@ -4370,76 +4776,101 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
4370 **/ 4776 **/
4371static int igb_poll(struct napi_struct *napi, int budget) 4777static int igb_poll(struct napi_struct *napi, int budget)
4372{ 4778{
4373 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); 4779 struct igb_q_vector *q_vector = container_of(napi,
4374 int work_done = 0; 4780 struct igb_q_vector,
4781 napi);
4782 int tx_clean_complete = 1, work_done = 0;
4375 4783
4376#ifdef CONFIG_IGB_DCA 4784#ifdef CONFIG_IGB_DCA
4377 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4785 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4378 igb_update_rx_dca(rx_ring); 4786 igb_update_dca(q_vector);
4379#endif 4787#endif
4380 igb_clean_rx_irq_adv(rx_ring, &work_done, budget); 4788 if (q_vector->tx_ring)
4789 tx_clean_complete = igb_clean_tx_irq(q_vector);
4381 4790
4382 if (rx_ring->buddy) { 4791 if (q_vector->rx_ring)
4383#ifdef CONFIG_IGB_DCA 4792 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4384 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4793
4385 igb_update_tx_dca(rx_ring->buddy); 4794 if (!tx_clean_complete)
4386#endif 4795 work_done = budget;
4387 if (!igb_clean_tx_irq(rx_ring->buddy))
4388 work_done = budget;
4389 }
4390 4796
4391 /* If not enough Rx work done, exit the polling mode */ 4797 /* If not enough Rx work done, exit the polling mode */
4392 if (work_done < budget) { 4798 if (work_done < budget) {
4393 napi_complete(napi); 4799 napi_complete(napi);
4394 igb_rx_irq_enable(rx_ring); 4800 igb_ring_irq_enable(q_vector);
4395 } 4801 }
4396 4802
4397 return work_done; 4803 return work_done;
4398} 4804}
4399 4805
4400/** 4806/**
4401 * igb_hwtstamp - utility function which checks for TX time stamp 4807 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
4402 * @adapter: board private structure 4808 * @adapter: board private structure
4809 * @shhwtstamps: timestamp structure to update
4810 * @regval: unsigned 64bit system time value.
4811 *
4812 * We need to convert the system time value stored in the RX/TXSTMP registers
4813 * into a hwtstamp which can be used by the upper level timestamping functions
4814 */
4815static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4816 struct skb_shared_hwtstamps *shhwtstamps,
4817 u64 regval)
4818{
4819 u64 ns;
4820
4821 /*
4822 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
4823 * 24 to match clock shift we setup earlier.
4824 */
4825 if (adapter->hw.mac.type == e1000_82580)
4826 regval <<= IGB_82580_TSYNC_SHIFT;
4827
4828 ns = timecounter_cyc2time(&adapter->clock, regval);
4829 timecompare_update(&adapter->compare, ns);
4830 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4831 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4832 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4833}
4834
4835/**
4836 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4837 * @q_vector: pointer to q_vector containing needed info
4403 * @skb: packet that was just sent 4838 * @skb: packet that was just sent
4404 * 4839 *
4405 * If we were asked to do hardware stamping and such a time stamp is 4840 * If we were asked to do hardware stamping and such a time stamp is
4406 * available, then it must have been for this skb here because we only 4841 * available, then it must have been for this skb here because we only
4407 * allow only one such packet into the queue. 4842 * allow only one such packet into the queue.
4408 */ 4843 */
4409static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) 4844static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4410{ 4845{
4846 struct igb_adapter *adapter = q_vector->adapter;
4411 union skb_shared_tx *shtx = skb_tx(skb); 4847 union skb_shared_tx *shtx = skb_tx(skb);
4412 struct e1000_hw *hw = &adapter->hw; 4848 struct e1000_hw *hw = &adapter->hw;
4849 struct skb_shared_hwtstamps shhwtstamps;
4850 u64 regval;
4413 4851
4414 if (unlikely(shtx->hardware)) { 4852 /* if skb does not support hw timestamp or TX stamp not valid exit */
4415 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID; 4853 if (likely(!shtx->hardware) ||
4416 if (valid) { 4854 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4417 u64 regval = rd32(E1000_TXSTMPL); 4855 return;
4418 u64 ns; 4856
4419 struct skb_shared_hwtstamps shhwtstamps; 4857 regval = rd32(E1000_TXSTMPL);
4420 4858 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4421 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 4859
4422 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 4860 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4423 ns = timecounter_cyc2time(&adapter->clock, 4861 skb_tstamp_tx(skb, &shhwtstamps);
4424 regval);
4425 timecompare_update(&adapter->compare, ns);
4426 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4427 shhwtstamps.syststamp =
4428 timecompare_transform(&adapter->compare, ns);
4429 skb_tstamp_tx(skb, &shhwtstamps);
4430 }
4431 }
4432} 4862}
4433 4863
4434/** 4864/**
4435 * igb_clean_tx_irq - Reclaim resources after transmit completes 4865 * igb_clean_tx_irq - Reclaim resources after transmit completes
4436 * @adapter: board private structure 4866 * @q_vector: pointer to q_vector containing needed info
4437 * returns true if ring is completely cleaned 4867 * returns true if ring is completely cleaned
4438 **/ 4868 **/
4439static bool igb_clean_tx_irq(struct igb_ring *tx_ring) 4869static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4440{ 4870{
4441 struct igb_adapter *adapter = tx_ring->adapter; 4871 struct igb_adapter *adapter = q_vector->adapter;
4442 struct net_device *netdev = adapter->netdev; 4872 struct igb_ring *tx_ring = q_vector->tx_ring;
4873 struct net_device *netdev = tx_ring->netdev;
4443 struct e1000_hw *hw = &adapter->hw; 4874 struct e1000_hw *hw = &adapter->hw;
4444 struct igb_buffer *buffer_info; 4875 struct igb_buffer *buffer_info;
4445 struct sk_buff *skb; 4876 struct sk_buff *skb;
@@ -4470,10 +4901,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4470 total_packets += segs; 4901 total_packets += segs;
4471 total_bytes += bytecount; 4902 total_bytes += bytecount;
4472 4903
4473 igb_tx_hwtstamp(adapter, skb); 4904 igb_tx_hwtstamp(q_vector, skb);
4474 } 4905 }
4475 4906
4476 igb_unmap_and_free_tx_resource(adapter, buffer_info); 4907 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4477 tx_desc->wb.status = 0; 4908 tx_desc->wb.status = 0;
4478 4909
4479 i++; 4910 i++;
@@ -4496,7 +4927,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4496 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 4927 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4497 !(test_bit(__IGB_DOWN, &adapter->state))) { 4928 !(test_bit(__IGB_DOWN, &adapter->state))) {
4498 netif_wake_subqueue(netdev, tx_ring->queue_index); 4929 netif_wake_subqueue(netdev, tx_ring->queue_index);
4499 ++adapter->restart_queue; 4930 tx_ring->tx_stats.restart_queue++;
4500 } 4931 }
4501 } 4932 }
4502 4933
@@ -4511,7 +4942,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4511 E1000_STATUS_TXOFF)) { 4942 E1000_STATUS_TXOFF)) {
4512 4943
4513 /* detected Tx unit hang */ 4944 /* detected Tx unit hang */
4514 dev_err(&adapter->pdev->dev, 4945 dev_err(&tx_ring->pdev->dev,
4515 "Detected Tx Unit Hang\n" 4946 "Detected Tx Unit Hang\n"
4516 " Tx Queue <%d>\n" 4947 " Tx Queue <%d>\n"
4517 " TDH <%x>\n" 4948 " TDH <%x>\n"
@@ -4524,11 +4955,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4524 " jiffies <%lx>\n" 4955 " jiffies <%lx>\n"
4525 " desc.status <%x>\n", 4956 " desc.status <%x>\n",
4526 tx_ring->queue_index, 4957 tx_ring->queue_index,
4527 readl(adapter->hw.hw_addr + tx_ring->head), 4958 readl(tx_ring->head),
4528 readl(adapter->hw.hw_addr + tx_ring->tail), 4959 readl(tx_ring->tail),
4529 tx_ring->next_to_use, 4960 tx_ring->next_to_use,
4530 tx_ring->next_to_clean, 4961 tx_ring->next_to_clean,
4531 tx_ring->buffer_info[i].time_stamp, 4962 tx_ring->buffer_info[eop].time_stamp,
4532 eop, 4963 eop,
4533 jiffies, 4964 jiffies,
4534 eop_desc->wb.status); 4965 eop_desc->wb.status);
@@ -4539,43 +4970,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4539 tx_ring->total_packets += total_packets; 4970 tx_ring->total_packets += total_packets;
4540 tx_ring->tx_stats.bytes += total_bytes; 4971 tx_ring->tx_stats.bytes += total_bytes;
4541 tx_ring->tx_stats.packets += total_packets; 4972 tx_ring->tx_stats.packets += total_packets;
4542 adapter->net_stats.tx_bytes += total_bytes;
4543 adapter->net_stats.tx_packets += total_packets;
4544 return (count < tx_ring->count); 4973 return (count < tx_ring->count);
4545} 4974}
4546 4975
4547/** 4976/**
4548 * igb_receive_skb - helper function to handle rx indications 4977 * igb_receive_skb - helper function to handle rx indications
4549 * @ring: pointer to receive ring receving this packet 4978 * @q_vector: structure containing interrupt and ring information
4550 * @status: descriptor status field as written by hardware 4979 * @skb: packet to send up
4551 * @rx_desc: receive descriptor containing vlan and type information. 4980 * @vlan_tag: vlan tag for packet
4552 * @skb: pointer to sk_buff to be indicated to stack
4553 **/ 4981 **/
4554static void igb_receive_skb(struct igb_ring *ring, u8 status, 4982static void igb_receive_skb(struct igb_q_vector *q_vector,
4555 union e1000_adv_rx_desc * rx_desc, 4983 struct sk_buff *skb,
4556 struct sk_buff *skb) 4984 u16 vlan_tag)
4557{ 4985{
4558 struct igb_adapter * adapter = ring->adapter; 4986 struct igb_adapter *adapter = q_vector->adapter;
4559 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 4987
4560 4988 if (vlan_tag)
4561 skb_record_rx_queue(skb, ring->queue_index); 4989 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
4562 if (vlan_extracted) 4990 vlan_tag, skb);
4563 vlan_gro_receive(&ring->napi, adapter->vlgrp,
4564 le16_to_cpu(rx_desc->wb.upper.vlan),
4565 skb);
4566 else 4991 else
4567 napi_gro_receive(&ring->napi, skb); 4992 napi_gro_receive(&q_vector->napi, skb);
4568} 4993}
4569 4994
4570static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, 4995static inline void igb_rx_checksum_adv(struct igb_ring *ring,
4571 u32 status_err, struct sk_buff *skb) 4996 u32 status_err, struct sk_buff *skb)
4572{ 4997{
4573 skb->ip_summed = CHECKSUM_NONE; 4998 skb->ip_summed = CHECKSUM_NONE;
4574 4999
4575 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 5000 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
4576 if ((status_err & E1000_RXD_STAT_IXSM) || 5001 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
4577 (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) 5002 (status_err & E1000_RXD_STAT_IXSM))
4578 return; 5003 return;
5004
4579 /* TCP/UDP checksum error bit is set */ 5005 /* TCP/UDP checksum error bit is set */
4580 if (status_err & 5006 if (status_err &
4581 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { 5007 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4584,9 +5010,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4584 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 5010 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
4585 * packets, (aka let the stack check the crc32c) 5011 * packets, (aka let the stack check the crc32c)
4586 */ 5012 */
4587 if (!((adapter->hw.mac.type == e1000_82576) && 5013 if ((skb->len == 60) &&
4588 (skb->len == 60))) 5014 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
4589 adapter->hw_csum_err++; 5015 ring->rx_stats.csum_err++;
5016
4590 /* let the stack verify checksum errors */ 5017 /* let the stack verify checksum errors */
4591 return; 5018 return;
4592 } 5019 }
@@ -4594,11 +5021,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4594 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 5021 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4595 skb->ip_summed = CHECKSUM_UNNECESSARY; 5022 skb->ip_summed = CHECKSUM_UNNECESSARY;
4596 5023
4597 dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err); 5024 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
4598 adapter->hw_csum_good++;
4599} 5025}
4600 5026
4601static inline u16 igb_get_hlen(struct igb_adapter *adapter, 5027static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5028 struct sk_buff *skb)
5029{
5030 struct igb_adapter *adapter = q_vector->adapter;
5031 struct e1000_hw *hw = &adapter->hw;
5032 u64 regval;
5033
5034 /*
5035 * If this bit is set, then the RX registers contain the time stamp. No
5036 * other packet will be time stamped until we read these registers, so
5037 * read the registers to make them available again. Because only one
5038 * packet can be time stamped at a time, we know that the register
5039 * values must belong to this one here and therefore we don't need to
5040 * compare any of the additional attributes stored for it.
5041 *
5042 * If nothing went wrong, then it should have a skb_shared_tx that we
5043 * can turn into a skb_shared_hwtstamps.
5044 */
5045 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
5046 return;
5047 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5048 return;
5049
5050 regval = rd32(E1000_RXSTMPL);
5051 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5052
5053 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5054}
5055static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
4602 union e1000_adv_rx_desc *rx_desc) 5056 union e1000_adv_rx_desc *rx_desc)
4603{ 5057{
4604 /* HW will not DMA in data larger than the given buffer, even if it 5058 /* HW will not DMA in data larger than the given buffer, even if it
@@ -4607,27 +5061,28 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
4607 */ 5061 */
4608 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 5062 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4609 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 5063 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4610 if (hlen > adapter->rx_ps_hdr_size) 5064 if (hlen > rx_ring->rx_buffer_len)
4611 hlen = adapter->rx_ps_hdr_size; 5065 hlen = rx_ring->rx_buffer_len;
4612 return hlen; 5066 return hlen;
4613} 5067}
4614 5068
4615static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, 5069static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4616 int *work_done, int budget) 5070 int *work_done, int budget)
4617{ 5071{
4618 struct igb_adapter *adapter = rx_ring->adapter; 5072 struct igb_ring *rx_ring = q_vector->rx_ring;
4619 struct net_device *netdev = adapter->netdev; 5073 struct net_device *netdev = rx_ring->netdev;
4620 struct e1000_hw *hw = &adapter->hw; 5074 struct pci_dev *pdev = rx_ring->pdev;
4621 struct pci_dev *pdev = adapter->pdev;
4622 union e1000_adv_rx_desc *rx_desc , *next_rxd; 5075 union e1000_adv_rx_desc *rx_desc , *next_rxd;
4623 struct igb_buffer *buffer_info , *next_buffer; 5076 struct igb_buffer *buffer_info , *next_buffer;
4624 struct sk_buff *skb; 5077 struct sk_buff *skb;
4625 bool cleaned = false; 5078 bool cleaned = false;
4626 int cleaned_count = 0; 5079 int cleaned_count = 0;
5080 int current_node = numa_node_id();
4627 unsigned int total_bytes = 0, total_packets = 0; 5081 unsigned int total_bytes = 0, total_packets = 0;
4628 unsigned int i; 5082 unsigned int i;
4629 u32 staterr; 5083 u32 staterr;
4630 u16 length; 5084 u16 length;
5085 u16 vlan_tag;
4631 5086
4632 i = rx_ring->next_to_clean; 5087 i = rx_ring->next_to_clean;
4633 buffer_info = &rx_ring->buffer_info[i]; 5088 buffer_info = &rx_ring->buffer_info[i];
@@ -4646,6 +5101,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4646 i++; 5101 i++;
4647 if (i == rx_ring->count) 5102 if (i == rx_ring->count)
4648 i = 0; 5103 i = 0;
5104
4649 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); 5105 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4650 prefetch(next_rxd); 5106 prefetch(next_rxd);
4651 next_buffer = &rx_ring->buffer_info[i]; 5107 next_buffer = &rx_ring->buffer_info[i];
@@ -4654,23 +5110,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4654 cleaned = true; 5110 cleaned = true;
4655 cleaned_count++; 5111 cleaned_count++;
4656 5112
4657 /* this is the fast path for the non-packet split case */
4658 if (!adapter->rx_ps_hdr_size) {
4659 pci_unmap_single(pdev, buffer_info->dma,
4660 adapter->rx_buffer_len,
4661 PCI_DMA_FROMDEVICE);
4662 buffer_info->dma = 0;
4663 skb_put(skb, length);
4664 goto send_up;
4665 }
4666
4667 if (buffer_info->dma) { 5113 if (buffer_info->dma) {
4668 u16 hlen = igb_get_hlen(adapter, rx_desc);
4669 pci_unmap_single(pdev, buffer_info->dma, 5114 pci_unmap_single(pdev, buffer_info->dma,
4670 adapter->rx_ps_hdr_size, 5115 rx_ring->rx_buffer_len,
4671 PCI_DMA_FROMDEVICE); 5116 PCI_DMA_FROMDEVICE);
4672 buffer_info->dma = 0; 5117 buffer_info->dma = 0;
4673 skb_put(skb, hlen); 5118 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
5119 skb_put(skb, length);
5120 goto send_up;
5121 }
5122 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
4674 } 5123 }
4675 5124
4676 if (length) { 5125 if (length) {
@@ -4683,15 +5132,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4683 buffer_info->page_offset, 5132 buffer_info->page_offset,
4684 length); 5133 length);
4685 5134
4686 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || 5135 if ((page_count(buffer_info->page) != 1) ||
4687 (page_count(buffer_info->page) != 1)) 5136 (page_to_nid(buffer_info->page) != current_node))
4688 buffer_info->page = NULL; 5137 buffer_info->page = NULL;
4689 else 5138 else
4690 get_page(buffer_info->page); 5139 get_page(buffer_info->page);
4691 5140
4692 skb->len += length; 5141 skb->len += length;
4693 skb->data_len += length; 5142 skb->data_len += length;
4694
4695 skb->truesize += length; 5143 skb->truesize += length;
4696 } 5144 }
4697 5145
@@ -4703,60 +5151,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4703 goto next_desc; 5151 goto next_desc;
4704 } 5152 }
4705send_up: 5153send_up:
4706 /*
4707 * If this bit is set, then the RX registers contain
4708 * the time stamp. No other packet will be time
4709 * stamped until we read these registers, so read the
4710 * registers to make them available again. Because
4711 * only one packet can be time stamped at a time, we
4712 * know that the register values must belong to this
4713 * one here and therefore we don't need to compare
4714 * any of the additional attributes stored for it.
4715 *
4716 * If nothing went wrong, then it should have a
4717 * skb_shared_tx that we can turn into a
4718 * skb_shared_hwtstamps.
4719 *
4720 * TODO: can time stamping be triggered (thus locking
4721 * the registers) without the packet reaching this point
4722 * here? In that case RX time stamping would get stuck.
4723 *
4724 * TODO: in "time stamp all packets" mode this bit is
4725 * not set. Need a global flag for this mode and then
4726 * always read the registers. Cannot be done without
4727 * a race condition.
4728 */
4729 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4730 u64 regval;
4731 u64 ns;
4732 struct skb_shared_hwtstamps *shhwtstamps =
4733 skb_hwtstamps(skb);
4734
4735 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4736 "igb: no RX time stamp available for time stamped packet");
4737 regval = rd32(E1000_RXSTMPL);
4738 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4739 ns = timecounter_cyc2time(&adapter->clock, regval);
4740 timecompare_update(&adapter->compare, ns);
4741 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4742 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4743 shhwtstamps->syststamp =
4744 timecompare_transform(&adapter->compare, ns);
4745 }
4746
4747 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 5154 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4748 dev_kfree_skb_irq(skb); 5155 dev_kfree_skb_irq(skb);
4749 goto next_desc; 5156 goto next_desc;
4750 } 5157 }
4751 5158
5159 igb_rx_hwtstamp(q_vector, staterr, skb);
4752 total_bytes += skb->len; 5160 total_bytes += skb->len;
4753 total_packets++; 5161 total_packets++;
4754 5162
4755 igb_rx_checksum_adv(adapter, staterr, skb); 5163 igb_rx_checksum_adv(rx_ring, staterr, skb);
4756 5164
4757 skb->protocol = eth_type_trans(skb, netdev); 5165 skb->protocol = eth_type_trans(skb, netdev);
5166 skb_record_rx_queue(skb, rx_ring->queue_index);
5167
5168 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5169 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
4758 5170
4759 igb_receive_skb(rx_ring, staterr, rx_desc, skb); 5171 igb_receive_skb(q_vector, skb, vlan_tag);
4760 5172
4761next_desc: 5173next_desc:
4762 rx_desc->wb.upper.status_error = 0; 5174 rx_desc->wb.upper.status_error = 0;
@@ -4783,8 +5195,6 @@ next_desc:
4783 rx_ring->total_bytes += total_bytes; 5195 rx_ring->total_bytes += total_bytes;
4784 rx_ring->rx_stats.packets += total_packets; 5196 rx_ring->rx_stats.packets += total_packets;
4785 rx_ring->rx_stats.bytes += total_bytes; 5197 rx_ring->rx_stats.bytes += total_bytes;
4786 adapter->net_stats.rx_bytes += total_bytes;
4787 adapter->net_stats.rx_packets += total_packets;
4788 return cleaned; 5198 return cleaned;
4789} 5199}
4790 5200
@@ -4792,12 +5202,9 @@ next_desc:
4792 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split 5202 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
4793 * @adapter: address of board private structure 5203 * @adapter: address of board private structure
4794 **/ 5204 **/
4795static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, 5205void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
4796 int cleaned_count)
4797{ 5206{
4798 struct igb_adapter *adapter = rx_ring->adapter; 5207 struct net_device *netdev = rx_ring->netdev;
4799 struct net_device *netdev = adapter->netdev;
4800 struct pci_dev *pdev = adapter->pdev;
4801 union e1000_adv_rx_desc *rx_desc; 5208 union e1000_adv_rx_desc *rx_desc;
4802 struct igb_buffer *buffer_info; 5209 struct igb_buffer *buffer_info;
4803 struct sk_buff *skb; 5210 struct sk_buff *skb;
@@ -4807,19 +5214,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4807 i = rx_ring->next_to_use; 5214 i = rx_ring->next_to_use;
4808 buffer_info = &rx_ring->buffer_info[i]; 5215 buffer_info = &rx_ring->buffer_info[i];
4809 5216
4810 if (adapter->rx_ps_hdr_size) 5217 bufsz = rx_ring->rx_buffer_len;
4811 bufsz = adapter->rx_ps_hdr_size;
4812 else
4813 bufsz = adapter->rx_buffer_len;
4814 5218
4815 while (cleaned_count--) { 5219 while (cleaned_count--) {
4816 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 5220 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4817 5221
4818 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { 5222 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
4819 if (!buffer_info->page) { 5223 if (!buffer_info->page) {
4820 buffer_info->page = alloc_page(GFP_ATOMIC); 5224 buffer_info->page = netdev_alloc_page(netdev);
4821 if (!buffer_info->page) { 5225 if (!buffer_info->page) {
4822 adapter->alloc_rx_buff_failed++; 5226 rx_ring->rx_stats.alloc_failed++;
4823 goto no_buffers; 5227 goto no_buffers;
4824 } 5228 }
4825 buffer_info->page_offset = 0; 5229 buffer_info->page_offset = 0;
@@ -4827,39 +5231,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4827 buffer_info->page_offset ^= PAGE_SIZE / 2; 5231 buffer_info->page_offset ^= PAGE_SIZE / 2;
4828 } 5232 }
4829 buffer_info->page_dma = 5233 buffer_info->page_dma =
4830 pci_map_page(pdev, buffer_info->page, 5234 pci_map_page(rx_ring->pdev, buffer_info->page,
4831 buffer_info->page_offset, 5235 buffer_info->page_offset,
4832 PAGE_SIZE / 2, 5236 PAGE_SIZE / 2,
4833 PCI_DMA_FROMDEVICE); 5237 PCI_DMA_FROMDEVICE);
5238 if (pci_dma_mapping_error(rx_ring->pdev,
5239 buffer_info->page_dma)) {
5240 buffer_info->page_dma = 0;
5241 rx_ring->rx_stats.alloc_failed++;
5242 goto no_buffers;
5243 }
4834 } 5244 }
4835 5245
4836 if (!buffer_info->skb) { 5246 skb = buffer_info->skb;
4837 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); 5247 if (!skb) {
5248 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4838 if (!skb) { 5249 if (!skb) {
4839 adapter->alloc_rx_buff_failed++; 5250 rx_ring->rx_stats.alloc_failed++;
4840 goto no_buffers; 5251 goto no_buffers;
4841 } 5252 }
4842 5253
4843 /* Make buffer alignment 2 beyond a 16 byte boundary
4844 * this will result in a 16 byte aligned IP header after
4845 * the 14 byte MAC header is removed
4846 */
4847 skb_reserve(skb, NET_IP_ALIGN);
4848
4849 buffer_info->skb = skb; 5254 buffer_info->skb = skb;
4850 buffer_info->dma = pci_map_single(pdev, skb->data, 5255 }
5256 if (!buffer_info->dma) {
5257 buffer_info->dma = pci_map_single(rx_ring->pdev,
5258 skb->data,
4851 bufsz, 5259 bufsz,
4852 PCI_DMA_FROMDEVICE); 5260 PCI_DMA_FROMDEVICE);
5261 if (pci_dma_mapping_error(rx_ring->pdev,
5262 buffer_info->dma)) {
5263 buffer_info->dma = 0;
5264 rx_ring->rx_stats.alloc_failed++;
5265 goto no_buffers;
5266 }
4853 } 5267 }
4854 /* Refresh the desc even if buffer_addrs didn't change because 5268 /* Refresh the desc even if buffer_addrs didn't change because
4855 * each write-back erases this info. */ 5269 * each write-back erases this info. */
4856 if (adapter->rx_ps_hdr_size) { 5270 if (bufsz < IGB_RXBUFFER_1024) {
4857 rx_desc->read.pkt_addr = 5271 rx_desc->read.pkt_addr =
4858 cpu_to_le64(buffer_info->page_dma); 5272 cpu_to_le64(buffer_info->page_dma);
4859 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 5273 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4860 } else { 5274 } else {
4861 rx_desc->read.pkt_addr = 5275 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
4862 cpu_to_le64(buffer_info->dma);
4863 rx_desc->read.hdr_addr = 0; 5276 rx_desc->read.hdr_addr = 0;
4864 } 5277 }
4865 5278
@@ -4882,7 +5295,7 @@ no_buffers:
4882 * applicable for weak-ordered memory model archs, 5295 * applicable for weak-ordered memory model archs,
4883 * such as IA-64). */ 5296 * such as IA-64). */
4884 wmb(); 5297 wmb();
4885 writel(i, adapter->hw.hw_addr + rx_ring->tail); 5298 writel(i, rx_ring->tail);
4886 } 5299 }
4887} 5300}
4888 5301
@@ -4941,13 +5354,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4941 struct igb_adapter *adapter = netdev_priv(netdev); 5354 struct igb_adapter *adapter = netdev_priv(netdev);
4942 struct e1000_hw *hw = &adapter->hw; 5355 struct e1000_hw *hw = &adapter->hw;
4943 struct hwtstamp_config config; 5356 struct hwtstamp_config config;
4944 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; 5357 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
4945 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED; 5358 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
4946 u32 tsync_rx_ctl_type = 0;
4947 u32 tsync_rx_cfg = 0; 5359 u32 tsync_rx_cfg = 0;
4948 int is_l4 = 0; 5360 bool is_l4 = false;
4949 int is_l2 = 0; 5361 bool is_l2 = false;
4950 short port = 319; /* PTP */
4951 u32 regval; 5362 u32 regval;
4952 5363
4953 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5364 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -4959,10 +5370,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4959 5370
4960 switch (config.tx_type) { 5371 switch (config.tx_type) {
4961 case HWTSTAMP_TX_OFF: 5372 case HWTSTAMP_TX_OFF:
4962 tsync_tx_ctl_bit = 0; 5373 tsync_tx_ctl = 0;
4963 break;
4964 case HWTSTAMP_TX_ON: 5374 case HWTSTAMP_TX_ON:
4965 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4966 break; 5375 break;
4967 default: 5376 default:
4968 return -ERANGE; 5377 return -ERANGE;
@@ -4970,7 +5379,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4970 5379
4971 switch (config.rx_filter) { 5380 switch (config.rx_filter) {
4972 case HWTSTAMP_FILTER_NONE: 5381 case HWTSTAMP_FILTER_NONE:
4973 tsync_rx_ctl_bit = 0; 5382 tsync_rx_ctl = 0;
4974 break; 5383 break;
4975 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 5384 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4976 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 5385 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -4981,86 +5390,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4981 * possible to time stamp both Sync and Delay_Req messages 5390 * possible to time stamp both Sync and Delay_Req messages
4982 * => fall back to time stamping all packets 5391 * => fall back to time stamping all packets
4983 */ 5392 */
4984 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL; 5393 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
4985 config.rx_filter = HWTSTAMP_FILTER_ALL; 5394 config.rx_filter = HWTSTAMP_FILTER_ALL;
4986 break; 5395 break;
4987 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 5396 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4988 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5397 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4989 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; 5398 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4990 is_l4 = 1; 5399 is_l4 = true;
4991 break; 5400 break;
4992 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 5401 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4993 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5402 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4994 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; 5403 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4995 is_l4 = 1; 5404 is_l4 = true;
4996 break; 5405 break;
4997 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 5406 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4998 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 5407 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4999 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5408 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5000 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; 5409 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5001 is_l2 = 1; 5410 is_l2 = true;
5002 is_l4 = 1; 5411 is_l4 = true;
5003 config.rx_filter = HWTSTAMP_FILTER_SOME; 5412 config.rx_filter = HWTSTAMP_FILTER_SOME;
5004 break; 5413 break;
5005 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 5414 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5006 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 5415 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5007 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5416 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5008 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; 5417 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5009 is_l2 = 1; 5418 is_l2 = true;
5010 is_l4 = 1; 5419 is_l4 = true;
5011 config.rx_filter = HWTSTAMP_FILTER_SOME; 5420 config.rx_filter = HWTSTAMP_FILTER_SOME;
5012 break; 5421 break;
5013 case HWTSTAMP_FILTER_PTP_V2_EVENT: 5422 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5014 case HWTSTAMP_FILTER_PTP_V2_SYNC: 5423 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5015 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 5424 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5016 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2; 5425 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5017 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 5426 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5018 is_l2 = 1; 5427 is_l2 = true;
5019 break; 5428 break;
5020 default: 5429 default:
5021 return -ERANGE; 5430 return -ERANGE;
5022 } 5431 }
5023 5432
5433 if (hw->mac.type == e1000_82575) {
5434 if (tsync_rx_ctl | tsync_tx_ctl)
5435 return -EINVAL;
5436 return 0;
5437 }
5438
5024 /* enable/disable TX */ 5439 /* enable/disable TX */
5025 regval = rd32(E1000_TSYNCTXCTL); 5440 regval = rd32(E1000_TSYNCTXCTL);
5026 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit; 5441 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5442 regval |= tsync_tx_ctl;
5027 wr32(E1000_TSYNCTXCTL, regval); 5443 wr32(E1000_TSYNCTXCTL, regval);
5028 5444
5029 /* enable/disable RX, define which PTP packets are time stamped */ 5445 /* enable/disable RX */
5030 regval = rd32(E1000_TSYNCRXCTL); 5446 regval = rd32(E1000_TSYNCRXCTL);
5031 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit; 5447 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5032 regval = (regval & ~0xE) | tsync_rx_ctl_type; 5448 regval |= tsync_rx_ctl;
5033 wr32(E1000_TSYNCRXCTL, regval); 5449 wr32(E1000_TSYNCRXCTL, regval);
5034 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5035 5450
5036 /* 5451 /* define which PTP packets are time stamped */
5037 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 5452 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5038 * (Ethertype to filter on)
5039 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
5040 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
5041 */
5042 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
5043
5044 /* L4 Queue Filter[0]: only filter by source and destination port */
5045 wr32(E1000_SPQF0, htons(port));
5046 wr32(E1000_IMIREXT(0), is_l4 ?
5047 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
5048 wr32(E1000_IMIR(0), is_l4 ?
5049 (htons(port)
5050 | (0<<16) /* immediate interrupt disabled */
5051 | 0 /* (1<<17) bit cleared: do not bypass
5052 destination port check */)
5053 : 0);
5054 wr32(E1000_FTQF0, is_l4 ?
5055 (0x11 /* UDP */
5056 | (1<<15) /* VF not compared */
5057 | (1<<27) /* Enable Timestamping */
5058 | (7<<28) /* only source port filter enabled,
5059 source/target address and protocol
5060 masked */)
5061 : ((1<<15) | (15<<28) /* all mask bits set = filter not
5062 enabled */));
5063 5453
5454 /* define ethertype filter for timestamped packets */
5455 if (is_l2)
5456 wr32(E1000_ETQF(3),
5457 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5458 E1000_ETQF_1588 | /* enable timestamping */
5459 ETH_P_1588)); /* 1588 eth protocol type */
5460 else
5461 wr32(E1000_ETQF(3), 0);
5462
5463#define PTP_PORT 319
5464 /* L4 Queue Filter[3]: filter by destination port and protocol */
5465 if (is_l4) {
5466 u32 ftqf = (IPPROTO_UDP /* UDP */
5467 | E1000_FTQF_VF_BP /* VF not compared */
5468 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5469 | E1000_FTQF_MASK); /* mask all inputs */
5470 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5471
5472 wr32(E1000_IMIR(3), htons(PTP_PORT));
5473 wr32(E1000_IMIREXT(3),
5474 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5475 if (hw->mac.type == e1000_82576) {
5476 /* enable source port check */
5477 wr32(E1000_SPQF(3), htons(PTP_PORT));
5478 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5479 }
5480 wr32(E1000_FTQF(3), ftqf);
5481 } else {
5482 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5483 }
5064 wrfl(); 5484 wrfl();
5065 5485
5066 adapter->hwtstamp_config = config; 5486 adapter->hwtstamp_config = config;
@@ -5137,21 +5557,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
5137 ctrl |= E1000_CTRL_VME; 5557 ctrl |= E1000_CTRL_VME;
5138 wr32(E1000_CTRL, ctrl); 5558 wr32(E1000_CTRL, ctrl);
5139 5559
5140 /* enable VLAN receive filtering */ 5560 /* Disable CFI check */
5141 rctl = rd32(E1000_RCTL); 5561 rctl = rd32(E1000_RCTL);
5142 rctl &= ~E1000_RCTL_CFIEN; 5562 rctl &= ~E1000_RCTL_CFIEN;
5143 wr32(E1000_RCTL, rctl); 5563 wr32(E1000_RCTL, rctl);
5144 igb_update_mng_vlan(adapter);
5145 } else { 5564 } else {
5146 /* disable VLAN tag insert/strip */ 5565 /* disable VLAN tag insert/strip */
5147 ctrl = rd32(E1000_CTRL); 5566 ctrl = rd32(E1000_CTRL);
5148 ctrl &= ~E1000_CTRL_VME; 5567 ctrl &= ~E1000_CTRL_VME;
5149 wr32(E1000_CTRL, ctrl); 5568 wr32(E1000_CTRL, ctrl);
5150
5151 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
5152 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
5153 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
5154 }
5155 } 5569 }
5156 5570
5157 igb_rlpml_set(adapter); 5571 igb_rlpml_set(adapter);
@@ -5166,16 +5580,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5166 struct e1000_hw *hw = &adapter->hw; 5580 struct e1000_hw *hw = &adapter->hw;
5167 int pf_id = adapter->vfs_allocated_count; 5581 int pf_id = adapter->vfs_allocated_count;
5168 5582
5169 if ((hw->mng_cookie.status & 5583 /* attempt to add filter to vlvf array */
5170 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5584 igb_vlvf_set(adapter, vid, true, pf_id);
5171 (vid == adapter->mng_vlan_id))
5172 return;
5173
5174 /* add vid to vlvf if sr-iov is enabled,
5175 * if that fails add directly to filter table */
5176 if (igb_vlvf_set(adapter, vid, true, pf_id))
5177 igb_vfta_set(hw, vid, true);
5178 5585
5586 /* add the filter since PF can receive vlans w/o entry in vlvf */
5587 igb_vfta_set(hw, vid, true);
5179} 5588}
5180 5589
5181static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 5590static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5183,6 +5592,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5183 struct igb_adapter *adapter = netdev_priv(netdev); 5592 struct igb_adapter *adapter = netdev_priv(netdev);
5184 struct e1000_hw *hw = &adapter->hw; 5593 struct e1000_hw *hw = &adapter->hw;
5185 int pf_id = adapter->vfs_allocated_count; 5594 int pf_id = adapter->vfs_allocated_count;
5595 s32 err;
5186 5596
5187 igb_irq_disable(adapter); 5597 igb_irq_disable(adapter);
5188 vlan_group_set_device(adapter->vlgrp, vid, NULL); 5598 vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5190,17 +5600,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5190 if (!test_bit(__IGB_DOWN, &adapter->state)) 5600 if (!test_bit(__IGB_DOWN, &adapter->state))
5191 igb_irq_enable(adapter); 5601 igb_irq_enable(adapter);
5192 5602
5193 if ((adapter->hw.mng_cookie.status & 5603 /* remove vlan from VLVF table array */
5194 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5604 err = igb_vlvf_set(adapter, vid, false, pf_id);
5195 (vid == adapter->mng_vlan_id)) {
5196 /* release control to f/w */
5197 igb_release_hw_control(adapter);
5198 return;
5199 }
5200 5605
5201 /* remove vid from vlvf if sr-iov is enabled, 5606 /* if vid was not present in VLVF just remove it from table */
5202 * if not in vlvf remove from vfta */ 5607 if (err)
5203 if (igb_vlvf_set(adapter, vid, false, pf_id))
5204 igb_vfta_set(hw, vid, false); 5608 igb_vfta_set(hw, vid, false);
5205} 5609}
5206 5610
@@ -5220,6 +5624,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
5220 5624
5221int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) 5625int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5222{ 5626{
5627 struct pci_dev *pdev = adapter->pdev;
5223 struct e1000_mac_info *mac = &adapter->hw.mac; 5628 struct e1000_mac_info *mac = &adapter->hw.mac;
5224 5629
5225 mac->autoneg = 0; 5630 mac->autoneg = 0;
@@ -5243,8 +5648,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5243 break; 5648 break;
5244 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5649 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5245 default: 5650 default:
5246 dev_err(&adapter->pdev->dev, 5651 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
5247 "Unsupported Speed/Duplex configuration\n");
5248 return -EINVAL; 5652 return -EINVAL;
5249 } 5653 }
5250 return 0; 5654 return 0;
@@ -5266,9 +5670,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5266 if (netif_running(netdev)) 5670 if (netif_running(netdev))
5267 igb_close(netdev); 5671 igb_close(netdev);
5268 5672
5269 igb_reset_interrupt_capability(adapter); 5673 igb_clear_interrupt_scheme(adapter);
5270
5271 igb_free_queues(adapter);
5272 5674
5273#ifdef CONFIG_PM 5675#ifdef CONFIG_PM
5274 retval = pci_save_state(pdev); 5676 retval = pci_save_state(pdev);
@@ -5300,7 +5702,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5300 wr32(E1000_CTRL, ctrl); 5702 wr32(E1000_CTRL, ctrl);
5301 5703
5302 /* Allow time for pending master requests to run */ 5704 /* Allow time for pending master requests to run */
5303 igb_disable_pcie_master(&adapter->hw); 5705 igb_disable_pcie_master(hw);
5304 5706
5305 wr32(E1000_WUC, E1000_WUC_PME_EN); 5707 wr32(E1000_WUC, E1000_WUC_PME_EN);
5306 wr32(E1000_WUFC, wufc); 5708 wr32(E1000_WUFC, wufc);
@@ -5363,9 +5765,7 @@ static int igb_resume(struct pci_dev *pdev)
5363 pci_enable_wake(pdev, PCI_D3hot, 0); 5765 pci_enable_wake(pdev, PCI_D3hot, 0);
5364 pci_enable_wake(pdev, PCI_D3cold, 0); 5766 pci_enable_wake(pdev, PCI_D3cold, 0);
5365 5767
5366 igb_set_interrupt_capability(adapter); 5768 if (igb_init_interrupt_scheme(adapter)) {
5367
5368 if (igb_alloc_queues(adapter)) {
5369 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 5769 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5370 return -ENOMEM; 5770 return -ENOMEM;
5371 } 5771 }
@@ -5417,22 +5817,16 @@ static void igb_netpoll(struct net_device *netdev)
5417 int i; 5817 int i;
5418 5818
5419 if (!adapter->msix_entries) { 5819 if (!adapter->msix_entries) {
5820 struct igb_q_vector *q_vector = adapter->q_vector[0];
5420 igb_irq_disable(adapter); 5821 igb_irq_disable(adapter);
5421 napi_schedule(&adapter->rx_ring[0].napi); 5822 napi_schedule(&q_vector->napi);
5422 return; 5823 return;
5423 } 5824 }
5424 5825
5425 for (i = 0; i < adapter->num_tx_queues; i++) { 5826 for (i = 0; i < adapter->num_q_vectors; i++) {
5426 struct igb_ring *tx_ring = &adapter->tx_ring[i]; 5827 struct igb_q_vector *q_vector = adapter->q_vector[i];
5427 wr32(E1000_EIMC, tx_ring->eims_value); 5828 wr32(E1000_EIMC, q_vector->eims_value);
5428 igb_clean_tx_irq(tx_ring); 5829 napi_schedule(&q_vector->napi);
5429 wr32(E1000_EIMS, tx_ring->eims_value);
5430 }
5431
5432 for (i = 0; i < adapter->num_rx_queues; i++) {
5433 struct igb_ring *rx_ring = &adapter->rx_ring[i];
5434 wr32(E1000_EIMC, rx_ring->eims_value);
5435 napi_schedule(&rx_ring->napi);
5436 } 5830 }
5437} 5831}
5438#endif /* CONFIG_NET_POLL_CONTROLLER */ 5832#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5532,6 +5926,33 @@ static void igb_io_resume(struct pci_dev *pdev)
5532 igb_get_hw_control(adapter); 5926 igb_get_hw_control(adapter);
5533} 5927}
5534 5928
5929static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
5930 u8 qsel)
5931{
5932 u32 rar_low, rar_high;
5933 struct e1000_hw *hw = &adapter->hw;
5934
5935 /* HW expects these in little endian so we reverse the byte order
5936 * from network order (big endian) to little endian
5937 */
5938 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5939 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5940 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5941
5942 /* Indicate to hardware the Address is Valid. */
5943 rar_high |= E1000_RAH_AV;
5944
5945 if (hw->mac.type == e1000_82575)
5946 rar_high |= E1000_RAH_POOL_1 * qsel;
5947 else
5948 rar_high |= E1000_RAH_POOL_1 << qsel;
5949
5950 wr32(E1000_RAL(index), rar_low);
5951 wrfl();
5952 wr32(E1000_RAH(index), rar_high);
5953 wrfl();
5954}
5955
5535static int igb_set_vf_mac(struct igb_adapter *adapter, 5956static int igb_set_vf_mac(struct igb_adapter *adapter,
5536 int vf, unsigned char *mac_addr) 5957 int vf, unsigned char *mac_addr)
5537{ 5958{
@@ -5542,8 +5963,7 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5542 5963
5543 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 5964 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5544 5965
5545 igb_rar_set(hw, mac_addr, rar_entry); 5966 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
5546 igb_set_rah_pool(hw, vf, rar_entry);
5547 5967
5548 return 0; 5968 return 0;
5549} 5969}
@@ -5551,19 +5971,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5551static void igb_vmm_control(struct igb_adapter *adapter) 5971static void igb_vmm_control(struct igb_adapter *adapter)
5552{ 5972{
5553 struct e1000_hw *hw = &adapter->hw; 5973 struct e1000_hw *hw = &adapter->hw;
5554 u32 reg_data; 5974 u32 reg;
5555 5975
5556 if (!adapter->vfs_allocated_count) 5976 /* replication is not supported for 82575 */
5977 if (hw->mac.type == e1000_82575)
5557 return; 5978 return;
5558 5979
5559 /* VF's need PF reset indication before they 5980 /* enable replication vlan tag stripping */
5560 * can send/receive mail */ 5981 reg = rd32(E1000_RPLOLR);
5561 reg_data = rd32(E1000_CTRL_EXT); 5982 reg |= E1000_RPLOLR_STRVLAN;
5562 reg_data |= E1000_CTRL_EXT_PFRSTD; 5983 wr32(E1000_RPLOLR, reg);
5563 wr32(E1000_CTRL_EXT, reg_data);
5564 5984
5565 igb_vmdq_set_loopback_pf(hw, true); 5985 /* notify HW that the MAC is adding vlan tags */
5566 igb_vmdq_set_replication_pf(hw, true); 5986 reg = rd32(E1000_DTXCTL);
5987 reg |= E1000_DTXCTL_VLAN_ADDED;
5988 wr32(E1000_DTXCTL, reg);
5989
5990 if (adapter->vfs_allocated_count) {
5991 igb_vmdq_set_loopback_pf(hw, true);
5992 igb_vmdq_set_replication_pf(hw, true);
5993 } else {
5994 igb_vmdq_set_loopback_pf(hw, false);
5995 igb_vmdq_set_replication_pf(hw, false);
5996 }
5567} 5997}
5568 5998
5569/* igb_main.c */ 5999/* igb_main.c */
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index c68265bd0d1a..8afff07ff559 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -367,16 +367,6 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
367 return *data; 367 return *data;
368} 368}
369 369
370static int igbvf_get_self_test_count(struct net_device *netdev)
371{
372 return IGBVF_TEST_LEN;
373}
374
375static int igbvf_get_stats_count(struct net_device *netdev)
376{
377 return IGBVF_GLOBAL_STATS_LEN;
378}
379
380static void igbvf_diag_test(struct net_device *netdev, 370static void igbvf_diag_test(struct net_device *netdev,
381 struct ethtool_test *eth_test, u64 *data) 371 struct ethtool_test *eth_test, u64 *data)
382{ 372{
@@ -484,6 +474,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev,
484 474
485} 475}
486 476
477static int igbvf_get_sset_count(struct net_device *dev, int stringset)
478{
479 switch(stringset) {
480 case ETH_SS_TEST:
481 return IGBVF_TEST_LEN;
482 case ETH_SS_STATS:
483 return IGBVF_GLOBAL_STATS_LEN;
484 default:
485 return -EINVAL;
486 }
487}
488
487static void igbvf_get_strings(struct net_device *netdev, u32 stringset, 489static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
488 u8 *data) 490 u8 *data)
489{ 491{
@@ -532,11 +534,10 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
532 .get_tso = ethtool_op_get_tso, 534 .get_tso = ethtool_op_get_tso,
533 .set_tso = igbvf_set_tso, 535 .set_tso = igbvf_set_tso,
534 .self_test = igbvf_diag_test, 536 .self_test = igbvf_diag_test,
537 .get_sset_count = igbvf_get_sset_count,
535 .get_strings = igbvf_get_strings, 538 .get_strings = igbvf_get_strings,
536 .phys_id = igbvf_phys_id, 539 .phys_id = igbvf_phys_id,
537 .get_ethtool_stats = igbvf_get_ethtool_stats, 540 .get_ethtool_stats = igbvf_get_ethtool_stats,
538 .self_test_count = igbvf_get_self_test_count,
539 .get_stats_count = igbvf_get_stats_count,
540 .get_coalesce = igbvf_get_coalesce, 541 .get_coalesce = igbvf_get_coalesce,
541 .set_coalesce = igbvf_set_coalesce, 542 .set_coalesce = igbvf_set_coalesce,
542}; 543};
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 91024a3cdad3..e01f44597a26 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -170,18 +170,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
170 } 170 }
171 171
172 if (!buffer_info->skb) { 172 if (!buffer_info->skb) {
173 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); 173 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
174 if (!skb) { 174 if (!skb) {
175 adapter->alloc_rx_buff_failed++; 175 adapter->alloc_rx_buff_failed++;
176 goto no_buffers; 176 goto no_buffers;
177 } 177 }
178 178
179 /* Make buffer alignment 2 beyond a 16 byte boundary
180 * this will result in a 16 byte aligned IP header after
181 * the 14 byte MAC header is removed
182 */
183 skb_reserve(skb, NET_IP_ALIGN);
184
185 buffer_info->skb = skb; 179 buffer_info->skb = skb;
186 buffer_info->dma = pci_map_single(pdev, skb->data, 180 buffer_info->dma = pci_map_single(pdev, skb->data,
187 bufsz, 181 bufsz,
@@ -1049,7 +1043,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
1049 } 1043 }
1050 1044
1051 err = request_irq(adapter->msix_entries[vector].vector, 1045 err = request_irq(adapter->msix_entries[vector].vector,
1052 &igbvf_intr_msix_tx, 0, adapter->tx_ring->name, 1046 igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1053 netdev); 1047 netdev);
1054 if (err) 1048 if (err)
1055 goto out; 1049 goto out;
@@ -1059,7 +1053,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
1059 vector++; 1053 vector++;
1060 1054
1061 err = request_irq(adapter->msix_entries[vector].vector, 1055 err = request_irq(adapter->msix_entries[vector].vector,
1062 &igbvf_intr_msix_rx, 0, adapter->rx_ring->name, 1056 igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1063 netdev); 1057 netdev);
1064 if (err) 1058 if (err)
1065 goto out; 1059 goto out;
@@ -1069,7 +1063,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
1069 vector++; 1063 vector++;
1070 1064
1071 err = request_irq(adapter->msix_entries[vector].vector, 1065 err = request_irq(adapter->msix_entries[vector].vector,
1072 &igbvf_msix_other, 0, netdev->name, netdev); 1066 igbvf_msix_other, 0, netdev->name, netdev);
1073 if (err) 1067 if (err)
1074 goto out; 1068 goto out;
1075 1069
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 9f7b5d4172b8..ba8d246d05a0 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -738,17 +738,12 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry)
738 738
739 IPG_DEBUG_MSG("_get_rxbuff\n"); 739 IPG_DEBUG_MSG("_get_rxbuff\n");
740 740
741 skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN); 741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742 if (!skb) { 742 if (!skb) {
743 sp->rx_buff[entry] = NULL; 743 sp->rx_buff[entry] = NULL;
744 return -ENOMEM; 744 return -ENOMEM;
745 } 745 }
746 746
747 /* Adjust the data start location within the buffer to
748 * align IP address field to a 16 byte boundary.
749 */
750 skb_reserve(skb, NET_IP_ALIGN);
751
752 /* Associate the receive buffer with the IPG NIC. */ 747 /* Associate the receive buffer with the IPG NIC. */
753 skb->dev = dev; 748 skb->dev = dev;
754 749
@@ -1756,7 +1751,7 @@ static int ipg_nic_open(struct net_device *dev)
1756 /* Register the interrupt line to be used by the IPG within 1751 /* Register the interrupt line to be used by the IPG within
1757 * the Linux system. 1752 * the Linux system.
1758 */ 1753 */
1759 rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED, 1754 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1760 dev->name, dev); 1755 dev->name, dev);
1761 if (rc < 0) { 1756 if (rc < 0) {
1762 printk(KERN_INFO "%s: Error when requesting interrupt.\n", 1757 printk(KERN_INFO "%s: Error when requesting interrupt.\n",
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index eb424681202d..9b2eebdbb25b 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -353,13 +353,13 @@ static int au1k_irda_start(struct net_device *dev)
353 return retval; 353 return retval;
354 } 354 }
355 355
356 if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt, 356 if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt,
357 0, dev->name, dev))) { 357 0, dev->name, dev))) {
358 printk(KERN_ERR "%s: unable to get IRQ %d\n", 358 printk(KERN_ERR "%s: unable to get IRQ %d\n",
359 dev->name, dev->irq); 359 dev->name, dev->irq);
360 return retval; 360 return retval;
361 } 361 }
362 if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt, 362 if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt,
363 0, dev->name, dev))) { 363 0, dev->name, dev))) {
364 free_irq(AU1000_IRDA_TX_INT, dev); 364 free_irq(AU1000_IRDA_TX_INT, dev);
365 printk(KERN_ERR "%s: unable to get IRQ %d\n", 365 printk(KERN_ERR "%s: unable to get IRQ %d\n",
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 215adf6377d0..ae6eab3e5eed 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -852,7 +852,7 @@ static void irda_usb_receive(struct urb *urb)
852 * hot unplug of the dongle... 852 * hot unplug of the dongle...
853 * Lowest effective timer is 10ms... 853 * Lowest effective timer is 10ms...
854 * Jean II */ 854 * Jean II */
855 self->rx_defer_timer.function = &irda_usb_rx_defer_expired; 855 self->rx_defer_timer.function = irda_usb_rx_defer_expired;
856 self->rx_defer_timer.data = (unsigned long) urb; 856 self->rx_defer_timer.data = (unsigned long) urb;
857 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000)); 857 mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
858 return; 858 return;
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
index 9706e64e367b..0e71e2a93160 100644
--- a/drivers/net/isa-skeleton.c
+++ b/drivers/net/isa-skeleton.c
@@ -260,7 +260,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr)
260 dev->irq = 9; 260 dev->irq = 9;
261 261
262 { 262 {
263 int irqval = request_irq(dev->irq, &net_interrupt, 0, cardname, dev); 263 int irqval = request_irq(dev->irq, net_interrupt, 0, cardname, dev);
264 if (irqval) { 264 if (irqval) {
265 printk("%s: unable to get IRQ %d (irqval=%d).\n", 265 printk("%s: unable to get IRQ %d (irqval=%d).\n",
266 dev->name, dev->irq, irqval); 266 dev->name, dev->irq, irqval);
@@ -378,7 +378,7 @@ net_open(struct net_device *dev)
378 * This is used if the interrupt line can turned off (shared). 378 * This is used if the interrupt line can turned off (shared).
379 * See 3c503.c for an example of selecting the IRQ at config-time. 379 * See 3c503.c for an example of selecting the IRQ at config-time.
380 */ 380 */
381 if (request_irq(dev->irq, &net_interrupt, 0, cardname, dev)) { 381 if (request_irq(dev->irq, net_interrupt, 0, cardname, dev)) {
382 return -EAGAIN; 382 return -EAGAIN;
383 } 383 }
384 /* 384 /*
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index d85717e3022a..e95d9b6f1f2d 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -183,7 +183,6 @@ struct ixgb_adapter {
183 struct napi_struct napi; 183 struct napi_struct napi;
184 struct net_device *netdev; 184 struct net_device *netdev;
185 struct pci_dev *pdev; 185 struct pci_dev *pdev;
186 struct net_device_stats net_stats;
187 186
188 /* structs defined in ixgb_hw.h */ 187 /* structs defined in ixgb_hw.h */
189 struct ixgb_hw hw; 188 struct ixgb_hw hw;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 288ee1d0f431..a4ed96caae69 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -34,38 +34,46 @@
34 34
35#define IXGB_ALL_RAR_ENTRIES 16 35#define IXGB_ALL_RAR_ENTRIES 16
36 36
37enum {NETDEV_STATS, IXGB_STATS};
38
37struct ixgb_stats { 39struct ixgb_stats {
38 char stat_string[ETH_GSTRING_LEN]; 40 char stat_string[ETH_GSTRING_LEN];
41 int type;
39 int sizeof_stat; 42 int sizeof_stat;
40 int stat_offset; 43 int stat_offset;
41}; 44};
42 45
43#define IXGB_STAT(m) FIELD_SIZEOF(struct ixgb_adapter, m), \ 46#define IXGB_STAT(m) IXGB_STATS, \
44 offsetof(struct ixgb_adapter, m) 47 FIELD_SIZEOF(struct ixgb_adapter, m), \
48 offsetof(struct ixgb_adapter, m)
49#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \
50 FIELD_SIZEOF(struct net_device, m), \
51 offsetof(struct net_device, m)
52
45static struct ixgb_stats ixgb_gstrings_stats[] = { 53static struct ixgb_stats ixgb_gstrings_stats[] = {
46 {"rx_packets", IXGB_STAT(net_stats.rx_packets)}, 54 {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
47 {"tx_packets", IXGB_STAT(net_stats.tx_packets)}, 55 {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
48 {"rx_bytes", IXGB_STAT(net_stats.rx_bytes)}, 56 {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
49 {"tx_bytes", IXGB_STAT(net_stats.tx_bytes)}, 57 {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
50 {"rx_errors", IXGB_STAT(net_stats.rx_errors)}, 58 {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
51 {"tx_errors", IXGB_STAT(net_stats.tx_errors)}, 59 {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
52 {"rx_dropped", IXGB_STAT(net_stats.rx_dropped)}, 60 {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
53 {"tx_dropped", IXGB_STAT(net_stats.tx_dropped)}, 61 {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
54 {"multicast", IXGB_STAT(net_stats.multicast)}, 62 {"multicast", IXGB_NETDEV_STAT(stats.multicast)},
55 {"collisions", IXGB_STAT(net_stats.collisions)}, 63 {"collisions", IXGB_NETDEV_STAT(stats.collisions)},
56 64
57/* { "rx_length_errors", IXGB_STAT(net_stats.rx_length_errors) }, */ 65/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */
58 {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)}, 66 {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
59 {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)}, 67 {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
60 {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)}, 68 {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
61 {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)}, 69 {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
62 {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)}, 70 {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
63 {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)}, 71 {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
64 {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)}, 72 {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
65 {"tx_carrier_errors", IXGB_STAT(net_stats.tx_carrier_errors)}, 73 {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
66 {"tx_fifo_errors", IXGB_STAT(net_stats.tx_fifo_errors)}, 74 {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
67 {"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)}, 75 {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
68 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, 76 {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
69 {"tx_deferred_ok", IXGB_STAT(stats.dc)}, 77 {"tx_deferred_ok", IXGB_STAT(stats.dc)},
70 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, 78 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
71 {"tx_restart_queue", IXGB_STAT(restart_queue) }, 79 {"tx_restart_queue", IXGB_STAT(restart_queue) },
@@ -662,10 +670,21 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
662{ 670{
663 struct ixgb_adapter *adapter = netdev_priv(netdev); 671 struct ixgb_adapter *adapter = netdev_priv(netdev);
664 int i; 672 int i;
673 char *p = NULL;
665 674
666 ixgb_update_stats(adapter); 675 ixgb_update_stats(adapter);
667 for (i = 0; i < IXGB_STATS_LEN; i++) { 676 for (i = 0; i < IXGB_STATS_LEN; i++) {
668 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; 677 switch (ixgb_gstrings_stats[i].type) {
678 case NETDEV_STATS:
679 p = (char *) netdev +
680 ixgb_gstrings_stats[i].stat_offset;
681 break;
682 case IXGB_STATS:
683 p = (char *) adapter +
684 ixgb_gstrings_stats[i].stat_offset;
685 break;
686 }
687
669 data[i] = (ixgb_gstrings_stats[i].sizeof_stat == 688 data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
670 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 689 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
671 } 690 }
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 8aa44dca57eb..73646062e8dd 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -233,7 +233,7 @@ ixgb_up(struct ixgb_adapter *adapter)
233 /* proceed to try to request regular interrupt */ 233 /* proceed to try to request regular interrupt */
234 } 234 }
235 235
236 err = request_irq(adapter->pdev->irq, &ixgb_intr, irq_flags, 236 err = request_irq(adapter->pdev->irq, ixgb_intr, irq_flags,
237 netdev->name, netdev); 237 netdev->name, netdev);
238 if (err) { 238 if (err) {
239 if (adapter->have_msi) 239 if (adapter->have_msi)
@@ -1537,9 +1537,7 @@ ixgb_tx_timeout_task(struct work_struct *work)
1537static struct net_device_stats * 1537static struct net_device_stats *
1538ixgb_get_stats(struct net_device *netdev) 1538ixgb_get_stats(struct net_device *netdev)
1539{ 1539{
1540 struct ixgb_adapter *adapter = netdev_priv(netdev); 1540 return &netdev->stats;
1541
1542 return &adapter->net_stats;
1543} 1541}
1544 1542
1545/** 1543/**
@@ -1676,16 +1674,16 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1676 1674
1677 /* Fill out the OS statistics structure */ 1675 /* Fill out the OS statistics structure */
1678 1676
1679 adapter->net_stats.rx_packets = adapter->stats.gprcl; 1677 netdev->stats.rx_packets = adapter->stats.gprcl;
1680 adapter->net_stats.tx_packets = adapter->stats.gptcl; 1678 netdev->stats.tx_packets = adapter->stats.gptcl;
1681 adapter->net_stats.rx_bytes = adapter->stats.gorcl; 1679 netdev->stats.rx_bytes = adapter->stats.gorcl;
1682 adapter->net_stats.tx_bytes = adapter->stats.gotcl; 1680 netdev->stats.tx_bytes = adapter->stats.gotcl;
1683 adapter->net_stats.multicast = adapter->stats.mprcl; 1681 netdev->stats.multicast = adapter->stats.mprcl;
1684 adapter->net_stats.collisions = 0; 1682 netdev->stats.collisions = 0;
1685 1683
1686 /* ignore RLEC as it reports errors for padded (<64bytes) frames 1684 /* ignore RLEC as it reports errors for padded (<64bytes) frames
1687 * with a length in the type/len field */ 1685 * with a length in the type/len field */
1688 adapter->net_stats.rx_errors = 1686 netdev->stats.rx_errors =
1689 /* adapter->stats.rnbc + */ adapter->stats.crcerrs + 1687 /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1690 adapter->stats.ruc + 1688 adapter->stats.ruc +
1691 adapter->stats.roc /*+ adapter->stats.rlec */ + 1689 adapter->stats.roc /*+ adapter->stats.rlec */ +
@@ -1693,21 +1691,21 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1693 adapter->stats.ecbc + adapter->stats.mpc; 1691 adapter->stats.ecbc + adapter->stats.mpc;
1694 1692
1695 /* see above 1693 /* see above
1696 * adapter->net_stats.rx_length_errors = adapter->stats.rlec; 1694 * netdev->stats.rx_length_errors = adapter->stats.rlec;
1697 */ 1695 */
1698 1696
1699 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 1697 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1700 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc; 1698 netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1701 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 1699 netdev->stats.rx_missed_errors = adapter->stats.mpc;
1702 adapter->net_stats.rx_over_errors = adapter->stats.mpc; 1700 netdev->stats.rx_over_errors = adapter->stats.mpc;
1703 1701
1704 adapter->net_stats.tx_errors = 0; 1702 netdev->stats.tx_errors = 0;
1705 adapter->net_stats.rx_frame_errors = 0; 1703 netdev->stats.rx_frame_errors = 0;
1706 adapter->net_stats.tx_aborted_errors = 0; 1704 netdev->stats.tx_aborted_errors = 0;
1707 adapter->net_stats.tx_carrier_errors = 0; 1705 netdev->stats.tx_carrier_errors = 0;
1708 adapter->net_stats.tx_fifo_errors = 0; 1706 netdev->stats.tx_fifo_errors = 0;
1709 adapter->net_stats.tx_heartbeat_errors = 0; 1707 netdev->stats.tx_heartbeat_errors = 0;
1710 adapter->net_stats.tx_window_errors = 0; 1708 netdev->stats.tx_window_errors = 0;
1711} 1709}
1712 1710
1713#define IXGB_MAX_INTR 10 1711#define IXGB_MAX_INTR 10
@@ -1974,9 +1972,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1974 * of reassembly being done in the stack */ 1972 * of reassembly being done in the stack */
1975 if (length < copybreak) { 1973 if (length < copybreak) {
1976 struct sk_buff *new_skb = 1974 struct sk_buff *new_skb =
1977 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 1975 netdev_alloc_skb_ip_align(netdev, length);
1978 if (new_skb) { 1976 if (new_skb) {
1979 skb_reserve(new_skb, NET_IP_ALIGN);
1980 skb_copy_to_linear_data_offset(new_skb, 1977 skb_copy_to_linear_data_offset(new_skb,
1981 -NET_IP_ALIGN, 1978 -NET_IP_ALIGN,
1982 (skb->data - 1979 (skb->data -
@@ -2059,20 +2056,13 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2059 goto map_skb; 2056 goto map_skb;
2060 } 2057 }
2061 2058
2062 skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len 2059 skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2063 + NET_IP_ALIGN);
2064 if (unlikely(!skb)) { 2060 if (unlikely(!skb)) {
2065 /* Better luck next round */ 2061 /* Better luck next round */
2066 adapter->alloc_rx_buff_failed++; 2062 adapter->alloc_rx_buff_failed++;
2067 break; 2063 break;
2068 } 2064 }
2069 2065
2070 /* Make buffer alignment 2 beyond a 16 byte boundary
2071 * this will result in a 16 byte aligned IP header after
2072 * the 14 byte MAC header is removed
2073 */
2074 skb_reserve(skb, NET_IP_ALIGN);
2075
2076 buffer_info->skb = skb; 2066 buffer_info->skb = skb;
2077 buffer_info->length = adapter->rx_buffer_len; 2067 buffer_info->length = adapter->rx_buffer_len;
2078map_skb: 2068map_skb:
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 385be6016667..76b052fa3643 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -159,6 +159,7 @@ struct ixgbe_ring {
159 struct ixgbe_queue_stats stats; 159 struct ixgbe_queue_stats stats;
160 unsigned long reinit_state; 160 unsigned long reinit_state;
161 u64 rsc_count; /* stat for coalesced packets */ 161 u64 rsc_count; /* stat for coalesced packets */
162 u64 rsc_flush; /* stats for flushed packets */
162 163
163 unsigned int size; /* length in bytes */ 164 unsigned int size; /* length in bytes */
164 dma_addr_t dma; /* phys. address of descriptor ring */ 165 dma_addr_t dma; /* phys. address of descriptor ring */
@@ -340,7 +341,6 @@ struct ixgbe_adapter {
340 /* OS defined structs */ 341 /* OS defined structs */
341 struct net_device *netdev; 342 struct net_device *netdev;
342 struct pci_dev *pdev; 343 struct pci_dev *pdev;
343 struct net_device_stats net_stats;
344 344
345 u32 test_icr; 345 u32 test_icr;
346 struct ixgbe_ring test_tx_ring; 346 struct ixgbe_ring test_tx_ring;
@@ -376,7 +376,8 @@ struct ixgbe_adapter {
376#ifdef IXGBE_FCOE 376#ifdef IXGBE_FCOE
377 struct ixgbe_fcoe fcoe; 377 struct ixgbe_fcoe fcoe;
378#endif /* IXGBE_FCOE */ 378#endif /* IXGBE_FCOE */
379 u64 rsc_count; 379 u64 rsc_total_count;
380 u64 rsc_total_flush;
380 u32 wol; 381 u32 wol;
381 u16 eeprom_version; 382 u16 eeprom_version;
382}; 383};
@@ -397,7 +398,7 @@ enum ixgbe_boards {
397extern struct ixgbe_info ixgbe_82598_info; 398extern struct ixgbe_info ixgbe_82598_info;
398extern struct ixgbe_info ixgbe_82599_info; 399extern struct ixgbe_info ixgbe_82599_info;
399#ifdef CONFIG_IXGBE_DCB 400#ifdef CONFIG_IXGBE_DCB
400extern struct dcbnl_rtnl_ops dcbnl_ops; 401extern const struct dcbnl_rtnl_ops dcbnl_ops;
401extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 402extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
402 struct ixgbe_dcb_config *dst_dcb_cfg, 403 struct ixgbe_dcb_config *dst_dcb_cfg,
403 int tc_max); 404 int tc_max);
@@ -458,6 +459,7 @@ extern int ixgbe_fcoe_disable(struct net_device *netdev);
458extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); 459extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
459extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); 460extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
460#endif /* CONFIG_IXGBE_DCB */ 461#endif /* CONFIG_IXGBE_DCB */
462extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
461#endif /* IXGBE_FCOE */ 463#endif /* IXGBE_FCOE */
462 464
463#endif /* _IXGBE_H_ */ 465#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 34b04924c8a1..72106898a5cb 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -42,6 +42,10 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
42 ixgbe_link_speed speed, 42 ixgbe_link_speed speed,
43 bool autoneg, 43 bool autoneg,
44 bool autoneg_wait_to_complete); 44 bool autoneg_wait_to_complete);
45static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
46 ixgbe_link_speed speed,
47 bool autoneg,
48 bool autoneg_wait_to_complete);
45s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 49s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
46 bool autoneg_wait_to_complete); 50 bool autoneg_wait_to_complete);
47s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 51s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -64,7 +68,13 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
64 /* Set up dual speed SFP+ support */ 68 /* Set up dual speed SFP+ support */
65 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 69 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
66 } else { 70 } else {
67 mac->ops.setup_link = &ixgbe_setup_mac_link_82599; 71 if ((mac->ops.get_media_type(hw) ==
72 ixgbe_media_type_backplane) &&
73 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
74 hw->phy.smart_speed == ixgbe_smart_speed_on))
75 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
76 else
77 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
68 } 78 }
69} 79}
70 80
@@ -337,6 +347,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
337 media_type = ixgbe_media_type_backplane; 347 media_type = ixgbe_media_type_backplane;
338 break; 348 break;
339 case IXGBE_DEV_ID_82599_SFP: 349 case IXGBE_DEV_ID_82599_SFP:
350 case IXGBE_DEV_ID_82599_SFP_EM:
340 media_type = ixgbe_media_type_fiber; 351 media_type = ixgbe_media_type_fiber;
341 break; 352 break;
342 case IXGBE_DEV_ID_82599_CX4: 353 case IXGBE_DEV_ID_82599_CX4:
@@ -479,7 +490,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
479 hw->mac.autotry_restart = false; 490 hw->mac.autotry_restart = false;
480 } 491 }
481 492
482 /* The controller may take up to 500ms at 10g to acquire link */ 493 /*
494 * Wait for the controller to acquire link. Per IEEE 802.3ap,
495 * Section 73.10.2, we may have to wait up to 500ms if KR is
496 * attempted. 82599 uses the same timing for 10g SFI.
497 */
498
483 for (i = 0; i < 5; i++) { 499 for (i = 0; i < 5; i++) {
484 /* Wait for the link partner to also set speed */ 500 /* Wait for the link partner to also set speed */
485 msleep(100); 501 msleep(100);
@@ -567,6 +583,111 @@ out:
567} 583}
568 584
569/** 585/**
586 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
587 * @hw: pointer to hardware structure
588 * @speed: new link speed
589 * @autoneg: true if autonegotiation enabled
590 * @autoneg_wait_to_complete: true when waiting for completion is needed
591 *
592 * Implements the Intel SmartSpeed algorithm.
593 **/
594static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
595 ixgbe_link_speed speed, bool autoneg,
596 bool autoneg_wait_to_complete)
597{
598 s32 status = 0;
599 ixgbe_link_speed link_speed;
600 s32 i, j;
601 bool link_up = false;
602 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
603
604 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
605
606 /* Set autoneg_advertised value based on input link speed */
607 hw->phy.autoneg_advertised = 0;
608
609 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
610 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
611
612 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
613 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
614
615 if (speed & IXGBE_LINK_SPEED_100_FULL)
616 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
617
618 /*
619 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
620 * autoneg advertisement if link is unable to be established at the
621 * highest negotiated rate. This can sometimes happen due to integrity
622 * issues with the physical media connection.
623 */
624
625 /* First, try to get link with full advertisement */
626 hw->phy.smart_speed_active = false;
627 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
628 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
629 autoneg_wait_to_complete);
630 if (status)
631 goto out;
632
633 /*
634 * Wait for the controller to acquire link. Per IEEE 802.3ap,
635 * Section 73.10.2, we may have to wait up to 500ms if KR is
636 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
637 * Table 9 in the AN MAS.
638 */
639 for (i = 0; i < 5; i++) {
640 mdelay(100);
641
642 /* If we have link, just jump out */
643 hw->mac.ops.check_link(hw, &link_speed,
644 &link_up, false);
645 if (link_up)
646 goto out;
647 }
648 }
649
650 /*
651 * We didn't get link. If we advertised KR plus one of KX4/KX
652 * (or BX4/BX), then disable KR and try again.
653 */
654 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
655 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
656 goto out;
657
658 /* Turn SmartSpeed on to disable KR support */
659 hw->phy.smart_speed_active = true;
660 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
661 autoneg_wait_to_complete);
662 if (status)
663 goto out;
664
665 /*
666 * Wait for the controller to acquire link. 600ms will allow for
667 * the AN link_fail_inhibit_timer as well for multiple cycles of
668 * parallel detect, both 10g and 1g. This allows for the maximum
669 * connect attempts as defined in the AN MAS table 73-7.
670 */
671 for (i = 0; i < 6; i++) {
672 mdelay(100);
673
674 /* If we have link, just jump out */
675 hw->mac.ops.check_link(hw, &link_speed,
676 &link_up, false);
677 if (link_up)
678 goto out;
679 }
680
681 /* We didn't get link. Turn SmartSpeed back off. */
682 hw->phy.smart_speed_active = false;
683 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
684 autoneg_wait_to_complete);
685
686out:
687 return status;
688}
689
690/**
570 * ixgbe_check_mac_link_82599 - Determine link and speed status 691 * ixgbe_check_mac_link_82599 - Determine link and speed status
571 * @hw: pointer to hardware structure 692 * @hw: pointer to hardware structure
572 * @speed: pointer to link speed 693 * @speed: pointer to link speed
@@ -669,7 +790,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
669 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 790 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
670 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) 791 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
671 autoc |= IXGBE_AUTOC_KX4_SUPP; 792 autoc |= IXGBE_AUTOC_KX4_SUPP;
672 if (orig_autoc & IXGBE_AUTOC_KR_SUPP) 793 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
794 (hw->phy.smart_speed_active == false))
673 autoc |= IXGBE_AUTOC_KR_SUPP; 795 autoc |= IXGBE_AUTOC_KR_SUPP;
674 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 796 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
675 autoc |= IXGBE_AUTOC_KX_SUPP; 797 autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -878,6 +1000,10 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
878 hw->mac.num_rar_entries--; 1000 hw->mac.num_rar_entries--;
879 } 1001 }
880 1002
1003 /* Store the alternative WWNN/WWPN prefix */
1004 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1005 &hw->mac.wwpn_prefix);
1006
881reset_hw_out: 1007reset_hw_out:
882 return status; 1008 return status;
883} 1009}
@@ -2414,6 +2540,51 @@ fw_version_out:
2414 return status; 2540 return status;
2415} 2541}
2416 2542
2543/**
2544 * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
2545 * the EEPROM
2546 * @hw: pointer to hardware structure
2547 * @wwnn_prefix: the alternative WWNN prefix
2548 * @wwpn_prefix: the alternative WWPN prefix
2549 *
2550 * This function will read the EEPROM from the alternative SAN MAC address
2551 * block to check the support for the alternative WWNN/WWPN prefix support.
2552 **/
2553static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2554 u16 *wwpn_prefix)
2555{
2556 u16 offset, caps;
2557 u16 alt_san_mac_blk_offset;
2558
2559 /* clear output first */
2560 *wwnn_prefix = 0xFFFF;
2561 *wwpn_prefix = 0xFFFF;
2562
2563 /* check if alternative SAN MAC is supported */
2564 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2565 &alt_san_mac_blk_offset);
2566
2567 if ((alt_san_mac_blk_offset == 0) ||
2568 (alt_san_mac_blk_offset == 0xFFFF))
2569 goto wwn_prefix_out;
2570
2571 /* check capability in alternative san mac address block */
2572 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2573 hw->eeprom.ops.read(hw, offset, &caps);
2574 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2575 goto wwn_prefix_out;
2576
2577 /* get the corresponding prefix for WWNN/WWPN */
2578 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2579 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2580
2581 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2582 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2583
2584wwn_prefix_out:
2585 return 0;
2586}
2587
2417static struct ixgbe_mac_operations mac_ops_82599 = { 2588static struct ixgbe_mac_operations mac_ops_82599 = {
2418 .init_hw = &ixgbe_init_hw_generic, 2589 .init_hw = &ixgbe_init_hw_generic,
2419 .reset_hw = &ixgbe_reset_hw_82599, 2590 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2425,6 +2596,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2425 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2596 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2426 .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599, 2597 .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
2427 .get_device_caps = &ixgbe_get_device_caps_82599, 2598 .get_device_caps = &ixgbe_get_device_caps_82599,
2599 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
2428 .stop_adapter = &ixgbe_stop_adapter_generic, 2600 .stop_adapter = &ixgbe_stop_adapter_generic,
2429 .get_bus_info = &ixgbe_get_bus_info_generic, 2601 .get_bus_info = &ixgbe_get_bus_info_generic,
2430 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2602 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 40ff120a9ad4..688b8ca5da32 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1382,10 +1382,10 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1382 hw->addr_ctrl.overflow_promisc = 0; 1382 hw->addr_ctrl.overflow_promisc = 0;
1383 1383
1384 /* Zero out the other receive addresses */ 1384 /* Zero out the other receive addresses */
1385 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use); 1385 hw_dbg(hw, "Clearing RAR[1-%d]\n", uc_addr_in_use + 1);
1386 for (i = 1; i <= uc_addr_in_use; i++) { 1386 for (i = 0; i < uc_addr_in_use; i++) {
1387 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1387 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
1388 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); 1388 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
1389 } 1389 }
1390 1390
1391 /* Add the new addresses */ 1391 /* Add the new addresses */
@@ -1755,17 +1755,24 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1755 /* 1755 /*
1756 * On backplane, bail out if 1756 * On backplane, bail out if
1757 * - backplane autoneg was not completed, or if 1757 * - backplane autoneg was not completed, or if
1758 * - link partner is not AN enabled 1758 * - we are 82599 and link partner is not AN enabled
1759 */ 1759 */
1760 if (hw->phy.media_type == ixgbe_media_type_backplane) { 1760 if (hw->phy.media_type == ixgbe_media_type_backplane) {
1761 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 1761 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
1762 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 1762 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
1763 if (((links & IXGBE_LINKS_KX_AN_COMP) == 0) ||
1764 ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)) {
1765 hw->fc.fc_was_autonegged = false; 1763 hw->fc.fc_was_autonegged = false;
1766 hw->fc.current_mode = hw->fc.requested_mode; 1764 hw->fc.current_mode = hw->fc.requested_mode;
1767 goto out; 1765 goto out;
1768 } 1766 }
1767
1768 if (hw->mac.type == ixgbe_mac_82599EB) {
1769 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
1770 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
1771 hw->fc.fc_was_autonegged = false;
1772 hw->fc.current_mode = hw->fc.requested_mode;
1773 goto out;
1774 }
1775 }
1769 } 1776 }
1770 1777
1771 /* 1778 /*
@@ -1784,6 +1791,20 @@ s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
1784 } 1791 }
1785 1792
1786 /* 1793 /*
1794 * Bail out on
1795 * - copper or CX4 adapters
1796 * - fiber adapters running at 10gig
1797 */
1798 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
1799 (hw->phy.media_type == ixgbe_media_type_cx4) ||
1800 ((hw->phy.media_type == ixgbe_media_type_fiber) &&
1801 (speed == IXGBE_LINK_SPEED_10GB_FULL))) {
1802 hw->fc.fc_was_autonegged = false;
1803 hw->fc.current_mode = hw->fc.requested_mode;
1804 goto out;
1805 }
1806
1807 /*
1787 * Read the AN advertisement and LP ability registers and resolve 1808 * Read the AN advertisement and LP ability registers and resolve
1788 * local flow control settings accordingly 1809 * local flow control settings accordingly
1789 */ 1810 */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index a6bc1ef28f92..3c7a79a7d7c6 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -563,7 +563,7 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
563 return rval; 563 return rval;
564} 564}
565 565
566struct dcbnl_rtnl_ops dcbnl_ops = { 566const struct dcbnl_rtnl_ops dcbnl_ops = {
567 .getstate = ixgbe_dcbnl_get_state, 567 .getstate = ixgbe_dcbnl_get_state,
568 .setstate = ixgbe_dcbnl_set_state, 568 .setstate = ixgbe_dcbnl_set_state,
569 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, 569 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 856c18c207f3..1928d559e65f 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -40,19 +40,27 @@
40 40
41#define IXGBE_ALL_RAR_ENTRIES 16 41#define IXGBE_ALL_RAR_ENTRIES 16
42 42
43enum {NETDEV_STATS, IXGBE_STATS};
44
43struct ixgbe_stats { 45struct ixgbe_stats {
44 char stat_string[ETH_GSTRING_LEN]; 46 char stat_string[ETH_GSTRING_LEN];
47 int type;
45 int sizeof_stat; 48 int sizeof_stat;
46 int stat_offset; 49 int stat_offset;
47}; 50};
48 51
49#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ 52#define IXGBE_STAT(m) IXGBE_STATS, \
50 offsetof(struct ixgbe_adapter, m) 53 sizeof(((struct ixgbe_adapter *)0)->m), \
54 offsetof(struct ixgbe_adapter, m)
55#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
56 sizeof(((struct net_device *)0)->m), \
57 offsetof(struct net_device, m)
58
51static struct ixgbe_stats ixgbe_gstrings_stats[] = { 59static struct ixgbe_stats ixgbe_gstrings_stats[] = {
52 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, 60 {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 61 {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, 62 {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, 63 {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
56 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 64 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
57 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 65 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
58 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 66 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
@@ -60,26 +68,27 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
60 {"lsc_int", IXGBE_STAT(lsc_int)}, 68 {"lsc_int", IXGBE_STAT(lsc_int)},
61 {"tx_busy", IXGBE_STAT(tx_busy)}, 69 {"tx_busy", IXGBE_STAT(tx_busy)},
62 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 70 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
63 {"rx_errors", IXGBE_STAT(net_stats.rx_errors)}, 71 {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
64 {"tx_errors", IXGBE_STAT(net_stats.tx_errors)}, 72 {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
65 {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)}, 73 {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
66 {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)}, 74 {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
67 {"multicast", IXGBE_STAT(net_stats.multicast)}, 75 {"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
68 {"broadcast", IXGBE_STAT(stats.bprc)}, 76 {"broadcast", IXGBE_STAT(stats.bprc)},
69 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 77 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
70 {"collisions", IXGBE_STAT(net_stats.collisions)}, 78 {"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
71 {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)}, 79 {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
72 {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, 80 {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
73 {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, 81 {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
74 {"hw_rsc_count", IXGBE_STAT(rsc_count)}, 82 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
83 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
75 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 84 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
76 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 85 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
77 {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, 86 {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
78 {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, 87 {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
79 {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, 88 {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
80 {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)}, 89 {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)},
81 {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)}, 90 {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)},
82 {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)}, 91 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)},
83 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 92 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
84 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 93 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
85 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 94 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
@@ -196,6 +205,56 @@ static int ixgbe_get_settings(struct net_device *netdev,
196 ecmd->autoneg = AUTONEG_DISABLE; 205 ecmd->autoneg = AUTONEG_DISABLE;
197 } 206 }
198 207
208 /* Get PHY type */
209 switch (adapter->hw.phy.type) {
210 case ixgbe_phy_tn:
211 case ixgbe_phy_cu_unknown:
212 /* Copper 10G-BASET */
213 ecmd->port = PORT_TP;
214 break;
215 case ixgbe_phy_qt:
216 ecmd->port = PORT_FIBRE;
217 break;
218 case ixgbe_phy_nl:
219 case ixgbe_phy_tw_tyco:
220 case ixgbe_phy_tw_unknown:
221 case ixgbe_phy_sfp_ftl:
222 case ixgbe_phy_sfp_avago:
223 case ixgbe_phy_sfp_intel:
224 case ixgbe_phy_sfp_unknown:
225 switch (adapter->hw.phy.sfp_type) {
226 /* SFP+ devices, further checking needed */
227 case ixgbe_sfp_type_da_cu:
228 case ixgbe_sfp_type_da_cu_core0:
229 case ixgbe_sfp_type_da_cu_core1:
230 ecmd->port = PORT_DA;
231 break;
232 case ixgbe_sfp_type_sr:
233 case ixgbe_sfp_type_lr:
234 case ixgbe_sfp_type_srlr_core0:
235 case ixgbe_sfp_type_srlr_core1:
236 ecmd->port = PORT_FIBRE;
237 break;
238 case ixgbe_sfp_type_not_present:
239 ecmd->port = PORT_NONE;
240 break;
241 case ixgbe_sfp_type_unknown:
242 default:
243 ecmd->port = PORT_OTHER;
244 break;
245 }
246 break;
247 case ixgbe_phy_xaui:
248 ecmd->port = PORT_NONE;
249 break;
250 case ixgbe_phy_unknown:
251 case ixgbe_phy_generic:
252 case ixgbe_phy_sfp_unsupported:
253 default:
254 ecmd->port = PORT_OTHER;
255 break;
256 }
257
199 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 258 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
200 if (link_up) { 259 if (link_up) {
201 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 260 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
@@ -933,10 +992,21 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
933 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); 992 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
934 int j, k; 993 int j, k;
935 int i; 994 int i;
995 char *p = NULL;
936 996
937 ixgbe_update_stats(adapter); 997 ixgbe_update_stats(adapter);
938 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 998 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
939 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; 999 switch (ixgbe_gstrings_stats[i].type) {
1000 case NETDEV_STATS:
1001 p = (char *) netdev +
1002 ixgbe_gstrings_stats[i].stat_offset;
1003 break;
1004 case IXGBE_STATS:
1005 p = (char *) adapter +
1006 ixgbe_gstrings_stats[i].stat_offset;
1007 break;
1008 }
1009
940 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 1010 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
941 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1011 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
942 } 1012 }
@@ -1255,15 +1325,15 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1255 return 0; 1325 return 0;
1256 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1326 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1257 shared_int = false; 1327 shared_int = false;
1258 if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name, 1328 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1259 netdev)) { 1329 netdev)) {
1260 *data = 1; 1330 *data = 1;
1261 return -1; 1331 return -1;
1262 } 1332 }
1263 } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED, 1333 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1264 netdev->name, netdev)) { 1334 netdev->name, netdev)) {
1265 shared_int = false; 1335 shared_int = false;
1266 } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED, 1336 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1267 netdev->name, netdev)) { 1337 netdev->name, netdev)) {
1268 *data = 1; 1338 *data = 1;
1269 return -1; 1339 return -1;
@@ -1952,6 +2022,10 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
1952 break; 2022 break;
1953 } 2023 }
1954 2024
2025 /* if in mixed tx/rx queues per vector mode, report only rx settings */
2026 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
2027 return 0;
2028
1955 /* only valid if in constant ITR mode */ 2029 /* only valid if in constant ITR mode */
1956 switch (adapter->tx_itr_setting) { 2030 switch (adapter->tx_itr_setting) {
1957 case 0: 2031 case 0:
@@ -1977,12 +2051,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
1977 struct ixgbe_q_vector *q_vector; 2051 struct ixgbe_q_vector *q_vector;
1978 int i; 2052 int i;
1979 2053
1980 /* 2054 /* don't accept tx specific changes if we've got mixed RxTx vectors */
1981 * don't accept tx specific changes if we've got mixed RxTx vectors 2055 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
1982 * test and jump out here if needed before changing the rx numbers 2056 && ec->tx_coalesce_usecs)
1983 */
1984 if ((1000000/ec->tx_coalesce_usecs) != adapter->tx_eitr_param &&
1985 adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
1986 return -EINVAL; 2057 return -EINVAL;
1987 2058
1988 if (ec->tx_max_coalesced_frames_irq) 2059 if (ec->tx_max_coalesced_frames_irq)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index a3c9f99515e2..edecdc853c14 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -718,3 +718,49 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
718 return 1; 718 return 1;
719} 719}
720#endif /* CONFIG_IXGBE_DCB */ 720#endif /* CONFIG_IXGBE_DCB */
721
722/**
723 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
724 * @netdev : ixgbe adapter
725 * @wwn : the world wide name
726 * @type: the type of world wide name
727 *
728 * Returns the node or port world wide name if both the prefix and the san
729 * mac address are valid, then the wwn is formed based on the NAA-2 for
730 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
731 *
732 * Returns : 0 on success
733 */
734int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
735{
736 int rc = -EINVAL;
737 u16 prefix = 0xffff;
738 struct ixgbe_adapter *adapter = netdev_priv(netdev);
739 struct ixgbe_mac_info *mac = &adapter->hw.mac;
740
741 switch (type) {
742 case NETDEV_FCOE_WWNN:
743 prefix = mac->wwnn_prefix;
744 break;
745 case NETDEV_FCOE_WWPN:
746 prefix = mac->wwpn_prefix;
747 break;
748 default:
749 break;
750 }
751
752 if ((prefix != 0xffff) &&
753 is_valid_ether_addr(mac->san_addr)) {
754 *wwn = ((u64) prefix << 48) |
755 ((u64) mac->san_addr[0] << 40) |
756 ((u64) mac->san_addr[1] << 32) |
757 ((u64) mac->san_addr[2] << 24) |
758 ((u64) mac->san_addr[3] << 16) |
759 ((u64) mac->san_addr[4] << 8) |
760 ((u64) mac->san_addr[5]);
761 rc = 0;
762 }
763 return rc;
764}
765
766
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a456578b8578..5c56f2a741f3 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -98,6 +98,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
98 board_82599 }, 98 board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
100 board_82599 }, 100 board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
102 board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), 103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
102 board_82599 }, 104 board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
@@ -423,8 +425,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
423 tx_ring->total_packets += total_packets; 425 tx_ring->total_packets += total_packets;
424 tx_ring->stats.packets += total_packets; 426 tx_ring->stats.packets += total_packets;
425 tx_ring->stats.bytes += total_bytes; 427 tx_ring->stats.bytes += total_bytes;
426 adapter->net_stats.tx_bytes += total_bytes; 428 netdev->stats.tx_bytes += total_bytes;
427 adapter->net_stats.tx_packets += total_packets; 429 netdev->stats.tx_packets += total_packets;
428 return (count < tx_ring->work_limit); 430 return (count < tx_ring->work_limit);
429} 431}
430 432
@@ -669,22 +671,14 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
669 671
670 if (!bi->skb) { 672 if (!bi->skb) {
671 struct sk_buff *skb; 673 struct sk_buff *skb;
672 skb = netdev_alloc_skb(adapter->netdev, 674 skb = netdev_alloc_skb_ip_align(adapter->netdev,
673 (rx_ring->rx_buf_len + 675 rx_ring->rx_buf_len);
674 NET_IP_ALIGN));
675 676
676 if (!skb) { 677 if (!skb) {
677 adapter->alloc_rx_buff_failed++; 678 adapter->alloc_rx_buff_failed++;
678 goto no_buffers; 679 goto no_buffers;
679 } 680 }
680 681
681 /*
682 * Make buffer alignment 2 beyond a 16 byte boundary
683 * this will result in a 16 byte aligned IP header after
684 * the 14 byte MAC header is removed
685 */
686 skb_reserve(skb, NET_IP_ALIGN);
687
688 bi->skb = skb; 682 bi->skb = skb;
689 bi->dma = pci_map_single(pdev, skb->data, 683 bi->dma = pci_map_single(pdev, skb->data,
690 rx_ring->rx_buf_len, 684 rx_ring->rx_buf_len,
@@ -735,12 +729,14 @@ static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
735/** 729/**
736 * ixgbe_transform_rsc_queue - change rsc queue into a full packet 730 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
737 * @skb: pointer to the last skb in the rsc queue 731 * @skb: pointer to the last skb in the rsc queue
732 * @count: pointer to number of packets coalesced in this context
738 * 733 *
739 * This function changes a queue full of hw rsc buffers into a completed 734 * This function changes a queue full of hw rsc buffers into a completed
740 * packet. It uses the ->prev pointers to find the first packet and then 735 * packet. It uses the ->prev pointers to find the first packet and then
741 * turns it into the frag list owner. 736 * turns it into the frag list owner.
742 **/ 737 **/
743static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb) 738static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
739 u64 *count)
744{ 740{
745 unsigned int frag_list_size = 0; 741 unsigned int frag_list_size = 0;
746 742
@@ -749,6 +745,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
749 frag_list_size += skb->len; 745 frag_list_size += skb->len;
750 skb->prev = NULL; 746 skb->prev = NULL;
751 skb = prev; 747 skb = prev;
748 *count += 1;
752 } 749 }
753 750
754 skb_shinfo(skb)->frag_list = skb->next; 751 skb_shinfo(skb)->frag_list = skb->next;
@@ -764,6 +761,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
764 int *work_done, int work_to_do) 761 int *work_done, int work_to_do)
765{ 762{
766 struct ixgbe_adapter *adapter = q_vector->adapter; 763 struct ixgbe_adapter *adapter = q_vector->adapter;
764 struct net_device *netdev = adapter->netdev;
767 struct pci_dev *pdev = adapter->pdev; 765 struct pci_dev *pdev = adapter->pdev;
768 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 766 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
769 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 767 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -850,14 +848,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
850 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> 848 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
851 IXGBE_RXDADV_NEXTP_SHIFT; 849 IXGBE_RXDADV_NEXTP_SHIFT;
852 next_buffer = &rx_ring->rx_buffer_info[nextp]; 850 next_buffer = &rx_ring->rx_buffer_info[nextp];
853 rx_ring->rsc_count += (rsc_count - 1);
854 } else { 851 } else {
855 next_buffer = &rx_ring->rx_buffer_info[i]; 852 next_buffer = &rx_ring->rx_buffer_info[i];
856 } 853 }
857 854
858 if (staterr & IXGBE_RXD_STAT_EOP) { 855 if (staterr & IXGBE_RXD_STAT_EOP) {
859 if (skb->prev) 856 if (skb->prev)
860 skb = ixgbe_transform_rsc_queue(skb); 857 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
858 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
859 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
860 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
861 else
862 rx_ring->rsc_count++;
863 rx_ring->rsc_flush++;
864 }
861 rx_ring->stats.packets++; 865 rx_ring->stats.packets++;
862 rx_ring->stats.bytes += skb->len; 866 rx_ring->stats.bytes += skb->len;
863 } else { 867 } else {
@@ -935,8 +939,8 @@ next_desc:
935 939
936 rx_ring->total_packets += total_rx_packets; 940 rx_ring->total_packets += total_rx_packets;
937 rx_ring->total_bytes += total_rx_bytes; 941 rx_ring->total_bytes += total_rx_bytes;
938 adapter->net_stats.rx_bytes += total_rx_bytes; 942 netdev->stats.rx_bytes += total_rx_bytes;
939 adapter->net_stats.rx_packets += total_rx_packets; 943 netdev->stats.rx_packets += total_rx_packets;
940 944
941 return cleaned; 945 return cleaned;
942} 946}
@@ -1209,6 +1213,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1209 adapter->link_check_timeout = jiffies; 1213 adapter->link_check_timeout = jiffies;
1210 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 1214 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1211 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 1215 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1216 IXGBE_WRITE_FLUSH(hw);
1212 schedule_work(&adapter->watchdog_task); 1217 schedule_work(&adapter->watchdog_task);
1213 } 1218 }
1214} 1219}
@@ -1344,8 +1349,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1344 if (!q_vector->rxr_count) 1349 if (!q_vector->rxr_count)
1345 return IRQ_HANDLED; 1350 return IRQ_HANDLED;
1346 1351
1347 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1348 rx_ring = &(adapter->rx_ring[r_idx]);
1349 /* disable interrupts on this vector only */ 1352 /* disable interrupts on this vector only */
1350 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); 1353 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1351 napi_schedule(&q_vector->napi); 1354 napi_schedule(&q_vector->napi);
@@ -1667,7 +1670,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1667 1670
1668 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 1671 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1669 err = request_irq(adapter->msix_entries[vector].vector, 1672 err = request_irq(adapter->msix_entries[vector].vector,
1670 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 1673 ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
1671 if (err) { 1674 if (err) {
1672 DPRINTK(PROBE, ERR, 1675 DPRINTK(PROBE, ERR,
1673 "request_irq for msix_lsc failed: %d\n", err); 1676 "request_irq for msix_lsc failed: %d\n", err);
@@ -1838,10 +1841,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
1838 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1841 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1839 err = ixgbe_request_msix_irqs(adapter); 1842 err = ixgbe_request_msix_irqs(adapter);
1840 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1843 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1841 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, 1844 err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
1842 netdev->name, netdev); 1845 netdev->name, netdev);
1843 } else { 1846 } else {
1844 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, 1847 err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
1845 netdev->name, netdev); 1848 netdev->name, netdev);
1846 } 1849 }
1847 1850
@@ -2063,18 +2066,18 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2063 * ixgbe_configure_rscctl - enable RSC for the indicated ring 2066 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2064 * @adapter: address of board private structure 2067 * @adapter: address of board private structure
2065 * @index: index of ring to set 2068 * @index: index of ring to set
2066 * @rx_buf_len: rx buffer length
2067 **/ 2069 **/
2068static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index, 2070static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
2069 int rx_buf_len)
2070{ 2071{
2071 struct ixgbe_ring *rx_ring; 2072 struct ixgbe_ring *rx_ring;
2072 struct ixgbe_hw *hw = &adapter->hw; 2073 struct ixgbe_hw *hw = &adapter->hw;
2073 int j; 2074 int j;
2074 u32 rscctrl; 2075 u32 rscctrl;
2076 int rx_buf_len;
2075 2077
2076 rx_ring = &adapter->rx_ring[index]; 2078 rx_ring = &adapter->rx_ring[index];
2077 j = rx_ring->reg_idx; 2079 j = rx_ring->reg_idx;
2080 rx_buf_len = rx_ring->rx_buf_len;
2078 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j)); 2081 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2079 rscctrl |= IXGBE_RSCCTL_RSCEN; 2082 rscctrl |= IXGBE_RSCCTL_RSCEN;
2080 /* 2083 /*
@@ -2282,7 +2285,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2282 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 2285 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2283 /* Enable 82599 HW-RSC */ 2286 /* Enable 82599 HW-RSC */
2284 for (i = 0; i < adapter->num_rx_queues; i++) 2287 for (i = 0; i < adapter->num_rx_queues; i++)
2285 ixgbe_configure_rscctl(adapter, i, rx_buf_len); 2288 ixgbe_configure_rscctl(adapter, i);
2286 2289
2287 /* Disable RSC for ACK packets */ 2290 /* Disable RSC for ACK packets */
2288 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 2291 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
@@ -2333,23 +2336,25 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
2333 * not in DCB mode. 2336 * not in DCB mode.
2334 */ 2337 */
2335 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 2338 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
2339
2340 /* Disable CFI check */
2341 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2342
2343 /* enable VLAN tag stripping */
2336 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2344 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2337 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE; 2345 ctrl |= IXGBE_VLNCTRL_VME;
2338 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2339 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2340 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2346 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2341 ctrl |= IXGBE_VLNCTRL_VFE;
2342 /* enable VLAN tag insert/strip */
2343 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
2344 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2345 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2346 for (i = 0; i < adapter->num_rx_queues; i++) { 2347 for (i = 0; i < adapter->num_rx_queues; i++) {
2348 u32 ctrl;
2347 j = adapter->rx_ring[i].reg_idx; 2349 j = adapter->rx_ring[i].reg_idx;
2348 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j)); 2350 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2349 ctrl |= IXGBE_RXDCTL_VME; 2351 ctrl |= IXGBE_RXDCTL_VME;
2350 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl); 2352 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
2351 } 2353 }
2352 } 2354 }
2355
2356 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2357
2353 ixgbe_vlan_rx_add_vid(netdev, 0); 2358 ixgbe_vlan_rx_add_vid(netdev, 0);
2354 2359
2355 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2360 if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -3632,10 +3637,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3632 * It's easy to be greedy for MSI-X vectors, but it really 3637 * It's easy to be greedy for MSI-X vectors, but it really
3633 * doesn't do us much good if we have a lot more vectors 3638 * doesn't do us much good if we have a lot more vectors
3634 * than CPU's. So let's be conservative and only ask for 3639 * than CPU's. So let's be conservative and only ask for
3635 * (roughly) twice the number of vectors as there are CPU's. 3640 * (roughly) the same number of vectors as there are CPU's.
3636 */ 3641 */
3637 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 3642 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
3638 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 3643 (int)num_online_cpus()) + NON_Q_VECTORS;
3639 3644
3640 /* 3645 /*
3641 * At the same time, hardware can only support a maximum of 3646 * At the same time, hardware can only support a maximum of
@@ -4475,18 +4480,23 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
4475 **/ 4480 **/
4476void ixgbe_update_stats(struct ixgbe_adapter *adapter) 4481void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4477{ 4482{
4483 struct net_device *netdev = adapter->netdev;
4478 struct ixgbe_hw *hw = &adapter->hw; 4484 struct ixgbe_hw *hw = &adapter->hw;
4479 u64 total_mpc = 0; 4485 u64 total_mpc = 0;
4480 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 4486 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
4481 4487
4482 if (hw->mac.type == ixgbe_mac_82599EB) { 4488 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
4483 u64 rsc_count = 0; 4489 u64 rsc_count = 0;
4490 u64 rsc_flush = 0;
4484 for (i = 0; i < 16; i++) 4491 for (i = 0; i < 16; i++)
4485 adapter->hw_rx_no_dma_resources += 4492 adapter->hw_rx_no_dma_resources +=
4486 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 4493 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4487 for (i = 0; i < adapter->num_rx_queues; i++) 4494 for (i = 0; i < adapter->num_rx_queues; i++) {
4488 rsc_count += adapter->rx_ring[i].rsc_count; 4495 rsc_count += adapter->rx_ring[i].rsc_count;
4489 adapter->rsc_count = rsc_count; 4496 rsc_flush += adapter->rx_ring[i].rsc_flush;
4497 }
4498 adapter->rsc_total_count = rsc_count;
4499 adapter->rsc_total_flush = rsc_flush;
4490 } 4500 }
4491 4501
4492 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 4502 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4594,15 +4604,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4594 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 4604 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4595 4605
4596 /* Fill out the OS statistics structure */ 4606 /* Fill out the OS statistics structure */
4597 adapter->net_stats.multicast = adapter->stats.mprc; 4607 netdev->stats.multicast = adapter->stats.mprc;
4598 4608
4599 /* Rx Errors */ 4609 /* Rx Errors */
4600 adapter->net_stats.rx_errors = adapter->stats.crcerrs + 4610 netdev->stats.rx_errors = adapter->stats.crcerrs +
4601 adapter->stats.rlec; 4611 adapter->stats.rlec;
4602 adapter->net_stats.rx_dropped = 0; 4612 netdev->stats.rx_dropped = 0;
4603 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 4613 netdev->stats.rx_length_errors = adapter->stats.rlec;
4604 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 4614 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4605 adapter->net_stats.rx_missed_errors = total_mpc; 4615 netdev->stats.rx_missed_errors = total_mpc;
4606} 4616}
4607 4617
4608/** 4618/**
@@ -5372,10 +5382,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5372 **/ 5382 **/
5373static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) 5383static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
5374{ 5384{
5375 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5376
5377 /* only return the current stats */ 5385 /* only return the current stats */
5378 return &adapter->net_stats; 5386 return &netdev->stats;
5379} 5387}
5380 5388
5381/** 5389/**
@@ -5527,6 +5535,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5527 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 5535 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
5528 .ndo_fcoe_enable = ixgbe_fcoe_enable, 5536 .ndo_fcoe_enable = ixgbe_fcoe_enable,
5529 .ndo_fcoe_disable = ixgbe_fcoe_disable, 5537 .ndo_fcoe_disable = ixgbe_fcoe_disable,
5538 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
5530#endif /* IXGBE_FCOE */ 5539#endif /* IXGBE_FCOE */
5531}; 5540};
5532 5541
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index ef4bdd58e016..21b6633da578 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -52,6 +52,7 @@
52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
53#define IXGBE_DEV_ID_82599_CX4 0x10F9 53#define IXGBE_DEV_ID_82599_CX4 0x10F9
54#define IXGBE_DEV_ID_82599_SFP 0x10FB 54#define IXGBE_DEV_ID_82599_SFP 0x10FB
55#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
55#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 56#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
56#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 57#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
57 58
@@ -1538,6 +1539,16 @@
1538#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 1539#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
1539#define IXGBE_FW_PATCH_VERSION_4 0x7 1540#define IXGBE_FW_PATCH_VERSION_4 0x7
1540 1541
1542/* Alternative SAN MAC Address Block */
1543#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
1544#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
1545#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
1546#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
1547#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
1548#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
1549#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
1550#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1551
1541/* PCI Bus Info */ 1552/* PCI Bus Info */
1542#define IXGBE_PCI_LINK_STATUS 0xB2 1553#define IXGBE_PCI_LINK_STATUS 0xB2
1543#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1554#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
@@ -2171,6 +2182,14 @@ enum ixgbe_fc_mode {
2171 ixgbe_fc_default 2182 ixgbe_fc_default
2172}; 2183};
2173 2184
2185/* Smart Speed Settings */
2186#define IXGBE_SMARTSPEED_MAX_RETRIES 3
2187enum ixgbe_smart_speed {
2188 ixgbe_smart_speed_auto = 0,
2189 ixgbe_smart_speed_on,
2190 ixgbe_smart_speed_off
2191};
2192
2174/* PCI bus types */ 2193/* PCI bus types */
2175enum ixgbe_bus_type { 2194enum ixgbe_bus_type {
2176 ixgbe_bus_type_unknown = 0, 2195 ixgbe_bus_type_unknown = 0,
@@ -2336,6 +2355,7 @@ struct ixgbe_mac_operations {
2336 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); 2355 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
2337 s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); 2356 s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
2338 s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); 2357 s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
2358 s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
2339 s32 (*stop_adapter)(struct ixgbe_hw *); 2359 s32 (*stop_adapter)(struct ixgbe_hw *);
2340 s32 (*get_bus_info)(struct ixgbe_hw *); 2360 s32 (*get_bus_info)(struct ixgbe_hw *);
2341 void (*set_lan_id)(struct ixgbe_hw *); 2361 void (*set_lan_id)(struct ixgbe_hw *);
@@ -2407,6 +2427,10 @@ struct ixgbe_mac_info {
2407 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2427 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2408 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2428 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2409 u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2429 u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2430 /* prefix for World Wide Node Name (WWNN) */
2431 u16 wwnn_prefix;
2432 /* prefix for World Wide Port Name (WWPN) */
2433 u16 wwpn_prefix;
2410 s32 mc_filter_type; 2434 s32 mc_filter_type;
2411 u32 mcft_size; 2435 u32 mcft_size;
2412 u32 vft_size; 2436 u32 vft_size;
@@ -2431,6 +2455,8 @@ struct ixgbe_phy_info {
2431 enum ixgbe_media_type media_type; 2455 enum ixgbe_media_type media_type;
2432 bool reset_disable; 2456 bool reset_disable;
2433 ixgbe_autoneg_advertised autoneg_advertised; 2457 ixgbe_autoneg_advertised autoneg_advertised;
2458 enum ixgbe_smart_speed smart_speed;
2459 bool smart_speed_active;
2434 bool multispeed_fiber; 2460 bool multispeed_fiber;
2435}; 2461};
2436 2462
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 9aee0cc922c9..e9d9d595e1b7 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -109,9 +109,8 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
109 if (unlikely(!netif_running(nds[desc->channel]))) 109 if (unlikely(!netif_running(nds[desc->channel])))
110 goto err; 110 goto err;
111 111
112 skb = netdev_alloc_skb(dev, desc->pkt_length + 2); 112 skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
113 if (likely(skb != NULL)) { 113 if (likely(skb != NULL)) {
114 skb_reserve(skb, 2);
115 skb_copy_to_linear_data(skb, buf, desc->pkt_length); 114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
116 skb_put(skb, desc->pkt_length); 115 skb_put(skb, desc->pkt_length);
117 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c
index 6e5b3f30527f..35d5bed450da 100644
--- a/drivers/net/jazzsonic.c
+++ b/drivers/net/jazzsonic.c
@@ -81,7 +81,7 @@ static unsigned short known_revisions[] =
81 81
82static int jazzsonic_open(struct net_device* dev) 82static int jazzsonic_open(struct net_device* dev)
83{ 83{
84 if (request_irq(dev->irq, &sonic_interrupt, IRQF_DISABLED, "sonic", dev)) { 84 if (request_irq(dev->irq, sonic_interrupt, IRQF_DISABLED, "sonic", dev)) {
85 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); 85 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
86 return -EAGAIN; 86 return -EAGAIN;
87 } 87 }
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 1d2a32544ed2..6c1b92fa0b0c 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2764,19 +2764,19 @@ jme_init_one(struct pci_dev *pdev,
2764 atomic_set(&jme->rx_empty, 1); 2764 atomic_set(&jme->rx_empty, 1);
2765 2765
2766 tasklet_init(&jme->pcc_task, 2766 tasklet_init(&jme->pcc_task,
2767 &jme_pcc_tasklet, 2767 jme_pcc_tasklet,
2768 (unsigned long) jme); 2768 (unsigned long) jme);
2769 tasklet_init(&jme->linkch_task, 2769 tasklet_init(&jme->linkch_task,
2770 &jme_link_change_tasklet, 2770 jme_link_change_tasklet,
2771 (unsigned long) jme); 2771 (unsigned long) jme);
2772 tasklet_init(&jme->txclean_task, 2772 tasklet_init(&jme->txclean_task,
2773 &jme_tx_clean_tasklet, 2773 jme_tx_clean_tasklet,
2774 (unsigned long) jme); 2774 (unsigned long) jme);
2775 tasklet_init(&jme->rxclean_task, 2775 tasklet_init(&jme->rxclean_task,
2776 &jme_rx_clean_tasklet, 2776 jme_rx_clean_tasklet,
2777 (unsigned long) jme); 2777 (unsigned long) jme);
2778 tasklet_init(&jme->rxempty_task, 2778 tasklet_init(&jme->rxempty_task,
2779 &jme_rx_empty_tasklet, 2779 jme_rx_empty_tasklet,
2780 (unsigned long) jme); 2780 (unsigned long) jme);
2781 tasklet_disable_nosync(&jme->linkch_task); 2781 tasklet_disable_nosync(&jme->linkch_task);
2782 tasklet_disable_nosync(&jme->txclean_task); 2782 tasklet_disable_nosync(&jme->txclean_task);
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 03199fa10003..25e2af6997e4 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -400,7 +400,7 @@ static int korina_rx(struct net_device *dev, int limit)
400 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); 400 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
401 401
402 /* Malloc up new buffer. */ 402 /* Malloc up new buffer. */
403 skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2); 403 skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
404 404
405 if (!skb_new) 405 if (!skb_new)
406 break; 406 break;
@@ -417,9 +417,6 @@ static int korina_rx(struct net_device *dev, int limit)
417 if (devcs & ETH_RX_MP) 417 if (devcs & ETH_RX_MP)
418 dev->stats.multicast++; 418 dev->stats.multicast++;
419 419
420 /* 16 bit align */
421 skb_reserve(skb_new, 2);
422
423 lp->rx_skb[lp->rx_next_done] = skb_new; 420 lp->rx_skb[lp->rx_next_done] = skb_new;
424 } 421 }
425 422
@@ -1017,14 +1014,14 @@ static int korina_open(struct net_device *dev)
1017 /* Install the interrupt handler 1014 /* Install the interrupt handler
1018 * that handles the Done Finished 1015 * that handles the Done Finished
1019 * Ovr and Und Events */ 1016 * Ovr and Und Events */
1020 ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt, 1017 ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
1021 IRQF_DISABLED, "Korina ethernet Rx", dev); 1018 IRQF_DISABLED, "Korina ethernet Rx", dev);
1022 if (ret < 0) { 1019 if (ret < 0) {
1023 printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n", 1020 printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
1024 dev->name, lp->rx_irq); 1021 dev->name, lp->rx_irq);
1025 goto err_release; 1022 goto err_release;
1026 } 1023 }
1027 ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt, 1024 ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
1028 IRQF_DISABLED, "Korina ethernet Tx", dev); 1025 IRQF_DISABLED, "Korina ethernet Tx", dev);
1029 if (ret < 0) { 1026 if (ret < 0) {
1030 printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n", 1027 printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
@@ -1033,7 +1030,7 @@ static int korina_open(struct net_device *dev)
1033 } 1030 }
1034 1031
1035 /* Install handler for overrun error. */ 1032 /* Install handler for overrun error. */
1036 ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt, 1033 ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
1037 IRQF_DISABLED, "Ethernet Overflow", dev); 1034 IRQF_DISABLED, "Ethernet Overflow", dev);
1038 if (ret < 0) { 1035 if (ret < 0) {
1039 printk(KERN_ERR "%s: unable to get OVR IRQ %d\n", 1036 printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
@@ -1042,7 +1039,7 @@ static int korina_open(struct net_device *dev)
1042 } 1039 }
1043 1040
1044 /* Install handler for underflow error. */ 1041 /* Install handler for underflow error. */
1045 ret = request_irq(lp->und_irq, &korina_und_interrupt, 1042 ret = request_irq(lp->und_irq, korina_und_interrupt,
1046 IRQF_DISABLED, "Ethernet Underflow", dev); 1043 IRQF_DISABLED, "Ethernet Underflow", dev);
1047 if (ret < 0) { 1044 if (ret < 0) {
1048 printk(KERN_ERR "%s: unable to get UND IRQ %d\n", 1045 printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 99e954167fa6..5c45cb58d023 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -357,7 +357,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
357 357
358 /* check the status */ 358 /* check the status */
359 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 359 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
360 struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2); 360 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
361 361
362 dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n", 362 dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
363 __func__, len); 363 __func__, len);
@@ -369,9 +369,6 @@ static void ks8842_rx_frame(struct net_device *netdev,
369 if (status & RXSR_MULTICAST) 369 if (status & RXSR_MULTICAST)
370 netdev->stats.multicast++; 370 netdev->stats.multicast++;
371 371
372 /* Align socket buffer in 4-byte boundary for
373 better performance. */
374 skb_reserve(skb, 2);
375 data = (u32 *)skb_put(skb, len); 372 data = (u32 *)skb_put(skb, len);
376 373
377 ks8842_select_bank(adapter, 17); 374 ks8842_select_bank(adapter, 17);
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index dcda30338b65..7b2c42992c35 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -755,7 +755,7 @@ lance_open(struct net_device *dev)
755 int i; 755 int i;
756 756
757 if (dev->irq == 0 || 757 if (dev->irq == 0 ||
758 request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) { 758 request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
759 return -EAGAIN; 759 return -EAGAIN;
760 } 760 }
761 761
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 51e11c3e53e1..7a07430206e3 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev)
470 470
471 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { 471 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
472 dma_addr_t dma_addr; 472 dma_addr_t dma_addr;
473 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); 473 struct sk_buff *skb;
474 474
475 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
475 if (skb == NULL) 476 if (skb == NULL)
476 return -1; 477 return -1;
477 skb_reserve(skb, 2);
478 dma_addr = dma_map_single(dev->dev.parent, skb->data, 478 dma_addr = dma_map_single(dev->dev.parent, skb->data,
479 PKT_BUF_SZ, DMA_FROM_DEVICE); 479 PKT_BUF_SZ, DMA_FROM_DEVICE);
480 rbd->v_next = rbd+1; 480 rbd->v_next = rbd+1;
@@ -588,7 +588,7 @@ static int init_i596_mem(struct net_device *dev)
588 "%s: i82596 initialization successful\n", 588 "%s: i82596 initialization successful\n",
589 dev->name)); 589 dev->name));
590 590
591 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) { 591 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
592 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); 592 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
593 goto failed; 593 goto failed;
594 } 594 }
@@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev)
697 (dma_addr_t)SWAP32(rbd->b_data), 697 (dma_addr_t)SWAP32(rbd->b_data),
698 PKT_BUF_SZ, DMA_FROM_DEVICE); 698 PKT_BUF_SZ, DMA_FROM_DEVICE);
699 /* Get fresh skbuff to replace filled one. */ 699 /* Get fresh skbuff to replace filled one. */
700 newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); 700 newskb = netdev_alloc_skb_ip_align(dev,
701 PKT_BUF_SZ);
701 if (newskb == NULL) { 702 if (newskb == NULL) {
702 skb = NULL; /* drop pkt */ 703 skb = NULL; /* drop pkt */
703 goto memory_squeeze; 704 goto memory_squeeze;
704 } 705 }
705 skb_reserve(newskb, 2);
706 706
707 /* Pass up the skb already on the Rx ring. */ 707 /* Pass up the skb already on the Rx ring. */
708 skb_put(skb, pkt_len); 708 skb_put(skb, pkt_len);
@@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev)
716 rbd->b_data = SWAP32(dma_addr); 716 rbd->b_data = SWAP32(dma_addr);
717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); 717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
718 } else 718 } else
719 skb = netdev_alloc_skb(dev, pkt_len + 2); 719 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
720memory_squeeze: 720memory_squeeze:
721 if (skb == NULL) { 721 if (skb == NULL) {
722 /* XXX tulip.c can defer packets here!! */ 722 /* XXX tulip.c can defer packets here!! */
@@ -730,7 +730,6 @@ memory_squeeze:
730 dma_sync_single_for_cpu(dev->dev.parent, 730 dma_sync_single_for_cpu(dev->dev.parent,
731 (dma_addr_t)SWAP32(rbd->b_data), 731 (dma_addr_t)SWAP32(rbd->b_data),
732 PKT_BUF_SZ, DMA_FROM_DEVICE); 732 PKT_BUF_SZ, DMA_FROM_DEVICE);
733 skb_reserve(skb, 2);
734 memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); 733 memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
735 dma_sync_single_for_device(dev->dev.parent, 734 dma_sync_single_for_device(dev->dev.parent,
736 (dma_addr_t)SWAP32(rbd->b_data), 735 (dma_addr_t)SWAP32(rbd->b_data),
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 1bc654a73c47..c9f65574378f 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -207,7 +207,7 @@ static __net_init int loopback_net_init(struct net *net)
207out_free_netdev: 207out_free_netdev:
208 free_netdev(dev); 208 free_netdev(dev);
209out: 209out:
210 if (net == &init_net) 210 if (net_eq(net, &init_net))
211 panic("loopback: Failed to register netdevice: %d\n", err); 211 panic("loopback: Failed to register netdevice: %d\n", err);
212 return err; 212 return err;
213} 213}
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index cc3ed9cf28be..e20fefc73c8b 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -845,7 +845,7 @@ static int i596_open(struct net_device *dev)
845{ 845{
846 int i; 846 int i;
847 847
848 i = request_irq(dev->irq, &i596_interrupt, IRQF_SHARED, dev->name, dev); 848 i = request_irq(dev->irq, i596_interrupt, IRQF_SHARED, dev->name, dev);
849 if (i) { 849 if (i) {
850 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); 850 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
851 return i; 851 return i;
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 149e0ed4a055..c244ec34fc43 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -337,7 +337,7 @@ net_open(struct net_device *dev)
337 writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ); 337 writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ);
338 338
339 /* Grab the interrupt */ 339 /* Grab the interrupt */
340 if (request_irq(dev->irq, &net_interrupt, 0, "cs89x0", dev)) 340 if (request_irq(dev->irq, net_interrupt, 0, "cs89x0", dev))
341 return -EAGAIN; 341 return -EAGAIN;
342 342
343 /* Set up the IRQ - Apparently magic */ 343 /* Set up the IRQ - Apparently magic */
diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c
index b3d7d8d77f46..875d361fb79d 100644
--- a/drivers/net/macsonic.c
+++ b/drivers/net/macsonic.c
@@ -140,7 +140,7 @@ static irqreturn_t macsonic_interrupt(int irq, void *dev_id)
140 140
141static int macsonic_open(struct net_device* dev) 141static int macsonic_open(struct net_device* dev)
142{ 142{
143 if (request_irq(dev->irq, &sonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { 143 if (request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) {
144 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq); 144 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
145 return -EAGAIN; 145 return -EAGAIN;
146 } 146 }
@@ -149,7 +149,7 @@ static int macsonic_open(struct net_device* dev)
149 * rupt as well, which must prevent re-entrance of the sonic handler. 149 * rupt as well, which must prevent re-entrance of the sonic handler.
150 */ 150 */
151 if (dev->irq == IRQ_AUTO_3) 151 if (dev->irq == IRQ_AUTO_3)
152 if (request_irq(IRQ_NUBUS_9, &macsonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) { 152 if (request_irq(IRQ_NUBUS_9, macsonic_interrupt, IRQ_FLG_FAST, "sonic", dev)) {
153 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9); 153 printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, IRQ_NUBUS_9);
154 free_irq(dev->irq, dev); 154 free_irq(dev->irq, dev);
155 return -EAGAIN; 155 return -EAGAIN;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2490aa39804c..93c3e6edf702 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -29,6 +29,7 @@
29#include <linux/if_link.h> 29#include <linux/if_link.h>
30#include <linux/if_macvlan.h> 30#include <linux/if_macvlan.h>
31#include <net/rtnetlink.h> 31#include <net/rtnetlink.h>
32#include <net/xfrm.h>
32 33
33#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) 34#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
34 35
@@ -38,12 +39,28 @@ struct macvlan_port {
38 struct list_head vlans; 39 struct list_head vlans;
39}; 40};
40 41
42/**
43 * struct macvlan_rx_stats - MACVLAN percpu rx stats
44 * @rx_packets: number of received packets
45 * @rx_bytes: number of received bytes
46 * @multicast: number of received multicast packets
47 * @rx_errors: number of errors
48 */
49struct macvlan_rx_stats {
50 unsigned long rx_packets;
51 unsigned long rx_bytes;
52 unsigned long multicast;
53 unsigned long rx_errors;
54};
55
41struct macvlan_dev { 56struct macvlan_dev {
42 struct net_device *dev; 57 struct net_device *dev;
43 struct list_head list; 58 struct list_head list;
44 struct hlist_node hlist; 59 struct hlist_node hlist;
45 struct macvlan_port *port; 60 struct macvlan_port *port;
46 struct net_device *lowerdev; 61 struct net_device *lowerdev;
62 struct macvlan_rx_stats *rx_stats;
63 enum macvlan_mode mode;
47}; 64};
48 65
49 66
@@ -101,41 +118,67 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
101 return 0; 118 return 0;
102} 119}
103 120
121static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
122 unsigned int len, bool success,
123 bool multicast)
124{
125 struct macvlan_rx_stats *rx_stats;
126
127 rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
128 if (likely(success)) {
129 rx_stats->rx_packets++;;
130 rx_stats->rx_bytes += len;
131 if (multicast)
132 rx_stats->multicast++;
133 } else {
134 rx_stats->rx_errors++;
135 }
136}
137
138static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
139 const struct ethhdr *eth, bool local)
140{
141 if (!skb)
142 return NET_RX_DROP;
143
144 if (local)
145 return dev_forward_skb(dev, skb);
146
147 skb->dev = dev;
148 if (!compare_ether_addr_64bits(eth->h_dest,
149 dev->broadcast))
150 skb->pkt_type = PACKET_BROADCAST;
151 else
152 skb->pkt_type = PACKET_MULTICAST;
153
154 return netif_rx(skb);
155}
156
104static void macvlan_broadcast(struct sk_buff *skb, 157static void macvlan_broadcast(struct sk_buff *skb,
105 const struct macvlan_port *port) 158 const struct macvlan_port *port,
159 struct net_device *src,
160 enum macvlan_mode mode)
106{ 161{
107 const struct ethhdr *eth = eth_hdr(skb); 162 const struct ethhdr *eth = eth_hdr(skb);
108 const struct macvlan_dev *vlan; 163 const struct macvlan_dev *vlan;
109 struct hlist_node *n; 164 struct hlist_node *n;
110 struct net_device *dev;
111 struct sk_buff *nskb; 165 struct sk_buff *nskb;
112 unsigned int i; 166 unsigned int i;
167 int err;
113 168
114 if (skb->protocol == htons(ETH_P_PAUSE)) 169 if (skb->protocol == htons(ETH_P_PAUSE))
115 return; 170 return;
116 171
117 for (i = 0; i < MACVLAN_HASH_SIZE; i++) { 172 for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
118 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { 173 hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
119 dev = vlan->dev; 174 if (vlan->dev == src || !(vlan->mode & mode))
120
121 nskb = skb_clone(skb, GFP_ATOMIC);
122 if (nskb == NULL) {
123 dev->stats.rx_errors++;
124 dev->stats.rx_dropped++;
125 continue; 175 continue;
126 }
127 176
128 dev->stats.rx_bytes += skb->len + ETH_HLEN; 177 nskb = skb_clone(skb, GFP_ATOMIC);
129 dev->stats.rx_packets++; 178 err = macvlan_broadcast_one(nskb, vlan->dev, eth,
130 dev->stats.multicast++; 179 mode == MACVLAN_MODE_BRIDGE);
131 180 macvlan_count_rx(vlan, skb->len + ETH_HLEN,
132 nskb->dev = dev; 181 err == NET_RX_SUCCESS, 1);
133 if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
134 nskb->pkt_type = PACKET_BROADCAST;
135 else
136 nskb->pkt_type = PACKET_MULTICAST;
137
138 netif_rx(nskb);
139 } 182 }
140 } 183 }
141} 184}
@@ -146,14 +189,34 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
146 const struct ethhdr *eth = eth_hdr(skb); 189 const struct ethhdr *eth = eth_hdr(skb);
147 const struct macvlan_port *port; 190 const struct macvlan_port *port;
148 const struct macvlan_dev *vlan; 191 const struct macvlan_dev *vlan;
192 const struct macvlan_dev *src;
149 struct net_device *dev; 193 struct net_device *dev;
194 unsigned int len;
150 195
151 port = rcu_dereference(skb->dev->macvlan_port); 196 port = rcu_dereference(skb->dev->macvlan_port);
152 if (port == NULL) 197 if (port == NULL)
153 return skb; 198 return skb;
154 199
155 if (is_multicast_ether_addr(eth->h_dest)) { 200 if (is_multicast_ether_addr(eth->h_dest)) {
156 macvlan_broadcast(skb, port); 201 src = macvlan_hash_lookup(port, eth->h_source);
202 if (!src)
203 /* frame comes from an external address */
204 macvlan_broadcast(skb, port, NULL,
205 MACVLAN_MODE_PRIVATE |
206 MACVLAN_MODE_VEPA |
207 MACVLAN_MODE_BRIDGE);
208 else if (src->mode == MACVLAN_MODE_VEPA)
209 /* flood to everyone except source */
210 macvlan_broadcast(skb, port, src->dev,
211 MACVLAN_MODE_VEPA |
212 MACVLAN_MODE_BRIDGE);
213 else if (src->mode == MACVLAN_MODE_BRIDGE)
214 /*
215 * flood only to VEPA ports, bridge ports
216 * already saw the frame on the way out.
217 */
218 macvlan_broadcast(skb, port, src->dev,
219 MACVLAN_MODE_VEPA);
157 return skb; 220 return skb;
158 } 221 }
159 222
@@ -166,16 +229,11 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
166 kfree_skb(skb); 229 kfree_skb(skb);
167 return NULL; 230 return NULL;
168 } 231 }
169 232 len = skb->len + ETH_HLEN;
170 skb = skb_share_check(skb, GFP_ATOMIC); 233 skb = skb_share_check(skb, GFP_ATOMIC);
171 if (skb == NULL) { 234 macvlan_count_rx(vlan, len, skb != NULL, 0);
172 dev->stats.rx_errors++; 235 if (!skb)
173 dev->stats.rx_dropped++;
174 return NULL; 236 return NULL;
175 }
176
177 dev->stats.rx_bytes += skb->len + ETH_HLEN;
178 dev->stats.rx_packets++;
179 237
180 skb->dev = dev; 238 skb->dev = dev;
181 skb->pkt_type = PACKET_HOST; 239 skb->pkt_type = PACKET_HOST;
@@ -184,25 +242,53 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
184 return NULL; 242 return NULL;
185} 243}
186 244
245static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
246{
247 const struct macvlan_dev *vlan = netdev_priv(dev);
248 const struct macvlan_port *port = vlan->port;
249 const struct macvlan_dev *dest;
250
251 if (vlan->mode == MACVLAN_MODE_BRIDGE) {
252 const struct ethhdr *eth = (void *)skb->data;
253
254 /* send to other bridge ports directly */
255 if (is_multicast_ether_addr(eth->h_dest)) {
256 macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE);
257 goto xmit_world;
258 }
259
260 dest = macvlan_hash_lookup(port, eth->h_dest);
261 if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
262 unsigned int length = skb->len + ETH_HLEN;
263 int ret = dev_forward_skb(dest->dev, skb);
264 macvlan_count_rx(dest, length,
265 ret == NET_RX_SUCCESS, 0);
266
267 return NET_XMIT_SUCCESS;
268 }
269 }
270
271xmit_world:
272 skb->dev = vlan->lowerdev;
273 return dev_queue_xmit(skb);
274}
275
187static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 276static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
188 struct net_device *dev) 277 struct net_device *dev)
189{ 278{
190 int i = skb_get_queue_mapping(skb); 279 int i = skb_get_queue_mapping(skb);
191 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 280 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
192 const struct macvlan_dev *vlan = netdev_priv(dev);
193 unsigned int len = skb->len; 281 unsigned int len = skb->len;
194 int ret; 282 int ret;
195 283
196 skb->dev = vlan->lowerdev; 284 ret = macvlan_queue_xmit(skb, dev);
197 ret = dev_queue_xmit(skb);
198
199 if (likely(ret == NET_XMIT_SUCCESS)) { 285 if (likely(ret == NET_XMIT_SUCCESS)) {
200 txq->tx_packets++; 286 txq->tx_packets++;
201 txq->tx_bytes += len; 287 txq->tx_bytes += len;
202 } else 288 } else
203 txq->tx_dropped++; 289 txq->tx_dropped++;
204 290
205 return NETDEV_TX_OK; 291 return ret;
206} 292}
207 293
208static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, 294static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -366,9 +452,47 @@ static int macvlan_init(struct net_device *dev)
366 452
367 macvlan_set_lockdep_class(dev); 453 macvlan_set_lockdep_class(dev);
368 454
455 vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
456 if (!vlan->rx_stats)
457 return -ENOMEM;
458
369 return 0; 459 return 0;
370} 460}
371 461
462static void macvlan_uninit(struct net_device *dev)
463{
464 struct macvlan_dev *vlan = netdev_priv(dev);
465
466 free_percpu(vlan->rx_stats);
467}
468
469static struct net_device_stats *macvlan_dev_get_stats(struct net_device *dev)
470{
471 struct net_device_stats *stats = &dev->stats;
472 struct macvlan_dev *vlan = netdev_priv(dev);
473
474 dev_txq_stats_fold(dev, stats);
475
476 if (vlan->rx_stats) {
477 struct macvlan_rx_stats *p, rx = {0};
478 int i;
479
480 for_each_possible_cpu(i) {
481 p = per_cpu_ptr(vlan->rx_stats, i);
482 rx.rx_packets += p->rx_packets;
483 rx.rx_bytes += p->rx_bytes;
484 rx.rx_errors += p->rx_errors;
485 rx.multicast += p->multicast;
486 }
487 stats->rx_packets = rx.rx_packets;
488 stats->rx_bytes = rx.rx_bytes;
489 stats->rx_errors = rx.rx_errors;
490 stats->rx_dropped = rx.rx_errors;
491 stats->multicast = rx.multicast;
492 }
493 return stats;
494}
495
372static void macvlan_ethtool_get_drvinfo(struct net_device *dev, 496static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
373 struct ethtool_drvinfo *drvinfo) 497 struct ethtool_drvinfo *drvinfo)
374{ 498{
@@ -405,6 +529,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = {
405 529
406static const struct net_device_ops macvlan_netdev_ops = { 530static const struct net_device_ops macvlan_netdev_ops = {
407 .ndo_init = macvlan_init, 531 .ndo_init = macvlan_init,
532 .ndo_uninit = macvlan_uninit,
408 .ndo_open = macvlan_open, 533 .ndo_open = macvlan_open,
409 .ndo_stop = macvlan_stop, 534 .ndo_stop = macvlan_stop,
410 .ndo_start_xmit = macvlan_start_xmit, 535 .ndo_start_xmit = macvlan_start_xmit,
@@ -412,6 +537,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
412 .ndo_change_rx_flags = macvlan_change_rx_flags, 537 .ndo_change_rx_flags = macvlan_change_rx_flags,
413 .ndo_set_mac_address = macvlan_set_mac_address, 538 .ndo_set_mac_address = macvlan_set_mac_address,
414 .ndo_set_multicast_list = macvlan_set_multicast_list, 539 .ndo_set_multicast_list = macvlan_set_multicast_list,
540 .ndo_get_stats = macvlan_dev_get_stats,
415 .ndo_validate_addr = eth_validate_addr, 541 .ndo_validate_addr = eth_validate_addr,
416}; 542};
417 543
@@ -483,6 +609,17 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
483 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) 609 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
484 return -EADDRNOTAVAIL; 610 return -EADDRNOTAVAIL;
485 } 611 }
612
613 if (data && data[IFLA_MACVLAN_MODE]) {
614 switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
615 case MACVLAN_MODE_PRIVATE:
616 case MACVLAN_MODE_VEPA:
617 case MACVLAN_MODE_BRIDGE:
618 break;
619 default:
620 return -EINVAL;
621 }
622 }
486 return 0; 623 return 0;
487} 624}
488 625
@@ -505,7 +642,7 @@ static int macvlan_get_tx_queues(struct net *net,
505 return 0; 642 return 0;
506} 643}
507 644
508static int macvlan_newlink(struct net_device *dev, 645static int macvlan_newlink(struct net *src_net, struct net_device *dev,
509 struct nlattr *tb[], struct nlattr *data[]) 646 struct nlattr *tb[], struct nlattr *data[])
510{ 647{
511 struct macvlan_dev *vlan = netdev_priv(dev); 648 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -516,7 +653,7 @@ static int macvlan_newlink(struct net_device *dev,
516 if (!tb[IFLA_LINK]) 653 if (!tb[IFLA_LINK])
517 return -EINVAL; 654 return -EINVAL;
518 655
519 lowerdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); 656 lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
520 if (lowerdev == NULL) 657 if (lowerdev == NULL)
521 return -ENODEV; 658 return -ENODEV;
522 659
@@ -547,6 +684,10 @@ static int macvlan_newlink(struct net_device *dev,
547 vlan->dev = dev; 684 vlan->dev = dev;
548 vlan->port = port; 685 vlan->port = port;
549 686
687 vlan->mode = MACVLAN_MODE_VEPA;
688 if (data && data[IFLA_MACVLAN_MODE])
689 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
690
550 err = register_netdevice(dev); 691 err = register_netdevice(dev);
551 if (err < 0) 692 if (err < 0)
552 return err; 693 return err;
@@ -556,18 +697,48 @@ static int macvlan_newlink(struct net_device *dev,
556 return 0; 697 return 0;
557} 698}
558 699
559static void macvlan_dellink(struct net_device *dev) 700static void macvlan_dellink(struct net_device *dev, struct list_head *head)
560{ 701{
561 struct macvlan_dev *vlan = netdev_priv(dev); 702 struct macvlan_dev *vlan = netdev_priv(dev);
562 struct macvlan_port *port = vlan->port; 703 struct macvlan_port *port = vlan->port;
563 704
564 list_del(&vlan->list); 705 list_del(&vlan->list);
565 unregister_netdevice(dev); 706 unregister_netdevice_queue(dev, head);
566 707
567 if (list_empty(&port->vlans)) 708 if (list_empty(&port->vlans))
568 macvlan_port_destroy(port->dev); 709 macvlan_port_destroy(port->dev);
569} 710}
570 711
712static int macvlan_changelink(struct net_device *dev,
713 struct nlattr *tb[], struct nlattr *data[])
714{
715 struct macvlan_dev *vlan = netdev_priv(dev);
716 if (data && data[IFLA_MACVLAN_MODE])
717 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
718 return 0;
719}
720
721static size_t macvlan_get_size(const struct net_device *dev)
722{
723 return nla_total_size(4);
724}
725
726static int macvlan_fill_info(struct sk_buff *skb,
727 const struct net_device *dev)
728{
729 struct macvlan_dev *vlan = netdev_priv(dev);
730
731 NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode);
732 return 0;
733
734nla_put_failure:
735 return -EMSGSIZE;
736}
737
738static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
739 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
740};
741
571static struct rtnl_link_ops macvlan_link_ops __read_mostly = { 742static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
572 .kind = "macvlan", 743 .kind = "macvlan",
573 .priv_size = sizeof(struct macvlan_dev), 744 .priv_size = sizeof(struct macvlan_dev),
@@ -576,6 +747,11 @@ static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
576 .validate = macvlan_validate, 747 .validate = macvlan_validate,
577 .newlink = macvlan_newlink, 748 .newlink = macvlan_newlink,
578 .dellink = macvlan_dellink, 749 .dellink = macvlan_dellink,
750 .maxtype = IFLA_MACVLAN_MAX,
751 .policy = macvlan_policy,
752 .changelink = macvlan_changelink,
753 .get_size = macvlan_get_size,
754 .fill_info = macvlan_fill_info,
579}; 755};
580 756
581static int macvlan_device_event(struct notifier_block *unused, 757static int macvlan_device_event(struct notifier_block *unused,
@@ -603,7 +779,7 @@ static int macvlan_device_event(struct notifier_block *unused,
603 break; 779 break;
604 case NETDEV_UNREGISTER: 780 case NETDEV_UNREGISTER:
605 list_for_each_entry_safe(vlan, next, &port->vlans, list) 781 list_for_each_entry_safe(vlan, next, &port->vlans, list)
606 macvlan_dellink(vlan->dev); 782 macvlan_dellink(vlan->dev, NULL);
607 break; 783 break;
608 } 784 }
609 return NOTIFY_DONE; 785 return NOTIFY_DONE;
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 21f8754fcf4c..e85bf04cf813 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -162,6 +162,10 @@ static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr)
162 result |= ADVERTISED_100baseT_Half; 162 result |= ADVERTISED_100baseT_Half;
163 if (reg & ADVERTISE_100FULL) 163 if (reg & ADVERTISE_100FULL)
164 result |= ADVERTISED_100baseT_Full; 164 result |= ADVERTISED_100baseT_Full;
165 if (reg & ADVERTISE_PAUSE_CAP)
166 result |= ADVERTISED_Pause;
167 if (reg & ADVERTISE_PAUSE_ASYM)
168 result |= ADVERTISED_Asym_Pause;
165 return result; 169 return result;
166} 170}
167 171
@@ -344,11 +348,9 @@ void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
344 348
345 old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, 349 old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
346 MDIO_AN_ADVERTISE); 350 MDIO_AN_ADVERTISE);
347 adv = old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 351 adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
348 if (ecmd->autoneg) 352 mii_advertise_flowctrl((ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
349 adv |= mii_advertise_flowctrl( 353 (ecmd->tx_pause ? FLOW_CTRL_TX : 0)));
350 (ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
351 (ecmd->tx_pause ? FLOW_CTRL_TX : 0));
352 if (adv != old_adv) { 354 if (adv != old_adv) {
353 mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN, 355 mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
354 MDIO_AN_ADVERTISE, adv); 356 MDIO_AN_ADVERTISE, adv);
diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c
index 8ea98bd89ff1..8e9704f5c122 100644
--- a/drivers/net/mipsnet.c
+++ b/drivers/net/mipsnet.c
@@ -211,7 +211,7 @@ static int mipsnet_open(struct net_device *dev)
211{ 211{
212 int err; 212 int err;
213 213
214 err = request_irq(dev->irq, &mipsnet_interrupt, 214 err = request_irq(dev->irq, mipsnet_interrupt,
215 IRQF_SHARED, dev->name, (void *) dev); 215 IRQF_SHARED, dev->name, (void *) dev);
216 if (err) { 216 if (err) {
217 release_region(dev->base_addr, sizeof(struct mipsnet_regs)); 217 release_region(dev->base_addr, sizeof(struct mipsnet_regs));
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f3624517cb0e..85e1b6a3ac1b 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -207,7 +207,6 @@ struct myri10ge_priv {
207 int big_bytes; 207 int big_bytes;
208 int max_intr_slots; 208 int max_intr_slots;
209 struct net_device *dev; 209 struct net_device *dev;
210 struct net_device_stats stats;
211 spinlock_t stats_lock; 210 spinlock_t stats_lock;
212 u8 __iomem *sram; 211 u8 __iomem *sram;
213 int sram_size; 212 int sram_size;
@@ -264,6 +263,10 @@ static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
264static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; 263static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
265static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat"; 264static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
266static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat"; 265static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
266MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
267MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
268MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
269MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
267 270
268static char *myri10ge_fw_name = NULL; 271static char *myri10ge_fw_name = NULL;
269module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 272module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
@@ -1832,7 +1835,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1832 /* force stats update */ 1835 /* force stats update */
1833 (void)myri10ge_get_stats(netdev); 1836 (void)myri10ge_get_stats(netdev);
1834 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1837 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
1835 data[i] = ((unsigned long *)&mgp->stats)[i]; 1838 data[i] = ((unsigned long *)&netdev->stats)[i];
1836 1839
1837 data[i++] = (unsigned int)mgp->tx_boundary; 1840 data[i++] = (unsigned int)mgp->tx_boundary;
1838 data[i++] = (unsigned int)mgp->wc_enabled; 1841 data[i++] = (unsigned int)mgp->wc_enabled;
@@ -3002,7 +3005,7 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
3002{ 3005{
3003 struct myri10ge_priv *mgp = netdev_priv(dev); 3006 struct myri10ge_priv *mgp = netdev_priv(dev);
3004 struct myri10ge_slice_netstats *slice_stats; 3007 struct myri10ge_slice_netstats *slice_stats;
3005 struct net_device_stats *stats = &mgp->stats; 3008 struct net_device_stats *stats = &dev->stats;
3006 int i; 3009 int i;
3007 3010
3008 spin_lock(&mgp->stats_lock); 3011 spin_lock(&mgp->stats_lock);
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 29ebebc6a95b..b3513ad3b703 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -1084,7 +1084,7 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic
1084 1084
1085 /* Register interrupt handler now. */ 1085 /* Register interrupt handler now. */
1086 DET(("Requesting MYRIcom IRQ line.\n")); 1086 DET(("Requesting MYRIcom IRQ line.\n"));
1087 if (request_irq(dev->irq, &myri_interrupt, 1087 if (request_irq(dev->irq, myri_interrupt,
1088 IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) { 1088 IRQF_SHARED, "MyriCOM Ethernet", (void *) dev)) {
1089 printk("MyriCOM: Cannot register interrupt handler.\n"); 1089 printk("MyriCOM: Cannot register interrupt handler.\n");
1090 goto err; 1090 goto err;
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index b2722c44337e..9a8d3ab4709b 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1535,7 +1535,7 @@ static int netdev_open(struct net_device *dev)
1535 /* Reset the chip, just in case. */ 1535 /* Reset the chip, just in case. */
1536 natsemi_reset(dev); 1536 natsemi_reset(dev);
1537 1537
1538 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev); 1538 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
1539 if (i) return i; 1539 if (i) return i;
1540 1540
1541 if (netif_msg_ifup(np)) 1541 if (netif_msg_ifup(np))
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 9f4235466d59..64770298c4f7 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -212,7 +212,7 @@ static int netx_eth_open(struct net_device *ndev)
212 struct netx_eth_priv *priv = netdev_priv(ndev); 212 struct netx_eth_priv *priv = netdev_priv(ndev);
213 213
214 if (request_irq 214 if (request_irq
215 (ndev->irq, &netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev)) 215 (ndev->irq, netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev))
216 return -EAGAIN; 216 return -EAGAIN;
217 217
218 writel(ndev->dev_addr[0] | 218 writel(ndev->dev_addr[0] |
@@ -510,3 +510,6 @@ module_exit(netx_eth_cleanup);
510MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 510MODULE_AUTHOR("Sascha Hauer, Pengutronix");
511MODULE_LICENSE("GPL"); 511MODULE_LICENSE("GPL");
512MODULE_ALIAS("platform:" CARDNAME); 512MODULE_ALIAS("platform:" CARDNAME);
513MODULE_FIRMWARE("xc0.bin");
514MODULE_FIRMWARE("xc1.bin");
515MODULE_FIRMWARE("xc2.bin");
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e1237b802872..76cd1f3e9fc8 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 50 56#define _NETXEN_NIC_LINUX_SUBVERSION 65
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.50" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.65"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -74,8 +74,6 @@
74#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ 74#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \
75 * NETXEN_FLASH_SECTOR_SIZE) 75 * NETXEN_FLASH_SECTOR_SIZE)
76 76
77#define PHAN_VENDOR_ID 0x4040
78
79#define RCV_DESC_RINGSIZE(rds_ring) \ 77#define RCV_DESC_RINGSIZE(rds_ring) \
80 (sizeof(struct rcv_desc) * (rds_ring)->num_desc) 78 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
81#define RCV_BUFF_RINGSIZE(rds_ring) \ 79#define RCV_BUFF_RINGSIZE(rds_ring) \
@@ -117,9 +115,11 @@
117#define NX_P3_B0 0x40 115#define NX_P3_B0 0x40
118#define NX_P3_B1 0x41 116#define NX_P3_B1 0x41
119#define NX_P3_B2 0x42 117#define NX_P3_B2 0x42
118#define NX_P3P_A0 0x50
120 119
121#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) 120#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
122#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) 121#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
122#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0)
123 123
124#define FIRST_PAGE_GROUP_START 0 124#define FIRST_PAGE_GROUP_START 0
125#define FIRST_PAGE_GROUP_END 0x100000 125#define FIRST_PAGE_GROUP_END 0x100000
@@ -419,6 +419,34 @@ struct status_desc {
419 __le64 status_desc_data[2]; 419 __le64 status_desc_data[2];
420} __attribute__ ((aligned(16))); 420} __attribute__ ((aligned(16)));
421 421
422/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0x3eb000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7
427
428/*Offsets */
429#define NX_UNI_CHIP_REV_OFF 10
430#define NX_UNI_FLAGS_OFF 11
431#define NX_UNI_BIOS_VERSION_OFF 12
432#define NX_UNI_BOOTLD_IDX_OFF 27
433#define NX_UNI_FIRMWARE_IDX_OFF 29
434
435struct uni_table_desc{
436 uint32_t findex;
437 uint32_t num_entries;
438 uint32_t entry_size;
439 uint32_t reserved[5];
440};
441
442struct uni_data_desc{
443 uint32_t findex;
444 uint32_t size;
445 uint32_t reserved[5];
446};
447
448/* UNIFIED ROMIMAGE *************************/
449
422/* The version of the main data structure */ 450/* The version of the main data structure */
423#define NETXEN_BDINFO_VERSION 1 451#define NETXEN_BDINFO_VERSION 1
424 452
@@ -485,7 +513,15 @@ struct status_desc {
485#define NX_P2_MN_ROMIMAGE 0 513#define NX_P2_MN_ROMIMAGE 0
486#define NX_P3_CT_ROMIMAGE 1 514#define NX_P3_CT_ROMIMAGE 1
487#define NX_P3_MN_ROMIMAGE 2 515#define NX_P3_MN_ROMIMAGE 2
488#define NX_FLASH_ROMIMAGE 3 516#define NX_UNIFIED_ROMIMAGE 3
517#define NX_FLASH_ROMIMAGE 4
518#define NX_UNKNOWN_ROMIMAGE 0xff
519
520#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin"
521#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin"
522#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin"
523#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
524#define NX_FLASH_ROMIMAGE_NAME "flash"
489 525
490extern char netxen_nic_driver_name[]; 526extern char netxen_nic_driver_name[];
491 527
@@ -543,13 +579,16 @@ struct netxen_hardware_context {
543 void __iomem *pci_base1; 579 void __iomem *pci_base1;
544 void __iomem *pci_base2; 580 void __iomem *pci_base2;
545 void __iomem *db_base; 581 void __iomem *db_base;
582 void __iomem *ocm_win_crb;
583
546 unsigned long db_len; 584 unsigned long db_len;
547 unsigned long pci_len0; 585 unsigned long pci_len0;
548 586
549 int qdr_sn_window; 587 u32 ocm_win;
550 int ddr_mn_window; 588 u32 crb_win;
551 u32 mn_win_crb; 589
552 u32 ms_win_crb; 590 rwlock_t crb_lock;
591 spinlock_t mem_lock;
553 592
554 u8 cut_through; 593 u8 cut_through;
555 u8 revision_id; 594 u8 revision_id;
@@ -1039,6 +1078,9 @@ typedef struct {
1039#define LINKEVENT_LINKSPEED_MBPS 0 1078#define LINKEVENT_LINKSPEED_MBPS 0
1040#define LINKEVENT_LINKSPEED_ENCODED 1 1079#define LINKEVENT_LINKSPEED_ENCODED 1
1041 1080
1081#define AUTO_FW_RESET_ENABLED 0xEF10AF12
1082#define AUTO_FW_RESET_DISABLED 0xDCBAAF12
1083
1042/* firmware response header: 1084/* firmware response header:
1043 * 63:58 - message type 1085 * 63:58 - message type
1044 * 57:56 - owner 1086 * 57:56 - owner
@@ -1086,6 +1128,7 @@ typedef struct {
1086#define NETXEN_NIC_MSIX_ENABLED 0x04 1128#define NETXEN_NIC_MSIX_ENABLED 0x04
1087#define NETXEN_NIC_LRO_ENABLED 0x08 1129#define NETXEN_NIC_LRO_ENABLED 0x08
1088#define NETXEN_NIC_BRIDGE_ENABLED 0X10 1130#define NETXEN_NIC_BRIDGE_ENABLED 0X10
1131#define NETXEN_NIC_DIAG_ENABLED 0x20
1089#define NETXEN_IS_MSI_FAMILY(adapter) \ 1132#define NETXEN_IS_MSI_FAMILY(adapter) \
1090 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) 1133 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
1091 1134
@@ -1115,10 +1158,6 @@ struct netxen_adapter {
1115 struct pci_dev *pdev; 1158 struct pci_dev *pdev;
1116 struct list_head mac_list; 1159 struct list_head mac_list;
1117 1160
1118 u32 curr_window;
1119 u32 crb_win;
1120 rwlock_t adapter_lock;
1121
1122 spinlock_t tx_clean_lock; 1161 spinlock_t tx_clean_lock;
1123 1162
1124 u16 num_txd; 1163 u16 num_txd;
@@ -1182,11 +1221,10 @@ struct netxen_adapter {
1182 u32 (*crb_read)(struct netxen_adapter *, ulong); 1221 u32 (*crb_read)(struct netxen_adapter *, ulong);
1183 int (*crb_write)(struct netxen_adapter *, ulong, u32); 1222 int (*crb_write)(struct netxen_adapter *, ulong, u32);
1184 1223
1185 int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int); 1224 int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *);
1186 int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int); 1225 int (*pci_mem_write)(struct netxen_adapter *, u64, u64);
1187 1226
1188 unsigned long (*pci_set_window)(struct netxen_adapter *, 1227 int (*pci_set_window)(struct netxen_adapter *, u64, u32 *);
1189 unsigned long long);
1190 1228
1191 u32 (*io_read)(struct netxen_adapter *, void __iomem *); 1229 u32 (*io_read)(struct netxen_adapter *, void __iomem *);
1192 void (*io_write)(struct netxen_adapter *, void __iomem *, u32); 1230 void (*io_write)(struct netxen_adapter *, void __iomem *, u32);
@@ -1205,12 +1243,10 @@ struct netxen_adapter {
1205 1243
1206 struct work_struct tx_timeout_task; 1244 struct work_struct tx_timeout_task;
1207 1245
1208 struct net_device_stats net_stats;
1209
1210 nx_nic_intr_coalesce_t coal; 1246 nx_nic_intr_coalesce_t coal;
1211 1247
1212 unsigned long state; 1248 unsigned long state;
1213 u32 resv5; 1249 __le32 file_prd_off; /*File fw product offset*/
1214 u32 fw_version; 1250 u32 fw_version;
1215 const struct firmware *fw; 1251 const struct firmware *fw;
1216}; 1252};
@@ -1273,7 +1309,7 @@ int netxen_load_firmware(struct netxen_adapter *adapter);
1273int netxen_need_fw_reset(struct netxen_adapter *adapter); 1309int netxen_need_fw_reset(struct netxen_adapter *adapter);
1274void netxen_request_firmware(struct netxen_adapter *adapter); 1310void netxen_request_firmware(struct netxen_adapter *adapter);
1275void netxen_release_firmware(struct netxen_adapter *adapter); 1311void netxen_release_firmware(struct netxen_adapter *adapter);
1276int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1312int netxen_pinit_from_rom(struct netxen_adapter *adapter);
1277 1313
1278int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); 1314int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
1279int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, 1315int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 714f38791a9a..c86095eb5d9e 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -85,11 +85,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
85 85
86 strncpy(drvinfo->driver, netxen_nic_driver_name, 32); 86 strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
87 strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); 87 strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
88 read_lock(&adapter->adapter_lock);
89 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); 88 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
90 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); 89 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
91 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); 90 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
92 read_unlock(&adapter->adapter_lock);
93 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); 91 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
94 92
95 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 93 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
@@ -690,8 +688,8 @@ static int netxen_nic_reg_test(struct net_device *dev)
690 u32 data_read, data_written; 688 u32 data_read, data_written;
691 689
692 data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); 690 data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
693 if ((data_read & 0xffff) != PHAN_VENDOR_ID) 691 if ((data_read & 0xffff) != adapter->pdev->vendor)
694 return 1; 692 return 1;
695 693
696 data_written = (u32)0xa5a5a5a5; 694 data_written = (u32)0xa5a5a5a5;
697 695
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 17bb3818d84e..d138fc22927a 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -664,40 +664,51 @@ enum {
664#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) 664#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
665#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) 665#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
666 666
667
668#define TEST_AGT_CTRL (0x00)
669
670#define TA_CTL_START 1
671#define TA_CTL_ENABLE 2
672#define TA_CTL_WRITE 4
673#define TA_CTL_BUSY 8
674
667/* 675/*
668 * Register offsets for MN 676 * Register offsets for MN
669 */ 677 */
670#define MIU_CONTROL (0x000) 678#define MIU_TEST_AGT_BASE (0x90)
671#define MIU_TEST_AGT_CTRL (0x090) 679
672#define MIU_TEST_AGT_ADDR_LO (0x094) 680#define MIU_TEST_AGT_ADDR_LO (0x04)
673#define MIU_TEST_AGT_ADDR_HI (0x098) 681#define MIU_TEST_AGT_ADDR_HI (0x08)
674#define MIU_TEST_AGT_WRDATA_LO (0x0a0) 682#define MIU_TEST_AGT_WRDATA_LO (0x10)
675#define MIU_TEST_AGT_WRDATA_HI (0x0a4) 683#define MIU_TEST_AGT_WRDATA_HI (0x14)
676#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i))) 684#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
677#define MIU_TEST_AGT_RDDATA_LO (0x0a8) 685#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
678#define MIU_TEST_AGT_RDDATA_HI (0x0ac) 686#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
679#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i))) 687#define MIU_TEST_AGT_RDDATA_LO (0x18)
680#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 688#define MIU_TEST_AGT_RDDATA_HI (0x1c)
681#define MIU_TEST_AGT_UPPER_ADDR(off) (0) 689#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
682 690#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
683/* MIU_TEST_AGT_CTRL flags. work for SIU as well */ 691#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
684#define MIU_TA_CTL_START 1 692
685#define MIU_TA_CTL_ENABLE 2 693#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
686#define MIU_TA_CTL_WRITE 4 694#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
687#define MIU_TA_CTL_BUSY 8 695
688 696/*
689#define SIU_TEST_AGT_CTRL (0x060) 697 * Register offsets for MS
690#define SIU_TEST_AGT_ADDR_LO (0x064) 698 */
691#define SIU_TEST_AGT_ADDR_HI (0x078) 699#define SIU_TEST_AGT_BASE (0x60)
692#define SIU_TEST_AGT_WRDATA_LO (0x068) 700
693#define SIU_TEST_AGT_WRDATA_HI (0x06c) 701#define SIU_TEST_AGT_ADDR_LO (0x04)
694#define SIU_TEST_AGT_WRDATA(i) (0x068+(4*(i))) 702#define SIU_TEST_AGT_ADDR_HI (0x18)
695#define SIU_TEST_AGT_RDDATA_LO (0x070) 703#define SIU_TEST_AGT_WRDATA_LO (0x08)
696#define SIU_TEST_AGT_RDDATA_HI (0x074) 704#define SIU_TEST_AGT_WRDATA_HI (0x0c)
697#define SIU_TEST_AGT_RDDATA(i) (0x070+(4*(i))) 705#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
698 706#define SIU_TEST_AGT_RDDATA_LO (0x10)
699#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 707#define SIU_TEST_AGT_RDDATA_HI (0x14)
700#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) 708#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
709
710#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
711#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
701 712
702/* XG Link status */ 713/* XG Link status */
703#define XG_LINK_UP 0x10 714#define XG_LINK_UP 0x10
@@ -859,6 +870,9 @@ enum {
859 (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ 870 (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
860 (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) 871 (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
861 872
873#define PCIX_OCM_WINDOW (0x10800)
874#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
875
862#define PCIX_TARGET_STATUS (0x10118) 876#define PCIX_TARGET_STATUS (0x10118)
863#define PCIX_TARGET_STATUS_F1 (0x10160) 877#define PCIX_TARGET_STATUS_F1 (0x10160)
864#define PCIX_TARGET_STATUS_F2 (0x10164) 878#define PCIX_TARGET_STATUS_F2 (0x10164)
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 52a3798d8d94..e71ca30433ef 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -31,6 +31,7 @@
31#define MASK(n) ((1ULL<<(n))-1) 31#define MASK(n) ((1ULL<<(n))-1)
32#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) 32#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
33#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) 33#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
34#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
34#define MS_WIN(addr) (addr & 0x0ffc0000) 35#define MS_WIN(addr) (addr & 0x0ffc0000)
35 36
36#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) 37#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
@@ -41,6 +42,11 @@
41#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) 42#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
42#define CRB_INDIRECT_2M (0x1e0000UL) 43#define CRB_INDIRECT_2M (0x1e0000UL)
43 44
45static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
46 void __iomem *addr, u32 data);
47static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
48 void __iomem *addr);
49
44#ifndef readq 50#ifndef readq
45static inline u64 readq(void __iomem *addr) 51static inline u64 readq(void __iomem *addr)
46{ 52{
@@ -326,7 +332,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
326 if (done == 1) 332 if (done == 1)
327 break; 333 break;
328 if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) 334 if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT)
329 return -1; 335 return -EIO;
330 msleep(1); 336 msleep(1);
331 } 337 }
332 338
@@ -1073,89 +1079,71 @@ int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
1073 * Changes the CRB window to the specified window. 1079 * Changes the CRB window to the specified window.
1074 */ 1080 */
1075static void 1081static void
1076netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, u32 wndw) 1082netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
1083 u32 window)
1077{ 1084{
1078 void __iomem *offset; 1085 void __iomem *offset;
1079 u32 tmp; 1086 int count = 10;
1080 int count = 0; 1087 u8 func = adapter->ahw.pci_func;
1081 uint8_t func = adapter->ahw.pci_func;
1082 1088
1083 if (adapter->curr_window == wndw) 1089 if (adapter->ahw.crb_win == window)
1084 return; 1090 return;
1085 /* 1091
1086 * Move the CRB window.
1087 * We need to write to the "direct access" region of PCI
1088 * to avoid a race condition where the window register has
1089 * not been successfully written across CRB before the target
1090 * register address is received by PCI. The direct region bypasses
1091 * the CRB bus.
1092 */
1093 offset = PCI_OFFSET_SECOND_RANGE(adapter, 1092 offset = PCI_OFFSET_SECOND_RANGE(adapter,
1094 NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); 1093 NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
1095 1094
1096 if (wndw & 0x1) 1095 writel(window, offset);
1097 wndw = NETXEN_WINDOW_ONE; 1096 do {
1097 if (window == readl(offset))
1098 break;
1098 1099
1099 writel(wndw, offset); 1100 if (printk_ratelimit())
1101 dev_warn(&adapter->pdev->dev,
1102 "failed to set CRB window to %d\n",
1103 (window == NETXEN_WINDOW_ONE));
1104 udelay(1);
1100 1105
1101 /* MUST make sure window is set before we forge on... */ 1106 } while (--count > 0);
1102 while ((tmp = readl(offset)) != wndw) {
1103 printk(KERN_WARNING "%s: %s WARNING: CRB window value not "
1104 "registered properly: 0x%08x.\n",
1105 netxen_nic_driver_name, __func__, tmp);
1106 mdelay(1);
1107 if (count >= 10)
1108 break;
1109 count++;
1110 }
1111 1107
1112 if (wndw == NETXEN_WINDOW_ONE) 1108 if (count > 0)
1113 adapter->curr_window = 1; 1109 adapter->ahw.crb_win = window;
1114 else
1115 adapter->curr_window = 0;
1116} 1110}
1117 1111
1118/* 1112/*
1119 * Return -1 if off is not valid, 1113 * Returns < 0 if off is not valid,
1120 * 1 if window access is needed. 'off' is set to offset from 1114 * 1 if window access is needed. 'off' is set to offset from
1121 * CRB space in 128M pci map 1115 * CRB space in 128M pci map
1122 * 0 if no window access is needed. 'off' is set to 2M addr 1116 * 0 if no window access is needed. 'off' is set to 2M addr
1123 * In: 'off' is offset from base in 128M pci map 1117 * In: 'off' is offset from base in 128M pci map
1124 */ 1118 */
1125static int 1119static int
1126netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off) 1120netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
1121 ulong off, void __iomem **addr)
1127{ 1122{
1128 crb_128M_2M_sub_block_map_t *m; 1123 crb_128M_2M_sub_block_map_t *m;
1129 1124
1130 1125
1131 if (*off >= NETXEN_CRB_MAX) 1126 if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
1132 return -1; 1127 return -EINVAL;
1133
1134 if (*off >= NETXEN_PCI_CAMQM && (*off < NETXEN_PCI_CAMQM_2M_END)) {
1135 *off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE +
1136 (ulong)adapter->ahw.pci_base0;
1137 return 0;
1138 }
1139
1140 if (*off < NETXEN_PCI_CRBSPACE)
1141 return -1;
1142 1128
1143 *off -= NETXEN_PCI_CRBSPACE; 1129 off -= NETXEN_PCI_CRBSPACE;
1144 1130
1145 /* 1131 /*
1146 * Try direct map 1132 * Try direct map
1147 */ 1133 */
1148 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; 1134 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
1149 1135
1150 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { 1136 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
1151 *off = *off + m->start_2M - m->start_128M + 1137 *addr = adapter->ahw.pci_base0 + m->start_2M +
1152 (ulong)adapter->ahw.pci_base0; 1138 (off - m->start_128M);
1153 return 0; 1139 return 0;
1154 } 1140 }
1155 1141
1156 /* 1142 /*
1157 * Not in direct map, use crb window 1143 * Not in direct map, use crb window
1158 */ 1144 */
1145 *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
1146 (off & MASK(16));
1159 return 1; 1147 return 1;
1160} 1148}
1161 1149
@@ -1165,24 +1153,26 @@ netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off)
1165 * side effect: lock crb window 1153 * side effect: lock crb window
1166 */ 1154 */
1167static void 1155static void
1168netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off) 1156netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
1169{ 1157{
1170 u32 win_read; 1158 u32 window;
1159 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
1171 1160
1172 adapter->crb_win = CRB_HI(*off); 1161 off -= NETXEN_PCI_CRBSPACE;
1173 writel(adapter->crb_win, (adapter->ahw.pci_base0 + CRB_WINDOW_2M)); 1162
1174 /* 1163 window = CRB_HI(off);
1175 * Read back value to make sure write has gone through before trying 1164
1176 * to use it. 1165 if (adapter->ahw.crb_win == window)
1177 */ 1166 return;
1178 win_read = readl(adapter->ahw.pci_base0 + CRB_WINDOW_2M); 1167
1179 if (win_read != adapter->crb_win) { 1168 writel(window, addr);
1180 printk(KERN_ERR "%s: Written crbwin (0x%x) != " 1169 if (readl(addr) != window) {
1181 "Read crbwin (0x%x), off=0x%lx\n", 1170 if (printk_ratelimit())
1182 __func__, adapter->crb_win, win_read, *off); 1171 dev_warn(&adapter->pdev->dev,
1172 "failed to set CRB window to %d off 0x%lx\n",
1173 window, off);
1183 } 1174 }
1184 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + 1175 adapter->ahw.crb_win = window;
1185 (ulong)adapter->ahw.pci_base0;
1186} 1176}
1187 1177
1188static int 1178static int
@@ -1199,16 +1189,15 @@ netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
1199 BUG_ON(!addr); 1189 BUG_ON(!addr);
1200 1190
1201 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ 1191 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
1202 read_lock(&adapter->adapter_lock); 1192 netxen_nic_io_write_128M(adapter, addr, data);
1203 writel(data, addr);
1204 read_unlock(&adapter->adapter_lock);
1205 } else { /* Window 0 */ 1193 } else { /* Window 0 */
1206 write_lock_irqsave(&adapter->adapter_lock, flags); 1194 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1207 addr = pci_base_offset(adapter, off); 1195 addr = pci_base_offset(adapter, off);
1208 netxen_nic_pci_change_crbwindow_128M(adapter, 0); 1196 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1209 writel(data, addr); 1197 writel(data, addr);
1210 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1198 netxen_nic_pci_set_crbwindow_128M(adapter,
1211 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1199 NETXEN_WINDOW_ONE);
1200 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1212 } 1201 }
1213 1202
1214 return 0; 1203 return 0;
@@ -1229,15 +1218,14 @@ netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
1229 BUG_ON(!addr); 1218 BUG_ON(!addr);
1230 1219
1231 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ 1220 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
1232 read_lock(&adapter->adapter_lock); 1221 data = netxen_nic_io_read_128M(adapter, addr);
1233 data = readl(addr);
1234 read_unlock(&adapter->adapter_lock);
1235 } else { /* Window 0 */ 1222 } else { /* Window 0 */
1236 write_lock_irqsave(&adapter->adapter_lock, flags); 1223 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1237 netxen_nic_pci_change_crbwindow_128M(adapter, 0); 1224 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1238 data = readl(addr); 1225 data = readl(addr);
1239 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1226 netxen_nic_pci_set_crbwindow_128M(adapter,
1240 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1227 NETXEN_WINDOW_ONE);
1228 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1241 } 1229 }
1242 1230
1243 return data; 1231 return data;
@@ -1248,28 +1236,30 @@ netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
1248{ 1236{
1249 unsigned long flags; 1237 unsigned long flags;
1250 int rv; 1238 int rv;
1239 void __iomem *addr = NULL;
1251 1240
1252 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off); 1241 rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
1253 1242
1254 if (rv == -1) { 1243 if (rv == 0) {
1255 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", 1244 writel(data, addr);
1256 __func__, off); 1245 return 0;
1257 dump_stack();
1258 return -1;
1259 } 1246 }
1260 1247
1261 if (rv == 1) { 1248 if (rv > 0) {
1262 write_lock_irqsave(&adapter->adapter_lock, flags); 1249 /* indirect access */
1250 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1263 crb_win_lock(adapter); 1251 crb_win_lock(adapter);
1264 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1252 netxen_nic_pci_set_crbwindow_2M(adapter, off);
1265 writel(data, (void __iomem *)off); 1253 writel(data, addr);
1266 crb_win_unlock(adapter); 1254 crb_win_unlock(adapter);
1267 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1255 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1268 } else 1256 return 0;
1269 writel(data, (void __iomem *)off); 1257 }
1270
1271 1258
1272 return 0; 1259 dev_err(&adapter->pdev->dev,
1260 "%s: invalid offset: 0x%016lx\n", __func__, off);
1261 dump_stack();
1262 return -EIO;
1273} 1263}
1274 1264
1275static u32 1265static u32
@@ -1278,102 +1268,37 @@ netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
1278 unsigned long flags; 1268 unsigned long flags;
1279 int rv; 1269 int rv;
1280 u32 data; 1270 u32 data;
1271 void __iomem *addr = NULL;
1281 1272
1282 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off); 1273 rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
1283 1274
1284 if (rv == -1) { 1275 if (rv == 0)
1285 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", 1276 return readl(addr);
1286 __func__, off);
1287 dump_stack();
1288 return -1;
1289 }
1290 1277
1291 if (rv == 1) { 1278 if (rv > 0) {
1292 write_lock_irqsave(&adapter->adapter_lock, flags); 1279 /* indirect access */
1280 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1293 crb_win_lock(adapter); 1281 crb_win_lock(adapter);
1294 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1282 netxen_nic_pci_set_crbwindow_2M(adapter, off);
1295 data = readl((void __iomem *)off); 1283 data = readl(addr);
1296 crb_win_unlock(adapter); 1284 crb_win_unlock(adapter);
1297 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1285 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1298 } else 1286 return data;
1299 data = readl((void __iomem *)off);
1300
1301 return data;
1302}
1303
1304static int netxen_pci_set_window_warning_count;
1305
1306static unsigned long
1307netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
1308 unsigned long long addr)
1309{
1310 void __iomem *offset;
1311 int window;
1312 unsigned long long qdr_max;
1313 uint8_t func = adapter->ahw.pci_func;
1314
1315 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1316 qdr_max = NETXEN_ADDR_QDR_NET_MAX_P2;
1317 } else {
1318 qdr_max = NETXEN_ADDR_QDR_NET_MAX_P3;
1319 } 1287 }
1320 1288
1321 if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1289 dev_err(&adapter->pdev->dev,
1322 /* DDR network side */ 1290 "%s: invalid offset: 0x%016lx\n", __func__, off);
1323 addr -= NETXEN_ADDR_DDR_NET; 1291 dump_stack();
1324 window = (addr >> 25) & 0x3ff; 1292 return -1;
1325 if (adapter->ahw.ddr_mn_window != window) {
1326 adapter->ahw.ddr_mn_window = window;
1327 offset = PCI_OFFSET_SECOND_RANGE(adapter,
1328 NETXEN_PCIX_PH_REG(PCIE_MN_WINDOW_REG(func)));
1329 writel(window, offset);
1330 /* MUST make sure window is set before we forge on... */
1331 readl(offset);
1332 }
1333 addr -= (window * NETXEN_WINDOW_ONE);
1334 addr += NETXEN_PCI_DDR_NET;
1335 } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
1336 addr -= NETXEN_ADDR_OCM0;
1337 addr += NETXEN_PCI_OCM0;
1338 } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1339 addr -= NETXEN_ADDR_OCM1;
1340 addr += NETXEN_PCI_OCM1;
1341 } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_QDR_NET, qdr_max)) {
1342 /* QDR network side */
1343 addr -= NETXEN_ADDR_QDR_NET;
1344 window = (addr >> 22) & 0x3f;
1345 if (adapter->ahw.qdr_sn_window != window) {
1346 adapter->ahw.qdr_sn_window = window;
1347 offset = PCI_OFFSET_SECOND_RANGE(adapter,
1348 NETXEN_PCIX_PH_REG(PCIE_SN_WINDOW_REG(func)));
1349 writel((window << 22), offset);
1350 /* MUST make sure window is set before we forge on... */
1351 readl(offset);
1352 }
1353 addr -= (window * 0x400000);
1354 addr += NETXEN_PCI_QDR_NET;
1355 } else {
1356 /*
1357 * peg gdb frequently accesses memory that doesn't exist,
1358 * this limits the chit chat so debugging isn't slowed down.
1359 */
1360 if ((netxen_pci_set_window_warning_count++ < 8)
1361 || (netxen_pci_set_window_warning_count % 64 == 0))
1362 printk("%s: Warning:netxen_nic_pci_set_window()"
1363 " Unknown address range!\n",
1364 netxen_nic_driver_name);
1365 addr = -1UL;
1366 }
1367 return addr;
1368} 1293}
1369 1294
1370/* window 1 registers only */ 1295/* window 1 registers only */
1371static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, 1296static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
1372 void __iomem *addr, u32 data) 1297 void __iomem *addr, u32 data)
1373{ 1298{
1374 read_lock(&adapter->adapter_lock); 1299 read_lock(&adapter->ahw.crb_lock);
1375 writel(data, addr); 1300 writel(data, addr);
1376 read_unlock(&adapter->adapter_lock); 1301 read_unlock(&adapter->ahw.crb_lock);
1377} 1302}
1378 1303
1379static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, 1304static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
@@ -1381,9 +1306,9 @@ static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
1381{ 1306{
1382 u32 val; 1307 u32 val;
1383 1308
1384 read_lock(&adapter->adapter_lock); 1309 read_lock(&adapter->ahw.crb_lock);
1385 val = readl(addr); 1310 val = readl(addr);
1386 read_unlock(&adapter->adapter_lock); 1311 read_unlock(&adapter->ahw.crb_lock);
1387 1312
1388 return val; 1313 return val;
1389} 1314}
@@ -1403,488 +1328,437 @@ static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
1403void __iomem * 1328void __iomem *
1404netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) 1329netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
1405{ 1330{
1406 ulong off = offset; 1331 void __iomem *addr = NULL;
1407 1332
1408 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1333 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1409 if (offset < NETXEN_CRB_PCIX_HOST2 && 1334 if ((offset < NETXEN_CRB_PCIX_HOST2) &&
1410 offset > NETXEN_CRB_PCIX_HOST) 1335 (offset > NETXEN_CRB_PCIX_HOST))
1411 return PCI_OFFSET_SECOND_RANGE(adapter, offset); 1336 addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
1412 return NETXEN_CRB_NORMALIZE(adapter, offset); 1337 else
1338 addr = NETXEN_CRB_NORMALIZE(adapter, offset);
1339 } else {
1340 WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
1341 offset, &addr));
1413 } 1342 }
1414 1343
1415 BUG_ON(netxen_nic_pci_get_crb_addr_2M(adapter, &off)); 1344 return addr;
1416 return (void __iomem *)off;
1417} 1345}
1418 1346
1419static unsigned long 1347static int
1420netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, 1348netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
1421 unsigned long long addr) 1349 u64 addr, u32 *start)
1422{ 1350{
1423 int window; 1351 if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
1424 u32 win_read; 1352 *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0);
1425 1353 return 0;
1426 if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1427 /* DDR network side */
1428 window = MN_WIN(addr);
1429 adapter->ahw.ddr_mn_window = window;
1430 NXWR32(adapter, adapter->ahw.mn_win_crb, window);
1431 win_read = NXRD32(adapter, adapter->ahw.mn_win_crb);
1432 if ((win_read << 17) != window) {
1433 printk(KERN_INFO "Written MNwin (0x%x) != "
1434 "Read MNwin (0x%x)\n", window, win_read);
1435 }
1436 addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_DDR_NET;
1437 } else if (ADDR_IN_RANGE(addr, 1354 } else if (ADDR_IN_RANGE(addr,
1438 NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { 1355 NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1439 if ((addr & 0x00ff800) == 0xff800) { 1356 *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
1440 printk("%s: QM access not handled.\n", __func__); 1357 return 0;
1441 addr = -1UL; 1358 }
1442 } 1359
1360 return -EIO;
1361}
1362
1363static int
1364netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1365 u64 addr, u32 *start)
1366{
1367 u32 window;
1368 struct pci_dev *pdev = adapter->pdev;
1369
1370 if ((addr & 0x00ff800) == 0xff800) {
1371 if (printk_ratelimit())
1372 dev_warn(&pdev->dev, "QM access not handled\n");
1373 return -EIO;
1374 }
1443 1375
1376 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
1377 window = OCM_WIN_P3P(addr);
1378 else
1444 window = OCM_WIN(addr); 1379 window = OCM_WIN(addr);
1445 adapter->ahw.ddr_mn_window = window;
1446 NXWR32(adapter, adapter->ahw.mn_win_crb, window);
1447 win_read = NXRD32(adapter, adapter->ahw.mn_win_crb);
1448 if ((win_read >> 7) != window) {
1449 printk(KERN_INFO "%s: Written OCMwin (0x%x) != "
1450 "Read OCMwin (0x%x)\n",
1451 __func__, window, win_read);
1452 }
1453 addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_OCM0_2M;
1454 1380
1455 } else if (ADDR_IN_RANGE(addr, 1381 writel(window, adapter->ahw.ocm_win_crb);
1456 NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { 1382 /* read back to flush */
1457 /* QDR network side */ 1383 readl(adapter->ahw.ocm_win_crb);
1458 window = MS_WIN(addr);
1459 adapter->ahw.qdr_sn_window = window;
1460 NXWR32(adapter, adapter->ahw.ms_win_crb, window);
1461 win_read = NXRD32(adapter, adapter->ahw.ms_win_crb);
1462 if (win_read != window) {
1463 printk(KERN_INFO "%s: Written MSwin (0x%x) != "
1464 "Read MSwin (0x%x)\n",
1465 __func__, window, win_read);
1466 }
1467 addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_QDR_NET;
1468 1384
1469 } else { 1385 adapter->ahw.ocm_win = window;
1470 /* 1386 *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
1471 * peg gdb frequently accesses memory that doesn't exist, 1387 return 0;
1472 * this limits the chit chat so debugging isn't slowed down.
1473 */
1474 if ((netxen_pci_set_window_warning_count++ < 8)
1475 || (netxen_pci_set_window_warning_count%64 == 0)) {
1476 printk("%s: Warning:%s Unknown address range!\n",
1477 __func__, netxen_nic_driver_name);
1478} 1388}
1479 addr = -1UL; 1389
1390static int
1391netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
1392 u64 *data, int op)
1393{
1394 void __iomem *addr, *mem_ptr = NULL;
1395 resource_size_t mem_base;
1396 int ret = -EIO;
1397 u32 start;
1398
1399 spin_lock(&adapter->ahw.mem_lock);
1400
1401 ret = adapter->pci_set_window(adapter, off, &start);
1402 if (ret != 0)
1403 goto unlock;
1404
1405 addr = pci_base_offset(adapter, start);
1406 if (addr)
1407 goto noremap;
1408
1409 mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
1410
1411 mem_ptr = ioremap(mem_base, PAGE_SIZE);
1412 if (mem_ptr == NULL) {
1413 ret = -EIO;
1414 goto unlock;
1480 } 1415 }
1481 return addr; 1416
1417 addr = mem_ptr + (start & (PAGE_SIZE - 1));
1418
1419noremap:
1420 if (op == 0) /* read */
1421 *data = readq(addr);
1422 else /* write */
1423 writeq(*data, addr);
1424
1425unlock:
1426 spin_unlock(&adapter->ahw.mem_lock);
1427
1428 if (mem_ptr)
1429 iounmap(mem_ptr);
1430 return ret;
1482} 1431}
1483 1432
1484#define MAX_CTL_CHECK 1000 1433#define MAX_CTL_CHECK 1000
1485 1434
1486static int 1435static int
1487netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, 1436netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
1488 u64 off, void *data, int size) 1437 u64 off, u64 data)
1489{ 1438{
1490 unsigned long flags; 1439 int j, ret;
1491 int i, j, ret = 0, loop, sz[2], off0; 1440 u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
1492 uint32_t temp;
1493 uint64_t off8, tmpw, word[2] = {0, 0};
1494 void __iomem *mem_crb; 1441 void __iomem *mem_crb;
1495 1442
1496 if (size != 8) 1443 /* Only 64-bit aligned access */
1444 if (off & 7)
1497 return -EIO; 1445 return -EIO;
1498 1446
1447 /* P2 has different SIU and MIU test agent base addr */
1499 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1448 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1500 NETXEN_ADDR_QDR_NET_MAX_P2)) { 1449 NETXEN_ADDR_QDR_NET_MAX_P2)) {
1501 mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET); 1450 mem_crb = pci_base_offset(adapter,
1451 NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
1452 addr_hi = SIU_TEST_AGT_ADDR_HI;
1453 data_lo = SIU_TEST_AGT_WRDATA_LO;
1454 data_hi = SIU_TEST_AGT_WRDATA_HI;
1455 off_lo = off & SIU_TEST_AGT_ADDR_MASK;
1456 off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
1502 goto correct; 1457 goto correct;
1503 } 1458 }
1504 1459
1505 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1460 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1506 mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET); 1461 mem_crb = pci_base_offset(adapter,
1462 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1463 addr_hi = MIU_TEST_AGT_ADDR_HI;
1464 data_lo = MIU_TEST_AGT_WRDATA_LO;
1465 data_hi = MIU_TEST_AGT_WRDATA_HI;
1466 off_lo = off & MIU_TEST_AGT_ADDR_MASK;
1467 off_hi = 0;
1507 goto correct; 1468 goto correct;
1508 } 1469 }
1509 1470
1510 return -EIO; 1471 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
1511 1472 ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1512correct: 1473 if (adapter->ahw.pci_len0 != 0) {
1513 off8 = off & 0xfffffff8; 1474 return netxen_nic_pci_mem_access_direct(adapter,
1514 off0 = off & 0x7; 1475 off, &data, 1);
1515 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1516 sz[1] = size - sz[0];
1517 loop = ((off0 + size - 1) >> 3) + 1;
1518
1519 if ((size != 8) || (off0 != 0)) {
1520 for (i = 0; i < loop; i++) {
1521 if (adapter->pci_mem_read(adapter,
1522 off8 + (i << 3), &word[i], 8))
1523 return -1;
1524 } 1476 }
1525 } 1477 }
1526 1478
1527 switch (size) { 1479 return -EIO;
1528 case 1:
1529 tmpw = *((uint8_t *)data);
1530 break;
1531 case 2:
1532 tmpw = *((uint16_t *)data);
1533 break;
1534 case 4:
1535 tmpw = *((uint32_t *)data);
1536 break;
1537 case 8:
1538 default:
1539 tmpw = *((uint64_t *)data);
1540 break;
1541 }
1542 word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1543 word[0] |= tmpw << (off0 * 8);
1544 1480
1545 if (loop == 2) { 1481correct:
1546 word[1] &= ~(~0ULL << (sz[1] * 8)); 1482 spin_lock(&adapter->ahw.mem_lock);
1547 word[1] |= tmpw >> (sz[0] * 8); 1483 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1484
1485 writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1486 writel(off_hi, (mem_crb + addr_hi));
1487 writel(data & 0xffffffff, (mem_crb + data_lo));
1488 writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
1489 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1490 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1491 (mem_crb + TEST_AGT_CTRL));
1492
1493 for (j = 0; j < MAX_CTL_CHECK; j++) {
1494 temp = readl((mem_crb + TEST_AGT_CTRL));
1495 if ((temp & TA_CTL_BUSY) == 0)
1496 break;
1548 } 1497 }
1549 1498
1550 write_lock_irqsave(&adapter->adapter_lock, flags); 1499 if (j >= MAX_CTL_CHECK) {
1551 netxen_nic_pci_change_crbwindow_128M(adapter, 0); 1500 if (printk_ratelimit())
1552 1501 dev_err(&adapter->pdev->dev,
1553 for (i = 0; i < loop; i++) {
1554 writel((uint32_t)(off8 + (i << 3)),
1555 (mem_crb+MIU_TEST_AGT_ADDR_LO));
1556 writel(0,
1557 (mem_crb+MIU_TEST_AGT_ADDR_HI));
1558 writel(word[i] & 0xffffffff,
1559 (mem_crb+MIU_TEST_AGT_WRDATA_LO));
1560 writel((word[i] >> 32) & 0xffffffff,
1561 (mem_crb+MIU_TEST_AGT_WRDATA_HI));
1562 writel(MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
1563 (mem_crb+MIU_TEST_AGT_CTRL));
1564 writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
1565 (mem_crb+MIU_TEST_AGT_CTRL));
1566
1567 for (j = 0; j < MAX_CTL_CHECK; j++) {
1568 temp = readl(
1569 (mem_crb+MIU_TEST_AGT_CTRL));
1570 if ((temp & MIU_TA_CTL_BUSY) == 0)
1571 break;
1572 }
1573
1574 if (j >= MAX_CTL_CHECK) {
1575 if (printk_ratelimit())
1576 dev_err(&adapter->pdev->dev,
1577 "failed to write through agent\n"); 1502 "failed to write through agent\n");
1578 ret = -1; 1503 ret = -EIO;
1579 break; 1504 } else
1580 } 1505 ret = 0;
1581 }
1582 1506
1583 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1507 netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
1584 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1508 spin_unlock(&adapter->ahw.mem_lock);
1585 return ret; 1509 return ret;
1586} 1510}
1587 1511
1588static int 1512static int
1589netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, 1513netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
1590 u64 off, void *data, int size) 1514 u64 off, u64 *data)
1591{ 1515{
1592 unsigned long flags; 1516 int j, ret;
1593 int i, j = 0, k, start, end, loop, sz[2], off0[2]; 1517 u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
1594 uint32_t temp; 1518 u64 val;
1595 uint64_t off8, val, word[2] = {0, 0};
1596 void __iomem *mem_crb; 1519 void __iomem *mem_crb;
1597 1520
1598 if (size != 8) 1521 /* Only 64-bit aligned access */
1522 if (off & 7)
1599 return -EIO; 1523 return -EIO;
1600 1524
1525 /* P2 has different SIU and MIU test agent base addr */
1601 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1526 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1602 NETXEN_ADDR_QDR_NET_MAX_P2)) { 1527 NETXEN_ADDR_QDR_NET_MAX_P2)) {
1603 mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET); 1528 mem_crb = pci_base_offset(adapter,
1529 NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
1530 addr_hi = SIU_TEST_AGT_ADDR_HI;
1531 data_lo = SIU_TEST_AGT_RDDATA_LO;
1532 data_hi = SIU_TEST_AGT_RDDATA_HI;
1533 off_lo = off & SIU_TEST_AGT_ADDR_MASK;
1534 off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
1604 goto correct; 1535 goto correct;
1605 } 1536 }
1606 1537
1607 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1538 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1608 mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET); 1539 mem_crb = pci_base_offset(adapter,
1540 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1541 addr_hi = MIU_TEST_AGT_ADDR_HI;
1542 data_lo = MIU_TEST_AGT_RDDATA_LO;
1543 data_hi = MIU_TEST_AGT_RDDATA_HI;
1544 off_lo = off & MIU_TEST_AGT_ADDR_MASK;
1545 off_hi = 0;
1609 goto correct; 1546 goto correct;
1610 } 1547 }
1611 1548
1549 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
1550 ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1551 if (adapter->ahw.pci_len0 != 0) {
1552 return netxen_nic_pci_mem_access_direct(adapter,
1553 off, data, 0);
1554 }
1555 }
1556
1612 return -EIO; 1557 return -EIO;
1613 1558
1614correct: 1559correct:
1615 off8 = off & 0xfffffff8; 1560 spin_lock(&adapter->ahw.mem_lock);
1616 off0[0] = off & 0x7; 1561 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1617 off0[1] = 0;
1618 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
1619 sz[1] = size - sz[0];
1620 loop = ((off0[0] + size - 1) >> 3) + 1;
1621
1622 write_lock_irqsave(&adapter->adapter_lock, flags);
1623 netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1624
1625 for (i = 0; i < loop; i++) {
1626 writel((uint32_t)(off8 + (i << 3)),
1627 (mem_crb+MIU_TEST_AGT_ADDR_LO));
1628 writel(0,
1629 (mem_crb+MIU_TEST_AGT_ADDR_HI));
1630 writel(MIU_TA_CTL_ENABLE,
1631 (mem_crb+MIU_TEST_AGT_CTRL));
1632 writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE,
1633 (mem_crb+MIU_TEST_AGT_CTRL));
1634 1562
1635 for (j = 0; j < MAX_CTL_CHECK; j++) { 1563 writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1636 temp = readl( 1564 writel(off_hi, (mem_crb + addr_hi));
1637 (mem_crb+MIU_TEST_AGT_CTRL)); 1565 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1638 if ((temp & MIU_TA_CTL_BUSY) == 0) 1566 writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1639 break;
1640 }
1641 1567
1642 if (j >= MAX_CTL_CHECK) { 1568 for (j = 0; j < MAX_CTL_CHECK; j++) {
1643 if (printk_ratelimit()) 1569 temp = readl(mem_crb + TEST_AGT_CTRL);
1644 dev_err(&adapter->pdev->dev, 1570 if ((temp & TA_CTL_BUSY) == 0)
1645 "failed to read through agent\n");
1646 break; 1571 break;
1647 }
1648
1649 start = off0[i] >> 2;
1650 end = (off0[i] + sz[i] - 1) >> 2;
1651 for (k = start; k <= end; k++) {
1652 word[i] |= ((uint64_t) readl(
1653 (mem_crb +
1654 MIU_TEST_AGT_RDDATA(k))) << (32*k));
1655 }
1656 } 1572 }
1657 1573
1658 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1574 if (j >= MAX_CTL_CHECK) {
1659 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1575 if (printk_ratelimit())
1660 1576 dev_err(&adapter->pdev->dev,
1661 if (j >= MAX_CTL_CHECK) 1577 "failed to read through agent\n");
1662 return -1; 1578 ret = -EIO;
1663
1664 if (sz[0] == 8) {
1665 val = word[0];
1666 } else { 1579 } else {
1667 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1668 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1669 }
1670 1580
1671 switch (size) { 1581 temp = readl(mem_crb + data_hi);
1672 case 1: 1582 val = ((u64)temp << 32);
1673 *(uint8_t *)data = val; 1583 val |= readl(mem_crb + data_lo);
1674 break; 1584 *data = val;
1675 case 2: 1585 ret = 0;
1676 *(uint16_t *)data = val;
1677 break;
1678 case 4:
1679 *(uint32_t *)data = val;
1680 break;
1681 case 8:
1682 *(uint64_t *)data = val;
1683 break;
1684 } 1586 }
1685 return 0; 1587
1588 netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
1589 spin_unlock(&adapter->ahw.mem_lock);
1590
1591 return ret;
1686} 1592}
1687 1593
1688static int 1594static int
1689netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, 1595netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
1690 u64 off, void *data, int size) 1596 u64 off, u64 data)
1691{ 1597{
1692 int i, j, ret = 0, loop, sz[2], off0; 1598 int i, j, ret;
1693 uint32_t temp; 1599 u32 temp, off8;
1694 uint64_t off8, tmpw, word[2] = {0, 0}; 1600 u64 stride;
1695 void __iomem *mem_crb; 1601 void __iomem *mem_crb;
1696 1602
1697 if (size != 8) 1603 /* Only 64-bit aligned access */
1604 if (off & 7)
1698 return -EIO; 1605 return -EIO;
1699 1606
1607 /* P3 onward, test agent base for MIU and SIU is same */
1700 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1608 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1701 NETXEN_ADDR_QDR_NET_MAX_P3)) { 1609 NETXEN_ADDR_QDR_NET_MAX_P3)) {
1702 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET); 1610 mem_crb = netxen_get_ioaddr(adapter,
1611 NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1703 goto correct; 1612 goto correct;
1704 } 1613 }
1705 1614
1706 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1615 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1707 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET); 1616 mem_crb = netxen_get_ioaddr(adapter,
1617 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1708 goto correct; 1618 goto correct;
1709 } 1619 }
1710 1620
1621 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
1622 return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
1623
1711 return -EIO; 1624 return -EIO;
1712 1625
1713correct: 1626correct:
1714 off8 = off & 0xfffffff8; 1627 stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
1715 off0 = off & 0x7;
1716 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1717 sz[1] = size - sz[0];
1718 loop = ((off0 + size - 1) >> 3) + 1;
1719
1720 if ((size != 8) || (off0 != 0)) {
1721 for (i = 0; i < loop; i++) {
1722 if (adapter->pci_mem_read(adapter,
1723 off8 + (i << 3), &word[i], 8))
1724 return -1;
1725 }
1726 }
1727 1628
1728 switch (size) { 1629 off8 = off & ~(stride-1);
1729 case 1:
1730 tmpw = *((uint8_t *)data);
1731 break;
1732 case 2:
1733 tmpw = *((uint16_t *)data);
1734 break;
1735 case 4:
1736 tmpw = *((uint32_t *)data);
1737 break;
1738 case 8:
1739 default:
1740 tmpw = *((uint64_t *)data);
1741 break;
1742 }
1743 1630
1744 word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); 1631 spin_lock(&adapter->ahw.mem_lock);
1745 word[0] |= tmpw << (off0 * 8);
1746 1632
1747 if (loop == 2) { 1633 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1748 word[1] &= ~(~0ULL << (sz[1] * 8)); 1634 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1749 word[1] |= tmpw >> (sz[0] * 8);
1750 }
1751
1752 /*
1753 * don't lock here - write_wx gets the lock if each time
1754 * write_lock_irqsave(&adapter->adapter_lock, flags);
1755 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1756 */
1757 1635
1758 for (i = 0; i < loop; i++) { 1636 i = 0;
1759 writel(off8 + (i << 3), mem_crb+MIU_TEST_AGT_ADDR_LO); 1637 if (stride == 16) {
1760 writel(0, mem_crb+MIU_TEST_AGT_ADDR_HI); 1638 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1761 writel(word[i] & 0xffffffff, mem_crb+MIU_TEST_AGT_WRDATA_LO); 1639 writel((TA_CTL_START | TA_CTL_ENABLE),
1762 writel((word[i] >> 32) & 0xffffffff, 1640 (mem_crb + TEST_AGT_CTRL));
1763 mem_crb+MIU_TEST_AGT_WRDATA_HI);
1764 writel((MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE),
1765 mem_crb+MIU_TEST_AGT_CTRL);
1766 writel(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE,
1767 mem_crb+MIU_TEST_AGT_CTRL);
1768 1641
1769 for (j = 0; j < MAX_CTL_CHECK; j++) { 1642 for (j = 0; j < MAX_CTL_CHECK; j++) {
1770 temp = readl(mem_crb + MIU_TEST_AGT_CTRL); 1643 temp = readl(mem_crb + TEST_AGT_CTRL);
1771 if ((temp & MIU_TA_CTL_BUSY) == 0) 1644 if ((temp & TA_CTL_BUSY) == 0)
1772 break; 1645 break;
1773 } 1646 }
1774 1647
1775 if (j >= MAX_CTL_CHECK) { 1648 if (j >= MAX_CTL_CHECK) {
1776 if (printk_ratelimit()) 1649 ret = -EIO;
1777 dev_err(&adapter->pdev->dev, 1650 goto done;
1778 "failed to write through agent\n");
1779 ret = -1;
1780 break;
1781 } 1651 }
1652
1653 i = (off & 0xf) ? 0 : 2;
1654 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1655 mem_crb + MIU_TEST_AGT_WRDATA(i));
1656 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1657 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1658 i = (off & 0xf) ? 2 : 0;
1782 } 1659 }
1783 1660
1784 /* 1661 writel(data & 0xffffffff,
1785 * netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1662 mem_crb + MIU_TEST_AGT_WRDATA(i));
1786 * write_unlock_irqrestore(&adapter->adapter_lock, flags); 1663 writel((data >> 32) & 0xffffffff,
1787 */ 1664 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1665
1666 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1667 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1668 (mem_crb + TEST_AGT_CTRL));
1669
1670 for (j = 0; j < MAX_CTL_CHECK; j++) {
1671 temp = readl(mem_crb + TEST_AGT_CTRL);
1672 if ((temp & TA_CTL_BUSY) == 0)
1673 break;
1674 }
1675
1676 if (j >= MAX_CTL_CHECK) {
1677 if (printk_ratelimit())
1678 dev_err(&adapter->pdev->dev,
1679 "failed to write through agent\n");
1680 ret = -EIO;
1681 } else
1682 ret = 0;
1683
1684done:
1685 spin_unlock(&adapter->ahw.mem_lock);
1686
1788 return ret; 1687 return ret;
1789} 1688}
1790 1689
1791static int 1690static int
1792netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, 1691netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
1793 u64 off, void *data, int size) 1692 u64 off, u64 *data)
1794{ 1693{
1795 int i, j = 0, k, start, end, loop, sz[2], off0[2]; 1694 int j, ret;
1796 uint32_t temp; 1695 u32 temp, off8;
1797 uint64_t off8, val, word[2] = {0, 0}; 1696 u64 val, stride;
1798 void __iomem *mem_crb; 1697 void __iomem *mem_crb;
1799 1698
1800 if (size != 8) 1699 /* Only 64-bit aligned access */
1700 if (off & 7)
1801 return -EIO; 1701 return -EIO;
1802 1702
1703 /* P3 onward, test agent base for MIU and SIU is same */
1803 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1704 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1804 NETXEN_ADDR_QDR_NET_MAX_P3)) { 1705 NETXEN_ADDR_QDR_NET_MAX_P3)) {
1805 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET); 1706 mem_crb = netxen_get_ioaddr(adapter,
1707 NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1806 goto correct; 1708 goto correct;
1807 } 1709 }
1808 1710
1809 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1711 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1810 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET); 1712 mem_crb = netxen_get_ioaddr(adapter,
1713 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1811 goto correct; 1714 goto correct;
1812 } 1715 }
1813 1716
1717 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
1718 return netxen_nic_pci_mem_access_direct(adapter,
1719 off, data, 0);
1720 }
1721
1814 return -EIO; 1722 return -EIO;
1815 1723
1816correct: 1724correct:
1817 off8 = off & 0xfffffff8; 1725 stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
1818 off0[0] = off & 0x7;
1819 off0[1] = 0;
1820 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
1821 sz[1] = size - sz[0];
1822 loop = ((off0[0] + size - 1) >> 3) + 1;
1823 1726
1824 /* 1727 off8 = off & ~(stride-1);
1825 * don't lock here - write_wx gets the lock if each time
1826 * write_lock_irqsave(&adapter->adapter_lock, flags);
1827 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1828 */
1829 1728
1830 for (i = 0; i < loop; i++) { 1729 spin_lock(&adapter->ahw.mem_lock);
1831 writel(off8 + (i << 3), mem_crb + MIU_TEST_AGT_ADDR_LO);
1832 writel(0, mem_crb + MIU_TEST_AGT_ADDR_HI);
1833 writel(MIU_TA_CTL_ENABLE, mem_crb + MIU_TEST_AGT_CTRL);
1834 writel(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE,
1835 mem_crb + MIU_TEST_AGT_CTRL);
1836 1730
1837 for (j = 0; j < MAX_CTL_CHECK; j++) { 1731 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1838 temp = readl(mem_crb + MIU_TEST_AGT_CTRL); 1732 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1839 if ((temp & MIU_TA_CTL_BUSY) == 0) 1733 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1840 break; 1734 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1841 }
1842 1735
1843 if (j >= MAX_CTL_CHECK) { 1736 for (j = 0; j < MAX_CTL_CHECK; j++) {
1844 if (printk_ratelimit()) 1737 temp = readl(mem_crb + TEST_AGT_CTRL);
1845 dev_err(&adapter->pdev->dev, 1738 if ((temp & TA_CTL_BUSY) == 0)
1846 "failed to read through agent\n");
1847 break; 1739 break;
1848 }
1849
1850 start = off0[i] >> 2;
1851 end = (off0[i] + sz[i] - 1) >> 2;
1852 for (k = start; k <= end; k++) {
1853 temp = readl(mem_crb + MIU_TEST_AGT_RDDATA(k));
1854 word[i] |= ((uint64_t)temp << (32 * k));
1855 }
1856 } 1740 }
1857 1741
1858 /* 1742 if (j >= MAX_CTL_CHECK) {
1859 * netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1743 if (printk_ratelimit())
1860 * write_unlock_irqrestore(&adapter->adapter_lock, flags); 1744 dev_err(&adapter->pdev->dev,
1861 */ 1745 "failed to read through agent\n");
1862 1746 ret = -EIO;
1863 if (j >= MAX_CTL_CHECK)
1864 return -1;
1865
1866 if (sz[0] == 8) {
1867 val = word[0];
1868 } else { 1747 } else {
1869 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | 1748 off8 = MIU_TEST_AGT_RDDATA_LO;
1870 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); 1749 if ((stride == 16) && (off & 0xf))
1871 } 1750 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1872 1751
1873 switch (size) { 1752 temp = readl(mem_crb + off8 + 4);
1874 case 1: 1753 val = (u64)temp << 32;
1875 *(uint8_t *)data = val; 1754 val |= readl(mem_crb + off8);
1876 break; 1755 *data = val;
1877 case 2: 1756 ret = 0;
1878 *(uint16_t *)data = val;
1879 break;
1880 case 4:
1881 *(uint32_t *)data = val;
1882 break;
1883 case 8:
1884 *(uint64_t *)data = val;
1885 break;
1886 } 1757 }
1887 return 0; 1758
1759 spin_unlock(&adapter->ahw.mem_lock);
1760
1761 return ret;
1888} 1762}
1889 1763
1890void 1764void
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 8a0904368e08..6ee27a630d89 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -46,6 +46,7 @@ static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
46static void 46static void
47netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, 47netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
48 struct nx_host_rds_ring *rds_ring); 48 struct nx_host_rds_ring *rds_ring);
49static int netxen_p3_has_mn(struct netxen_adapter *adapter);
49 50
50static void crb_addr_transform_setup(void) 51static void crb_addr_transform_setup(void)
51{ 52{
@@ -437,7 +438,7 @@ int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
437#define NETXEN_BOARDNUM 0x400c 438#define NETXEN_BOARDNUM 0x400c
438#define NETXEN_CHIPNUM 0x4010 439#define NETXEN_CHIPNUM 0x4010
439 440
440int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) 441int netxen_pinit_from_rom(struct netxen_adapter *adapter)
441{ 442{
442 int addr, val; 443 int addr, val;
443 int i, n, init_delay = 0; 444 int i, n, init_delay = 0;
@@ -450,21 +451,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
450 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff); 451 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
451 netxen_rom_unlock(adapter); 452 netxen_rom_unlock(adapter);
452 453
453 if (verbose) {
454 if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
455 printk("P2 ROM board type: 0x%08x\n", val);
456 else
457 printk("Could not read board type\n");
458 if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
459 printk("P2 ROM board num: 0x%08x\n", val);
460 else
461 printk("Could not read board number\n");
462 if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
463 printk("P2 ROM chip num: 0x%08x\n", val);
464 else
465 printk("Could not read chip number\n");
466 }
467
468 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 454 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
469 if (netxen_rom_fast_read(adapter, 0, &n) != 0 || 455 if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
470 (n != 0xcafecafe) || 456 (n != 0xcafecafe) ||
@@ -486,11 +472,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
486 n &= ~0x80000000; 472 n &= ~0x80000000;
487 } 473 }
488 474
489 if (n < 1024) { 475 if (n >= 1024) {
490 if (verbose)
491 printk(KERN_DEBUG "%s: %d CRB init values found"
492 " in ROM.\n", netxen_nic_driver_name, n);
493 } else {
494 printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" 476 printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
495 " initialized.\n", __func__, n); 477 " initialized.\n", __func__, n);
496 return -EIO; 478 return -EIO;
@@ -502,6 +484,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
502 netxen_nic_driver_name); 484 netxen_nic_driver_name);
503 return -ENOMEM; 485 return -ENOMEM;
504 } 486 }
487
505 for (i = 0; i < n; i++) { 488 for (i = 0; i < n; i++) {
506 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || 489 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
507 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { 490 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
@@ -512,11 +495,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
512 buf[i].addr = addr; 495 buf[i].addr = addr;
513 buf[i].data = val; 496 buf[i].data = val;
514 497
515 if (verbose)
516 printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
517 netxen_nic_driver_name,
518 (u32)netxen_decode_crb_addr(addr), val);
519 } 498 }
499
520 for (i = 0; i < n; i++) { 500 for (i = 0; i < n; i++) {
521 501
522 off = netxen_decode_crb_addr(buf[i].addr); 502 off = netxen_decode_crb_addr(buf[i].addr);
@@ -526,6 +506,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
526 continue; 506 continue;
527 } 507 }
528 off += NETXEN_PCI_CRBSPACE; 508 off += NETXEN_PCI_CRBSPACE;
509
510 if (off & 1)
511 continue;
512
529 /* skipping cold reboot MAGIC */ 513 /* skipping cold reboot MAGIC */
530 if (off == NETXEN_CAM_RAM(0x1fc)) 514 if (off == NETXEN_CAM_RAM(0x1fc))
531 continue; 515 continue;
@@ -544,9 +528,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
544 continue; 528 continue;
545 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ 529 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
546 continue; 530 continue;
547 if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET) 531 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
548 continue; 532 !NX_IS_REVISION_P3P(adapter->ahw.revision_id))
549 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
550 buf[i].data = 0x1020; 533 buf[i].data = 0x1020;
551 /* skip the function enable register */ 534 /* skip the function enable register */
552 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) 535 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
@@ -607,6 +590,172 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
607 return 0; 590 return 0;
608} 591}
609 592
593static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
594{
595 uint32_t i;
596 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
597 __le32 entries = cpu_to_le32(directory->num_entries);
598
599 for (i = 0; i < entries; i++) {
600
601 __le32 offs = cpu_to_le32(directory->findex) +
602 (i * cpu_to_le32(directory->entry_size));
603 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
604
605 if (tab_type == section)
606 return (struct uni_table_desc *) &unirom[offs];
607 }
608
609 return NULL;
610}
611
612static int
613nx_set_product_offs(struct netxen_adapter *adapter)
614{
615 struct uni_table_desc *ptab_descr;
616 const u8 *unirom = adapter->fw->data;
617 uint32_t i;
618 __le32 entries;
619
620 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
621 if (ptab_descr == NULL)
622 return -1;
623
624 entries = cpu_to_le32(ptab_descr->num_entries);
625
626 for (i = 0; i < entries; i++) {
627
628 __le32 flags, file_chiprev, offs;
629 u8 chiprev = adapter->ahw.revision_id;
630 int mn_present = netxen_p3_has_mn(adapter);
631 uint32_t flagbit;
632
633 offs = cpu_to_le32(ptab_descr->findex) +
634 (i * cpu_to_le32(ptab_descr->entry_size));
635 flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
636 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
637 NX_UNI_CHIP_REV_OFF));
638
639 flagbit = mn_present ? 1 : 2;
640
641 if ((chiprev == file_chiprev) &&
642 ((1ULL << flagbit) & flags)) {
643 adapter->file_prd_off = offs;
644 return 0;
645 }
646 }
647
648 return -1;
649}
650
651
652static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
653 u32 section, u32 idx_offset)
654{
655 const u8 *unirom = adapter->fw->data;
656 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
657 idx_offset));
658 struct uni_table_desc *tab_desc;
659 __le32 offs;
660
661 tab_desc = nx_get_table_desc(unirom, section);
662
663 if (tab_desc == NULL)
664 return NULL;
665
666 offs = cpu_to_le32(tab_desc->findex) +
667 (cpu_to_le32(tab_desc->entry_size) * idx);
668
669 return (struct uni_data_desc *)&unirom[offs];
670}
671
672static u8 *
673nx_get_bootld_offs(struct netxen_adapter *adapter)
674{
675 u32 offs = NETXEN_BOOTLD_START;
676
677 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
678 offs = cpu_to_le32((nx_get_data_desc(adapter,
679 NX_UNI_DIR_SECT_BOOTLD,
680 NX_UNI_BOOTLD_IDX_OFF))->findex);
681
682 return (u8 *)&adapter->fw->data[offs];
683}
684
685static u8 *
686nx_get_fw_offs(struct netxen_adapter *adapter)
687{
688 u32 offs = NETXEN_IMAGE_START;
689
690 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
691 offs = cpu_to_le32((nx_get_data_desc(adapter,
692 NX_UNI_DIR_SECT_FW,
693 NX_UNI_FIRMWARE_IDX_OFF))->findex);
694
695 return (u8 *)&adapter->fw->data[offs];
696}
697
698static __le32
699nx_get_fw_size(struct netxen_adapter *adapter)
700{
701 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
702 return cpu_to_le32((nx_get_data_desc(adapter,
703 NX_UNI_DIR_SECT_FW,
704 NX_UNI_FIRMWARE_IDX_OFF))->size);
705 else
706 return cpu_to_le32(
707 *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
708}
709
710static __le32
711nx_get_fw_version(struct netxen_adapter *adapter)
712{
713 struct uni_data_desc *fw_data_desc;
714 const struct firmware *fw = adapter->fw;
715 __le32 major, minor, sub;
716 const u8 *ver_str;
717 int i, ret = 0;
718
719 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
720
721 fw_data_desc = nx_get_data_desc(adapter,
722 NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
723 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
724 cpu_to_le32(fw_data_desc->size) - 17;
725
726 for (i = 0; i < 12; i++) {
727 if (!strncmp(&ver_str[i], "REV=", 4)) {
728 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
729 &major, &minor, &sub);
730 break;
731 }
732 }
733
734 if (ret != 3)
735 return 0;
736
737 return major + (minor << 8) + (sub << 16);
738
739 } else
740 return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
741}
742
743static __le32
744nx_get_bios_version(struct netxen_adapter *adapter)
745{
746 const struct firmware *fw = adapter->fw;
747 __le32 bios_ver, prd_off = adapter->file_prd_off;
748
749 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
750 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
751 + NX_UNI_BIOS_VERSION_OFF));
752 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
753 (bios_ver >> 24);
754 } else
755 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
756
757}
758
610int 759int
611netxen_need_fw_reset(struct netxen_adapter *adapter) 760netxen_need_fw_reset(struct netxen_adapter *adapter)
612{ 761{
@@ -646,9 +795,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
646 /* check if we have got newer or different file firmware */ 795 /* check if we have got newer or different file firmware */
647 if (adapter->fw) { 796 if (adapter->fw) {
648 797
649 const struct firmware *fw = adapter->fw; 798 val = nx_get_fw_version(adapter);
650 799
651 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
652 version = NETXEN_DECODE_VERSION(val); 800 version = NETXEN_DECODE_VERSION(val);
653 801
654 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); 802 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
@@ -658,7 +806,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
658 if (version > NETXEN_VERSION_CODE(major, minor, build)) 806 if (version > NETXEN_VERSION_CODE(major, minor, build))
659 return 1; 807 return 1;
660 808
661 if (version == NETXEN_VERSION_CODE(major, minor, build)) { 809 if (version == NETXEN_VERSION_CODE(major, minor, build) &&
810 adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
662 811
663 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); 812 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
664 fw_type = (val & 0x4) ? 813 fw_type = (val & 0x4) ?
@@ -673,7 +822,11 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
673} 822}
674 823
675static char *fw_name[] = { 824static char *fw_name[] = {
676 "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash", 825 NX_P2_MN_ROMIMAGE_NAME,
826 NX_P3_CT_ROMIMAGE_NAME,
827 NX_P3_MN_ROMIMAGE_NAME,
828 NX_UNIFIED_ROMIMAGE_NAME,
829 NX_FLASH_ROMIMAGE_NAME,
677}; 830};
678 831
679int 832int
@@ -695,26 +848,28 @@ netxen_load_firmware(struct netxen_adapter *adapter)
695 848
696 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; 849 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
697 850
698 ptr64 = (u64 *)&fw->data[NETXEN_BOOTLD_START]; 851 ptr64 = (u64 *)nx_get_bootld_offs(adapter);
699 flashaddr = NETXEN_BOOTLD_START; 852 flashaddr = NETXEN_BOOTLD_START;
700 853
701 for (i = 0; i < size; i++) { 854 for (i = 0; i < size; i++) {
702 data = cpu_to_le64(ptr64[i]); 855 data = cpu_to_le64(ptr64[i]);
703 adapter->pci_mem_write(adapter, flashaddr, &data, 8); 856
857 if (adapter->pci_mem_write(adapter, flashaddr, data))
858 return -EIO;
859
704 flashaddr += 8; 860 flashaddr += 8;
705 } 861 }
706 862
707 size = *(u32 *)&fw->data[NX_FW_SIZE_OFFSET]; 863 size = (__force u32)nx_get_fw_size(adapter) / 8;
708 size = (__force u32)cpu_to_le32(size) / 8;
709 864
710 ptr64 = (u64 *)&fw->data[NETXEN_IMAGE_START]; 865 ptr64 = (u64 *)nx_get_fw_offs(adapter);
711 flashaddr = NETXEN_IMAGE_START; 866 flashaddr = NETXEN_IMAGE_START;
712 867
713 for (i = 0; i < size; i++) { 868 for (i = 0; i < size; i++) {
714 data = cpu_to_le64(ptr64[i]); 869 data = cpu_to_le64(ptr64[i]);
715 870
716 if (adapter->pci_mem_write(adapter, 871 if (adapter->pci_mem_write(adapter,
717 flashaddr, &data, 8)) 872 flashaddr, data))
718 return -EIO; 873 return -EIO;
719 874
720 flashaddr += 8; 875 flashaddr += 8;
@@ -728,17 +883,17 @@ netxen_load_firmware(struct netxen_adapter *adapter)
728 883
729 for (i = 0; i < size; i++) { 884 for (i = 0; i < size; i++) {
730 if (netxen_rom_fast_read(adapter, 885 if (netxen_rom_fast_read(adapter,
731 flashaddr, &lo) != 0) 886 flashaddr, (int *)&lo) != 0)
732 return -EIO; 887 return -EIO;
733 if (netxen_rom_fast_read(adapter, 888 if (netxen_rom_fast_read(adapter,
734 flashaddr + 4, &hi) != 0) 889 flashaddr + 4, (int *)&hi) != 0)
735 return -EIO; 890 return -EIO;
736 891
737 /* hi, lo are already in host endian byteorder */ 892 /* hi, lo are already in host endian byteorder */
738 data = (((u64)hi << 32) | lo); 893 data = (((u64)hi << 32) | lo);
739 894
740 if (adapter->pci_mem_write(adapter, 895 if (adapter->pci_mem_write(adapter,
741 flashaddr, &data, 8)) 896 flashaddr, data))
742 return -EIO; 897 return -EIO;
743 898
744 flashaddr += 8; 899 flashaddr += 8;
@@ -746,7 +901,10 @@ netxen_load_firmware(struct netxen_adapter *adapter)
746 } 901 }
747 msleep(1); 902 msleep(1);
748 903
749 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 904 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
905 NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
906 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
907 } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
750 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); 908 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
751 else { 909 else {
752 NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); 910 NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
@@ -757,21 +915,31 @@ netxen_load_firmware(struct netxen_adapter *adapter)
757} 915}
758 916
759static int 917static int
760netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname) 918netxen_validate_firmware(struct netxen_adapter *adapter)
761{ 919{
762 __le32 val; 920 __le32 val;
763 u32 ver, min_ver, bios; 921 u32 ver, min_ver, bios, min_size;
764 struct pci_dev *pdev = adapter->pdev; 922 struct pci_dev *pdev = adapter->pdev;
765 const struct firmware *fw = adapter->fw; 923 const struct firmware *fw = adapter->fw;
924 u8 fw_type = adapter->fw_type;
766 925
767 if (fw->size < NX_FW_MIN_SIZE) 926 if (fw_type == NX_UNIFIED_ROMIMAGE) {
768 return -EINVAL; 927 if (nx_set_product_offs(adapter))
928 return -EINVAL;
929
930 min_size = NX_UNI_FW_MIN_SIZE;
931 } else {
932 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
933 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
934 return -EINVAL;
935
936 min_size = NX_FW_MIN_SIZE;
937 }
769 938
770 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); 939 if (fw->size < min_size)
771 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
772 return -EINVAL; 940 return -EINVAL;
773 941
774 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); 942 val = nx_get_fw_version(adapter);
775 943
776 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 944 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
777 min_ver = NETXEN_VERSION_CODE(4, 0, 216); 945 min_ver = NETXEN_VERSION_CODE(4, 0, 216);
@@ -783,15 +951,15 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
783 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) { 951 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
784 dev_err(&pdev->dev, 952 dev_err(&pdev->dev,
785 "%s: firmware version %d.%d.%d unsupported\n", 953 "%s: firmware version %d.%d.%d unsupported\n",
786 fwname, _major(ver), _minor(ver), _build(ver)); 954 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
787 return -EINVAL; 955 return -EINVAL;
788 } 956 }
789 957
790 val = cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); 958 val = nx_get_bios_version(adapter);
791 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); 959 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
792 if ((__force u32)val != bios) { 960 if ((__force u32)val != bios) {
793 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", 961 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
794 fwname); 962 fw_name[fw_type]);
795 return -EINVAL; 963 return -EINVAL;
796 } 964 }
797 965
@@ -802,7 +970,7 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
802 val = NETXEN_DECODE_VERSION(val); 970 val = NETXEN_DECODE_VERSION(val);
803 if (val > ver) { 971 if (val > ver) {
804 dev_info(&pdev->dev, "%s: firmware is older than flash\n", 972 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
805 fwname); 973 fw_name[fw_type]);
806 return -EINVAL; 974 return -EINVAL;
807 } 975 }
808 976
@@ -810,6 +978,41 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
810 return 0; 978 return 0;
811} 979}
812 980
981static void
982nx_get_next_fwtype(struct netxen_adapter *adapter)
983{
984 u8 fw_type;
985
986 switch (adapter->fw_type) {
987 case NX_UNKNOWN_ROMIMAGE:
988 fw_type = NX_UNIFIED_ROMIMAGE;
989 break;
990
991 case NX_UNIFIED_ROMIMAGE:
992 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
993 fw_type = NX_FLASH_ROMIMAGE;
994 else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
995 fw_type = NX_P2_MN_ROMIMAGE;
996 else if (netxen_p3_has_mn(adapter))
997 fw_type = NX_P3_MN_ROMIMAGE;
998 else
999 fw_type = NX_P3_CT_ROMIMAGE;
1000 break;
1001
1002 case NX_P3_MN_ROMIMAGE:
1003 fw_type = NX_P3_CT_ROMIMAGE;
1004 break;
1005
1006 case NX_P2_MN_ROMIMAGE:
1007 case NX_P3_CT_ROMIMAGE:
1008 default:
1009 fw_type = NX_FLASH_ROMIMAGE;
1010 break;
1011 }
1012
1013 adapter->fw_type = fw_type;
1014}
1015
813static int 1016static int
814netxen_p3_has_mn(struct netxen_adapter *adapter) 1017netxen_p3_has_mn(struct netxen_adapter *adapter)
815{ 1018{
@@ -831,49 +1034,29 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
831 1034
832void netxen_request_firmware(struct netxen_adapter *adapter) 1035void netxen_request_firmware(struct netxen_adapter *adapter)
833{ 1036{
834 u8 fw_type;
835 struct pci_dev *pdev = adapter->pdev; 1037 struct pci_dev *pdev = adapter->pdev;
836 int rc = 0; 1038 int rc = 0;
837 1039
838 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1040 adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
839 fw_type = NX_P2_MN_ROMIMAGE;
840 goto request_fw;
841 }
842
843 fw_type = netxen_p3_has_mn(adapter) ?
844 NX_P3_MN_ROMIMAGE : NX_P3_CT_ROMIMAGE;
845 1041
846request_fw: 1042next:
847 rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev); 1043 nx_get_next_fwtype(adapter);
848 if (rc != 0) {
849 if (fw_type == NX_P3_MN_ROMIMAGE) {
850 msleep(1);
851 fw_type = NX_P3_CT_ROMIMAGE;
852 goto request_fw;
853 }
854 1044
855 fw_type = NX_FLASH_ROMIMAGE; 1045 if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
856 adapter->fw = NULL; 1046 adapter->fw = NULL;
857 goto done; 1047 } else {
858 } 1048 rc = request_firmware(&adapter->fw,
859 1049 fw_name[adapter->fw_type], &pdev->dev);
860 rc = netxen_validate_firmware(adapter, fw_name[fw_type]); 1050 if (rc != 0)
861 if (rc != 0) { 1051 goto next;
862 release_firmware(adapter->fw); 1052
863 1053 rc = netxen_validate_firmware(adapter);
864 if (fw_type == NX_P3_MN_ROMIMAGE) { 1054 if (rc != 0) {
1055 release_firmware(adapter->fw);
865 msleep(1); 1056 msleep(1);
866 fw_type = NX_P3_CT_ROMIMAGE; 1057 goto next;
867 goto request_fw;
868 } 1058 }
869
870 fw_type = NX_FLASH_ROMIMAGE;
871 adapter->fw = NULL;
872 goto done;
873 } 1059 }
874
875done:
876 adapter->fw_type = fw_type;
877} 1060}
878 1061
879 1062
@@ -1508,10 +1691,8 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1508 (rds_ring->num_desc - 1))); 1691 (rds_ring->num_desc - 1)));
1509 netxen_set_msg_ctxid(msg, adapter->portnum); 1692 netxen_set_msg_ctxid(msg, adapter->portnum);
1510 netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); 1693 netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
1511 read_lock(&adapter->adapter_lock); 1694 NXWRIO(adapter, DB_NORMALIZE(adapter,
1512 writel(msg, DB_NORMALIZE(adapter, 1695 NETXEN_RCV_PRODUCER_OFFSET), msg);
1513 NETXEN_RCV_PRODUCER_OFFSET));
1514 read_unlock(&adapter->adapter_lock);
1515 } 1696 }
1516 } 1697 }
1517} 1698}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 3bf78dbfbf0f..838420dbc633 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -34,13 +34,18 @@
34#include <net/ip.h> 34#include <net/ip.h>
35#include <linux/ipv6.h> 35#include <linux/ipv6.h>
36#include <linux/inetdevice.h> 36#include <linux/inetdevice.h>
37#include <linux/sysfs.h>
37 38
38MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); 39MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
39MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
40MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 41MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
43MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
44MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
45MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
41 46
42char netxen_nic_driver_name[] = "netxen_nic"; 47char netxen_nic_driver_name[] = "netxen_nic";
43static char netxen_nic_driver_string[] = "NetXen Network Driver version " 48static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
44 NETXEN_NIC_LINUX_VERSIONID; 49 NETXEN_NIC_LINUX_VERSIONID;
45 50
46static int port_mode = NETXEN_PORT_MODE_AUTO_NEG; 51static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
@@ -52,7 +57,8 @@ static int use_msi = 1;
52 57
53static int use_msi_x = 1; 58static int use_msi_x = 1;
54 59
55/* Local functions to NetXen NIC driver */ 60static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED;
61
56static int __devinit netxen_nic_probe(struct pci_dev *pdev, 62static int __devinit netxen_nic_probe(struct pci_dev *pdev,
57 const struct pci_device_id *ent); 63 const struct pci_device_id *ent);
58static void __devexit netxen_nic_remove(struct pci_dev *pdev); 64static void __devexit netxen_nic_remove(struct pci_dev *pdev);
@@ -73,6 +79,8 @@ static void netxen_nic_poll_controller(struct net_device *netdev);
73 79
74static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); 80static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
75static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); 81static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
82static void netxen_create_diag_entries(struct netxen_adapter *adapter);
83static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
76 84
77static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); 85static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
78static int netxen_can_start_firmware(struct netxen_adapter *adapter); 86static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -609,14 +617,12 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
609 * Set the CRB window to invalid. If any register in window 0 is 617 * Set the CRB window to invalid. If any register in window 0 is
610 * accessed it should set the window to 0 and then reset it to 1. 618 * accessed it should set the window to 0 and then reset it to 1.
611 */ 619 */
612 adapter->curr_window = 255; 620 adapter->ahw.crb_win = -1;
613 adapter->ahw.qdr_sn_window = -1; 621 adapter->ahw.ocm_win = -1;
614 adapter->ahw.ddr_mn_window = -1;
615 622
616 /* remap phys address */ 623 /* remap phys address */
617 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 624 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
618 mem_len = pci_resource_len(pdev, 0); 625 mem_len = pci_resource_len(pdev, 0);
619 pci_len0 = 0;
620 626
621 /* 128 Meg of memory */ 627 /* 128 Meg of memory */
622 if (mem_len == NETXEN_PCI_128MB_SIZE) { 628 if (mem_len == NETXEN_PCI_128MB_SIZE) {
@@ -625,6 +631,7 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
625 SECOND_PAGE_GROUP_SIZE); 631 SECOND_PAGE_GROUP_SIZE);
626 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, 632 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
627 THIRD_PAGE_GROUP_SIZE); 633 THIRD_PAGE_GROUP_SIZE);
634 pci_len0 = FIRST_PAGE_GROUP_SIZE;
628 } else if (mem_len == NETXEN_PCI_32MB_SIZE) { 635 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
629 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); 636 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
630 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - 637 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
@@ -637,19 +644,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
637 return -EIO; 644 return -EIO;
638 } 645 }
639 pci_len0 = mem_len; 646 pci_len0 = mem_len;
640
641 adapter->ahw.ddr_mn_window = 0;
642 adapter->ahw.qdr_sn_window = 0;
643
644 adapter->ahw.mn_win_crb = NETXEN_PCI_CRBSPACE +
645 0x100000 + PCIX_MN_WINDOW + (pci_func * 0x20);
646 adapter->ahw.ms_win_crb = NETXEN_PCI_CRBSPACE +
647 0x100000 + PCIX_SN_WINDOW;
648 if (pci_func < 4)
649 adapter->ahw.ms_win_crb += (pci_func * 0x20);
650 else
651 adapter->ahw.ms_win_crb +=
652 0xA0 + ((pci_func - 4) * 0x10);
653 } else { 647 } else {
654 return -EIO; 648 return -EIO;
655 } 649 }
@@ -663,6 +657,15 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
663 adapter->ahw.pci_base1 = mem_ptr1; 657 adapter->ahw.pci_base1 = mem_ptr1;
664 adapter->ahw.pci_base2 = mem_ptr2; 658 adapter->ahw.pci_base2 = mem_ptr2;
665 659
660 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
661 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
662 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
663
664 } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
665 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
666 NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
667 }
668
666 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 669 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
667 goto skip_doorbell; 670 goto skip_doorbell;
668 671
@@ -727,7 +730,8 @@ netxen_check_options(struct netxen_adapter *adapter)
727 if (adapter->portnum == 0) { 730 if (adapter->portnum == 0) {
728 get_brd_name_by_type(adapter->ahw.board_type, brd_name); 731 get_brd_name_by_type(adapter->ahw.board_type, brd_name);
729 732
730 printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n", 733 pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
734 module_name(THIS_MODULE),
731 brd_name, serial_num, adapter->ahw.revision_id); 735 brd_name, serial_num, adapter->ahw.revision_id);
732 } 736 }
733 737
@@ -819,7 +823,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
819 823
820 if (first_boot != 0x55555555) { 824 if (first_boot != 0x55555555) {
821 NXWR32(adapter, CRB_CMDPEG_STATE, 0); 825 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
822 netxen_pinit_from_rom(adapter, 0); 826 netxen_pinit_from_rom(adapter);
823 msleep(1); 827 msleep(1);
824 } 828 }
825 829
@@ -1209,16 +1213,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1209 int pci_func_id = PCI_FUNC(pdev->devfn); 1213 int pci_func_id = PCI_FUNC(pdev->devfn);
1210 uint8_t revision_id; 1214 uint8_t revision_id;
1211 1215
1212 if (pdev->class != 0x020000) {
1213 printk(KERN_DEBUG "NetXen function %d, class %x will not "
1214 "be enabled.\n",pci_func_id, pdev->class);
1215 return -ENODEV;
1216 }
1217
1218 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { 1216 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
1219 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x" 1217 pr_warning("%s: chip revisions between 0x%x-0x%x"
1220 "will not be enabled.\n", 1218 "will not be enabled.\n",
1221 NX_P3_A0, NX_P3_B1); 1219 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
1222 return -ENODEV; 1220 return -ENODEV;
1223 } 1221 }
1224 1222
@@ -1252,7 +1250,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1252 revision_id = pdev->revision; 1250 revision_id = pdev->revision;
1253 adapter->ahw.revision_id = revision_id; 1251 adapter->ahw.revision_id = revision_id;
1254 1252
1255 rwlock_init(&adapter->adapter_lock); 1253 rwlock_init(&adapter->ahw.crb_lock);
1254 spin_lock_init(&adapter->ahw.mem_lock);
1255
1256 spin_lock_init(&adapter->tx_clean_lock); 1256 spin_lock_init(&adapter->tx_clean_lock);
1257 INIT_LIST_HEAD(&adapter->mac_list); 1257 INIT_LIST_HEAD(&adapter->mac_list);
1258 1258
@@ -1317,6 +1317,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1317 break; 1317 break;
1318 } 1318 }
1319 1319
1320 netxen_create_diag_entries(adapter);
1321
1320 return 0; 1322 return 0;
1321 1323
1322err_out_disable_msi: 1324err_out_disable_msi:
@@ -1369,6 +1371,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1369 1371
1370 netxen_teardown_intr(adapter); 1372 netxen_teardown_intr(adapter);
1371 1373
1374 netxen_remove_diag_entries(adapter);
1375
1372 netxen_cleanup_pci_map(adapter); 1376 netxen_cleanup_pci_map(adapter);
1373 1377
1374 netxen_release_firmware(adapter); 1378 netxen_release_firmware(adapter);
@@ -1449,7 +1453,8 @@ netxen_nic_resume(struct pci_dev *pdev)
1449 if (err) 1453 if (err)
1450 return err; 1454 return err;
1451 1455
1452 adapter->curr_window = 255; 1456 adapter->ahw.crb_win = -1;
1457 adapter->ahw.ocm_win = -1;
1453 1458
1454 err = netxen_start_firmware(adapter); 1459 err = netxen_start_firmware(adapter);
1455 if (err) { 1460 if (err) {
@@ -1927,7 +1932,7 @@ request_reset:
1927struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) 1932struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1928{ 1933{
1929 struct netxen_adapter *adapter = netdev_priv(netdev); 1934 struct netxen_adapter *adapter = netdev_priv(netdev);
1930 struct net_device_stats *stats = &adapter->net_stats; 1935 struct net_device_stats *stats = &netdev->stats;
1931 1936
1932 memset(stats, 0, sizeof(*stats)); 1937 memset(stats, 0, sizeof(*stats));
1933 1938
@@ -2263,7 +2268,8 @@ netxen_check_health(struct netxen_adapter *adapter)
2263 dev_info(&netdev->dev, "firmware hang detected\n"); 2268 dev_info(&netdev->dev, "firmware hang detected\n");
2264 2269
2265detach: 2270detach:
2266 if (!test_and_set_bit(__NX_RESETTING, &adapter->state)) 2271 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2272 !test_and_set_bit(__NX_RESETTING, &adapter->state))
2267 netxen_schedule_work(adapter, netxen_detach_work, 0); 2273 netxen_schedule_work(adapter, netxen_detach_work, 0);
2268 return 1; 2274 return 1;
2269} 2275}
@@ -2341,6 +2347,197 @@ static struct device_attribute dev_attr_bridged_mode = {
2341 .store = netxen_store_bridged_mode, 2347 .store = netxen_store_bridged_mode,
2342}; 2348};
2343 2349
2350static ssize_t
2351netxen_store_diag_mode(struct device *dev,
2352 struct device_attribute *attr, const char *buf, size_t len)
2353{
2354 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2355 unsigned long new;
2356
2357 if (strict_strtoul(buf, 2, &new))
2358 return -EINVAL;
2359
2360 if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2361 adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
2362
2363 return len;
2364}
2365
2366static ssize_t
2367netxen_show_diag_mode(struct device *dev,
2368 struct device_attribute *attr, char *buf)
2369{
2370 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2371
2372 return sprintf(buf, "%d\n",
2373 !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
2374}
2375
2376static struct device_attribute dev_attr_diag_mode = {
2377 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2378 .show = netxen_show_diag_mode,
2379 .store = netxen_store_diag_mode,
2380};
2381
2382static int
2383netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
2384 loff_t offset, size_t size)
2385{
2386 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2387 return -EIO;
2388
2389 if ((size != 4) || (offset & 0x3))
2390 return -EINVAL;
2391
2392 if (offset < NETXEN_PCI_CRBSPACE)
2393 return -EINVAL;
2394
2395 return 0;
2396}
2397
2398static ssize_t
2399netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2400 char *buf, loff_t offset, size_t size)
2401{
2402 struct device *dev = container_of(kobj, struct device, kobj);
2403 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2404 u32 data;
2405 int ret;
2406
2407 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2408 if (ret != 0)
2409 return ret;
2410
2411 data = NXRD32(adapter, offset);
2412 memcpy(buf, &data, size);
2413 return size;
2414}
2415
2416static ssize_t
2417netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2418 char *buf, loff_t offset, size_t size)
2419{
2420 struct device *dev = container_of(kobj, struct device, kobj);
2421 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2422 u32 data;
2423 int ret;
2424
2425 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2426 if (ret != 0)
2427 return ret;
2428
2429 memcpy(&data, buf, size);
2430 NXWR32(adapter, offset, data);
2431 return size;
2432}
2433
2434static int
2435netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
2436 loff_t offset, size_t size)
2437{
2438 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2439 return -EIO;
2440
2441 if ((size != 8) || (offset & 0x7))
2442 return -EIO;
2443
2444 return 0;
2445}
2446
2447static ssize_t
2448netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2449 char *buf, loff_t offset, size_t size)
2450{
2451 struct device *dev = container_of(kobj, struct device, kobj);
2452 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2453 u64 data;
2454 int ret;
2455
2456 ret = netxen_sysfs_validate_mem(adapter, offset, size);
2457 if (ret != 0)
2458 return ret;
2459
2460 if (adapter->pci_mem_read(adapter, offset, &data))
2461 return -EIO;
2462
2463 memcpy(buf, &data, size);
2464
2465 return size;
2466}
2467
2468ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
2469 struct bin_attribute *attr, char *buf,
2470 loff_t offset, size_t size)
2471{
2472 struct device *dev = container_of(kobj, struct device, kobj);
2473 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2474 u64 data;
2475 int ret;
2476
2477 ret = netxen_sysfs_validate_mem(adapter, offset, size);
2478 if (ret != 0)
2479 return ret;
2480
2481 memcpy(&data, buf, size);
2482
2483 if (adapter->pci_mem_write(adapter, offset, data))
2484 return -EIO;
2485
2486 return size;
2487}
2488
2489
2490static struct bin_attribute bin_attr_crb = {
2491 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2492 .size = 0,
2493 .read = netxen_sysfs_read_crb,
2494 .write = netxen_sysfs_write_crb,
2495};
2496
2497static struct bin_attribute bin_attr_mem = {
2498 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2499 .size = 0,
2500 .read = netxen_sysfs_read_mem,
2501 .write = netxen_sysfs_write_mem,
2502};
2503
2504#ifdef CONFIG_MODULES
2505static ssize_t
2506netxen_store_auto_fw_reset(struct module_attribute *mattr,
2507 struct module *mod, const char *buf, size_t count)
2508
2509{
2510 unsigned long new;
2511
2512 if (strict_strtoul(buf, 16, &new))
2513 return -EINVAL;
2514
2515 if ((new == AUTO_FW_RESET_ENABLED) || (new == AUTO_FW_RESET_DISABLED)) {
2516 auto_fw_reset = new;
2517 return count;
2518 }
2519
2520 return -EINVAL;
2521}
2522
2523static ssize_t
2524netxen_show_auto_fw_reset(struct module_attribute *mattr,
2525 struct module *mod, char *buf)
2526
2527{
2528 if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
2529 return sprintf(buf, "enabled\n");
2530 else
2531 return sprintf(buf, "disabled\n");
2532}
2533
2534static struct module_attribute mod_attr_fw_reset = {
2535 .attr = {.name = "auto_fw_reset", .mode = (S_IRUGO | S_IWUSR)},
2536 .show = netxen_show_auto_fw_reset,
2537 .store = netxen_store_auto_fw_reset,
2538};
2539#endif
2540
2344static void 2541static void
2345netxen_create_sysfs_entries(struct netxen_adapter *adapter) 2542netxen_create_sysfs_entries(struct netxen_adapter *adapter)
2346{ 2543{
@@ -2366,6 +2563,33 @@ netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
2366 device_remove_file(dev, &dev_attr_bridged_mode); 2563 device_remove_file(dev, &dev_attr_bridged_mode);
2367} 2564}
2368 2565
2566static void
2567netxen_create_diag_entries(struct netxen_adapter *adapter)
2568{
2569 struct pci_dev *pdev = adapter->pdev;
2570 struct device *dev;
2571
2572 dev = &pdev->dev;
2573 if (device_create_file(dev, &dev_attr_diag_mode))
2574 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2575 if (device_create_bin_file(dev, &bin_attr_crb))
2576 dev_info(dev, "failed to create crb sysfs entry\n");
2577 if (device_create_bin_file(dev, &bin_attr_mem))
2578 dev_info(dev, "failed to create mem sysfs entry\n");
2579}
2580
2581
2582static void
2583netxen_remove_diag_entries(struct netxen_adapter *adapter)
2584{
2585 struct pci_dev *pdev = adapter->pdev;
2586 struct device *dev = &pdev->dev;
2587
2588 device_remove_file(dev, &dev_attr_diag_mode);
2589 device_remove_bin_file(dev, &bin_attr_crb);
2590 device_remove_bin_file(dev, &bin_attr_mem);
2591}
2592
2369#ifdef CONFIG_INET 2593#ifdef CONFIG_INET
2370 2594
2371#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) 2595#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
@@ -2518,6 +2742,10 @@ static struct pci_driver netxen_driver = {
2518 2742
2519static int __init netxen_init_module(void) 2743static int __init netxen_init_module(void)
2520{ 2744{
2745#ifdef CONFIG_MODULES
2746 struct module *mod = THIS_MODULE;
2747#endif
2748
2521 printk(KERN_INFO "%s\n", netxen_nic_driver_string); 2749 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
2522 2750
2523#ifdef CONFIG_INET 2751#ifdef CONFIG_INET
@@ -2525,6 +2753,12 @@ static int __init netxen_init_module(void)
2525 register_inetaddr_notifier(&netxen_inetaddr_cb); 2753 register_inetaddr_notifier(&netxen_inetaddr_cb);
2526#endif 2754#endif
2527 2755
2756#ifdef CONFIG_MODULES
2757 if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
2758 printk(KERN_ERR "%s: Failed to create auto_fw_reset "
2759 "sysfs entry.", netxen_nic_driver_name);
2760#endif
2761
2528 return pci_register_driver(&netxen_driver); 2762 return pci_register_driver(&netxen_driver);
2529} 2763}
2530 2764
@@ -2532,6 +2766,12 @@ module_init(netxen_init_module);
2532 2766
2533static void __exit netxen_exit_module(void) 2767static void __exit netxen_exit_module(void)
2534{ 2768{
2769#ifdef CONFIG_MODULES
2770 struct module *mod = THIS_MODULE;
2771
2772 sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
2773#endif
2774
2535 pci_unregister_driver(&netxen_driver); 2775 pci_unregister_driver(&netxen_driver);
2536 2776
2537#ifdef CONFIG_INET 2777#ifdef CONFIG_INET
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 462d20f26436..6a87d810e59d 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -377,7 +377,7 @@ static int ni5010_open(struct net_device *dev)
377 377
378 PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name)); 378 PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
379 379
380 if (request_irq(dev->irq, &ni5010_interrupt, 0, boardname, dev)) { 380 if (request_irq(dev->irq, ni5010_interrupt, 0, boardname, dev)) {
381 printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq); 381 printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
382 return -EAGAIN; 382 return -EAGAIN;
383 } 383 }
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index aad3b370c562..305f4ba36999 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -284,7 +284,7 @@ static int ni52_open(struct net_device *dev)
284 startrecv586(dev); 284 startrecv586(dev);
285 ni_enaint(); 285 ni_enaint();
286 286
287 ret = request_irq(dev->irq, &ni52_interrupt, 0, dev->name, dev); 287 ret = request_irq(dev->irq, ni52_interrupt, 0, dev->name, dev);
288 if (ret) { 288 if (ret) {
289 ni_reset586(); 289 ni_reset586();
290 return ret; 290 return ret;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 752c2e4d9cf4..ae19aafd2c7e 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -294,7 +294,7 @@ static void ni65_set_performance(struct priv *p)
294static int ni65_open(struct net_device *dev) 294static int ni65_open(struct net_device *dev)
295{ 295{
296 struct priv *p = dev->ml_priv; 296 struct priv *p = dev->ml_priv;
297 int irqval = request_irq(dev->irq, &ni65_interrupt,0, 297 int irqval = request_irq(dev->irq, ni65_interrupt,0,
298 cards[p->cardno].cardname,dev); 298 cards[p->cardno].cardname,dev);
299 if (irqval) { 299 if (irqval) {
300 printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n", 300 printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d6c7ac68f6ea..8ce58c4c7dd3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -45,10 +45,6 @@ MODULE_DESCRIPTION("NIU ethernet driver");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46MODULE_VERSION(DRV_MODULE_VERSION); 46MODULE_VERSION(DRV_MODULE_VERSION);
47 47
48#ifndef DMA_44BIT_MASK
49#define DMA_44BIT_MASK 0x00000fffffffffffULL
50#endif
51
52#ifndef readq 48#ifndef readq
53static u64 readq(void __iomem *reg) 49static u64 readq(void __iomem *reg)
54{ 50{
@@ -7855,10 +7851,13 @@ static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
7855 } 7851 }
7856} 7852}
7857 7853
7858static int niu_get_stats_count(struct net_device *dev) 7854static int niu_get_sset_count(struct net_device *dev, int stringset)
7859{ 7855{
7860 struct niu *np = netdev_priv(dev); 7856 struct niu *np = netdev_priv(dev);
7861 7857
7858 if (stringset != ETH_SS_STATS)
7859 return -EINVAL;
7860
7862 return ((np->flags & NIU_FLAGS_XMAC ? 7861 return ((np->flags & NIU_FLAGS_XMAC ?
7863 NUM_XMAC_STAT_KEYS : 7862 NUM_XMAC_STAT_KEYS :
7864 NUM_BMAC_STAT_KEYS) + 7863 NUM_BMAC_STAT_KEYS) +
@@ -7978,7 +7977,7 @@ static const struct ethtool_ops niu_ethtool_ops = {
7978 .get_settings = niu_get_settings, 7977 .get_settings = niu_get_settings,
7979 .set_settings = niu_set_settings, 7978 .set_settings = niu_set_settings,
7980 .get_strings = niu_get_strings, 7979 .get_strings = niu_get_strings,
7981 .get_stats_count = niu_get_stats_count, 7980 .get_sset_count = niu_get_sset_count,
7982 .get_ethtool_stats = niu_get_ethtool_stats, 7981 .get_ethtool_stats = niu_get_ethtool_stats,
7983 .phys_id = niu_phys_id, 7982 .phys_id = niu_phys_id,
7984 .get_rxnfc = niu_get_nfc, 7983 .get_rxnfc = niu_get_nfc,
@@ -8144,7 +8143,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
8144 int i; 8143 int i;
8145 8144
8146 for (i = 0; i < len - 5; i++) { 8145 for (i = 0; i < len - 5; i++) {
8147 if (!strncmp(s + i, "FCode ", 5)) 8146 if (!strncmp(s + i, "FCode ", 6))
8148 break; 8147 break;
8149 } 8148 }
8150 if (i >= len - 5) 8149 if (i >= len - 5)
@@ -9915,7 +9914,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9915 PCI_EXP_DEVCTL_RELAX_EN); 9914 PCI_EXP_DEVCTL_RELAX_EN);
9916 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 9915 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
9917 9916
9918 dma_mask = DMA_44BIT_MASK; 9917 dma_mask = DMA_BIT_MASK(44);
9919 err = pci_set_dma_mask(pdev, dma_mask); 9918 err = pci_set_dma_mask(pdev, dma_mask);
9920 if (!err) { 9919 if (!err) {
9921 dev->features |= NETIF_F_HIGHDMA; 9920 dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index c254a7f5b9f5..1673eb045e1e 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1216,7 +1216,7 @@ static int pasemi_mac_open(struct net_device *dev)
1216 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", 1216 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
1217 dev->name); 1217 dev->name);
1218 1218
1219 ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED, 1219 ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, IRQF_DISABLED,
1220 mac->tx_irq_name, mac->tx); 1220 mac->tx_irq_name, mac->tx);
1221 if (ret) { 1221 if (ret) {
1222 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1222 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
@@ -1227,7 +1227,7 @@ static int pasemi_mac_open(struct net_device *dev)
1227 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx", 1227 snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
1228 dev->name); 1228 dev->name);
1229 1229
1230 ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED, 1230 ret = request_irq(mac->rx->chan.irq, pasemi_mac_rx_intr, IRQF_DISABLED,
1231 mac->rx_irq_name, mac->rx); 1231 mac->rx_irq_name, mac->rx);
1232 if (ret) { 1232 if (ret) {
1233 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n", 1233 dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 28a86224879d..fefa79e34b95 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -77,6 +77,19 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
77 return phy_ethtool_gset(phydev, cmd); 77 return phy_ethtool_gset(phydev, cmd);
78} 78}
79 79
80static int
81pasemi_mac_ethtool_set_settings(struct net_device *netdev,
82 struct ethtool_cmd *cmd)
83{
84 struct pasemi_mac *mac = netdev_priv(netdev);
85 struct phy_device *phydev = mac->phydev;
86
87 if (!phydev)
88 return -EOPNOTSUPP;
89
90 return phy_ethtool_sset(phydev, cmd);
91}
92
80static void 93static void
81pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev, 94pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev,
82 struct ethtool_drvinfo *drvinfo) 95 struct ethtool_drvinfo *drvinfo)
@@ -150,6 +163,7 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
150 163
151const struct ethtool_ops pasemi_mac_ethtool_ops = { 164const struct ethtool_ops pasemi_mac_ethtool_ops = {
152 .get_settings = pasemi_mac_ethtool_get_settings, 165 .get_settings = pasemi_mac_ethtool_get_settings,
166 .set_settings = pasemi_mac_ethtool_set_settings,
153 .get_drvinfo = pasemi_mac_ethtool_get_drvinfo, 167 .get_drvinfo = pasemi_mac_ethtool_get_drvinfo,
154 .get_msglevel = pasemi_mac_ethtool_get_msglevel, 168 .get_msglevel = pasemi_mac_ethtool_get_msglevel,
155 .set_msglevel = pasemi_mac_ethtool_set_msglevel, 169 .set_msglevel = pasemi_mac_ethtool_set_msglevel,
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7e01fbdb87e0..57e09616330a 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -264,7 +264,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
264 /* Interrupt setup */ 264 /* Interrupt setup */
265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT; 265 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING|IRQ_HANDLE_PRESENT;
266 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 266 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
267 link->irq.Handler = &fjn_interrupt; 267 link->irq.Handler = fjn_interrupt;
268 link->irq.Instance = dev; 268 link->irq.Instance = dev;
269 269
270 /* General socket configuration */ 270 /* General socket configuration */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 5ed6339c52bc..b12e69592d18 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -479,7 +479,7 @@ static int nmclan_probe(struct pcmcia_device *link)
479 link->io.IOAddrLines = 5; 479 link->io.IOAddrLines = 5;
480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT; 480 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
481 link->irq.IRQInfo1 = IRQ_LEVEL_ID; 481 link->irq.IRQInfo1 = IRQ_LEVEL_ID;
482 link->irq.Handler = &mace_interrupt; 482 link->irq.Handler = mace_interrupt;
483 link->irq.Instance = dev; 483 link->irq.Instance = dev;
484 link->conf.Attributes = CONF_ENABLE_IRQ; 484 link->conf.Attributes = CONF_ENABLE_IRQ;
485 link->conf.IntType = INT_MEMORY_AND_IO; 485 link->conf.IntType = INT_MEMORY_AND_IO;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 94c9ad2746bc..469684474b72 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1768,6 +1768,13 @@ static struct pcmcia_device_id pcnet_ids[] = {
1768 PCMCIA_DEVICE_NULL 1768 PCMCIA_DEVICE_NULL
1769}; 1769};
1770MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); 1770MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
1771MODULE_FIRMWARE("cis/PCMLM28.cis");
1772MODULE_FIRMWARE("cis/DP83903.cis");
1773MODULE_FIRMWARE("cis/LA-PCM.cis");
1774MODULE_FIRMWARE("PE520.cis");
1775MODULE_FIRMWARE("cis/NE2K.cis");
1776MODULE_FIRMWARE("cis/PE-200.cis");
1777MODULE_FIRMWARE("cis/tamarack.cis");
1771 1778
1772static struct pcmcia_driver pcnet_driver = { 1779static struct pcmcia_driver pcnet_driver = {
1773 .drv = { 1780 .drv = {
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index c1b3f09f452c..076f23a10517 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -2095,7 +2095,7 @@ static int pcnet32_open(struct net_device *dev)
2095 int rc; 2095 int rc;
2096 unsigned long flags; 2096 unsigned long flags;
2097 2097
2098 if (request_irq(dev->irq, &pcnet32_interrupt, 2098 if (request_irq(dev->irq, pcnet32_interrupt,
2099 lp->shared_irq ? IRQF_SHARED : 0, dev->name, 2099 lp->shared_irq ? IRQF_SHARED : 0, dev->name,
2100 (void *)dev)) { 2100 (void *)dev)) {
2101 return -EAGAIN; 2101 return -EAGAIN;
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f81e53222230..f63c96a4ecb4 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/phy.h> 18#include <linux/phy.h>
19#include <linux/brcmphy.h>
19 20
20#define PHY_ID_BCM50610 0x0143bd60 21#define PHY_ID_BCM50610 0x0143bd60
21#define PHY_ID_BCM50610M 0x0143bd70 22#define PHY_ID_BCM50610M 0x0143bd70
@@ -24,6 +25,9 @@
24#define BRCM_PHY_MODEL(phydev) \ 25#define BRCM_PHY_MODEL(phydev) \
25 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask) 26 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
26 27
28#define BRCM_PHY_REV(phydev) \
29 ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
30
27 31
28#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ 32#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
29#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ 33#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
@@ -94,22 +98,35 @@
94#define BCM_LED_SRC_OFF 0xe /* Tied high */ 98#define BCM_LED_SRC_OFF 0xe /* Tied high */
95#define BCM_LED_SRC_ON 0xf /* Tied low */ 99#define BCM_LED_SRC_ON 0xf /* Tied low */
96 100
101
97/* 102/*
98 * BCM5482: Shadow registers 103 * BCM5482: Shadow registers
99 * Shadow values go into bits [14:10] of register 0x1c to select a shadow 104 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
100 * register to access. 105 * register to access.
101 */ 106 */
107/* 00101: Spare Control Register 3 */
108#define BCM54XX_SHD_SCR3 0x05
109#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
110#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
111#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
112
113/* 01010: Auto Power-Down */
114#define BCM54XX_SHD_APD 0x0a
115#define BCM54XX_SHD_APD_EN 0x0020
116
102#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ 117#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
103 /* LED3 / ~LINKSPD[2] selector */ 118 /* LED3 / ~LINKSPD[2] selector */
104#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) 119#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
105 /* LED1 / ~LINKSPD[1] selector */ 120 /* LED1 / ~LINKSPD[1] selector */
106#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) 121#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
122#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
107#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ 123#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
108#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ 124#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
109#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ 125#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
110#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ 126#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
111#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ 127#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
112 128
129
113/* 130/*
114 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) 131 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
115 */ 132 */
@@ -138,16 +155,6 @@
138#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ 155#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
139#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ 156#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
140 157
141/*
142 * Device flags for PHYs that can be configured for different operating
143 * modes.
144 */
145#define PHY_BCM_FLAGS_VALID 0x80000000
146#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
147#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
148#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
149#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
150
151 158
152/*****************************************************************************/ 159/*****************************************************************************/
153/* Fast Ethernet Transceiver definitions. */ 160/* Fast Ethernet Transceiver definitions. */
@@ -237,53 +244,145 @@ static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
237 return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val); 244 return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
238} 245}
239 246
247/* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
240static int bcm50610_a0_workaround(struct phy_device *phydev) 248static int bcm50610_a0_workaround(struct phy_device *phydev)
241{ 249{
242 int err; 250 int err;
243 251
244 err = bcm54xx_auxctl_write(phydev,
245 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
246 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
247 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
248 if (err < 0)
249 return err;
250
251 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
252 MII_BCM54XX_EXP_EXP08_RJCT_2MHZ |
253 MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE);
254 if (err < 0)
255 goto error;
256
257 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0, 252 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
258 MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN | 253 MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
259 MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF); 254 MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
260 if (err < 0) 255 if (err < 0)
261 goto error; 256 return err;
262 257
263 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3, 258 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
264 MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ); 259 MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
265 if (err < 0) 260 if (err < 0)
266 goto error; 261 return err;
267 262
268 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, 263 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
269 MII_BCM54XX_EXP_EXP75_VDACCTRL); 264 MII_BCM54XX_EXP_EXP75_VDACCTRL);
270 if (err < 0) 265 if (err < 0)
271 goto error; 266 return err;
272 267
273 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96, 268 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
274 MII_BCM54XX_EXP_EXP96_MYST); 269 MII_BCM54XX_EXP_EXP96_MYST);
275 if (err < 0) 270 if (err < 0)
276 goto error; 271 return err;
277 272
278 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97, 273 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
279 MII_BCM54XX_EXP_EXP97_MYST); 274 MII_BCM54XX_EXP_EXP97_MYST);
280 275
276 return err;
277}
278
279static int bcm54xx_phydsp_config(struct phy_device *phydev)
280{
281 int err, err2;
282
283 /* Enable the SMDSP clock */
284 err = bcm54xx_auxctl_write(phydev,
285 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
286 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
287 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
288 if (err < 0)
289 return err;
290
291 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
292 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
293 /* Clear bit 9 to fix a phy interop issue. */
294 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
295 MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
296 if (err < 0)
297 goto error;
298
299 if (phydev->drv->phy_id == PHY_ID_BCM50610) {
300 err = bcm50610_a0_workaround(phydev);
301 if (err < 0)
302 goto error;
303 }
304 }
305
306 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
307 int val;
308
309 val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
310 if (val < 0)
311 goto error;
312
313 val |= MII_BCM54XX_EXP_EXP75_CM_OSC;
314 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val);
315 }
316
281error: 317error:
282 bcm54xx_auxctl_write(phydev, 318 /* Disable the SMDSP clock */
283 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL, 319 err2 = bcm54xx_auxctl_write(phydev,
284 MII_BCM54XX_AUXCTL_ACTL_TX_6DB); 320 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
321 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
285 322
286 return err; 323 /* Return the first error reported. */
324 return err ? err : err2;
325}
326
327static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
328{
329 u32 val, orig;
330 bool clk125en = true;
331
332 /* Abort if we are using an untested phy. */
333 if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 ||
334 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 ||
335 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
336 return;
337
338 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
339 if (val < 0)
340 return;
341
342 orig = val;
343
344 if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
345 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
346 BRCM_PHY_REV(phydev) >= 0x3) {
347 /*
348 * Here, bit 0 _disables_ CLK125 when set.
349 * This bit is set by default.
350 */
351 clk125en = false;
352 } else {
353 if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
354 /* Here, bit 0 _enables_ CLK125 when set */
355 val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
356 clk125en = false;
357 }
358 }
359
360 if (clk125en == false ||
361 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
362 val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
363 else
364 val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
365
366 if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
367 val |= BCM54XX_SHD_SCR3_TRDDAPD;
368
369 if (orig != val)
370 bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
371
372 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
373 if (val < 0)
374 return;
375
376 orig = val;
377
378 if (clk125en == false ||
379 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
380 val |= BCM54XX_SHD_APD_EN;
381 else
382 val &= ~BCM54XX_SHD_APD_EN;
383
384 if (orig != val)
385 bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
287} 386}
288 387
289static int bcm54xx_config_init(struct phy_device *phydev) 388static int bcm54xx_config_init(struct phy_device *phydev)
@@ -308,38 +407,17 @@ static int bcm54xx_config_init(struct phy_device *phydev)
308 if (err < 0) 407 if (err < 0)
309 return err; 408 return err;
310 409
311 if (phydev->drv->phy_id == PHY_ID_BCM50610) { 410 if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
312 err = bcm50610_a0_workaround(phydev); 411 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
313 if (err < 0) 412 (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
314 return err; 413 bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0);
315 }
316
317 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
318 int err2;
319
320 err = bcm54xx_auxctl_write(phydev,
321 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
322 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
323 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
324 if (err < 0)
325 return err;
326
327 reg = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
328 if (reg < 0)
329 goto error;
330 414
331 reg |= MII_BCM54XX_EXP_EXP75_CM_OSC; 415 if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
332 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, reg); 416 (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
417 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
418 bcm54xx_adjust_rxrefclk(phydev);
333 419
334error: 420 bcm54xx_phydsp_config(phydev);
335 err2 = bcm54xx_auxctl_write(phydev,
336 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
337 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
338 if (err)
339 return err;
340 if (err2)
341 return err2;
342 }
343 421
344 return 0; 422 return 0;
345} 423}
@@ -564,9 +642,11 @@ static int brcm_fet_config_init(struct phy_device *phydev)
564 if (err < 0) 642 if (err < 0)
565 goto done; 643 goto done;
566 644
567 /* Enable auto power down */ 645 if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) {
568 err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, 646 /* Enable auto power down */
569 MII_BRCM_FET_SHDW_AS2_APDE); 647 err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
648 MII_BRCM_FET_SHDW_AS2_APDE);
649 }
570 650
571done: 651done:
572 /* Disable shadow register access */ 652 /* Disable shadow register access */
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399d6dd9..c311fa6597f5 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -36,7 +36,7 @@
36 36
37#define PPP_VERSION "2.4.2" 37#define PPP_VERSION "2.4.2"
38 38
39#define OBUFSIZE 256 39#define OBUFSIZE 4096
40 40
41/* Structure for storing local state. */ 41/* Structure for storing local state. */
42struct asyncppp { 42struct asyncppp {
@@ -337,10 +337,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
337 return 0; 337 return 0;
338} 338}
339 339
340/* 340/* May sleep, don't call from interrupt level or with interrupts disabled */
341 * This can now be called from hard interrupt level as well
342 * as soft interrupt level or mainline.
343 */
344static void 341static void
345ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, 342ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
346 char *cflags, int count) 343 char *cflags, int count)
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 965adb6174c3..0a56a778af0a 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -184,7 +184,7 @@ static atomic_t ppp_unit_count = ATOMIC_INIT(0);
184static atomic_t channel_count = ATOMIC_INIT(0); 184static atomic_t channel_count = ATOMIC_INIT(0);
185 185
186/* per-net private data for this module */ 186/* per-net private data for this module */
187static int ppp_net_id; 187static int ppp_net_id __read_mostly;
188struct ppp_net { 188struct ppp_net {
189 /* units to ppp mapping */ 189 /* units to ppp mapping */
190 struct idr units_idr; 190 struct idr units_idr;
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db13586..c908b08dc981 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -378,10 +378,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
378 return 0; 378 return 0;
379} 379}
380 380
381/* 381/* May sleep, don't call from interrupt level or with interrupts disabled */
382 * This can now be called from hard interrupt level as well
383 * as soft interrupt level or mainline.
384 */
385static void 382static void
386ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, 383ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
387 char *cflags, int count) 384 char *cflags, int count)
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 2559991eea6a..a1dcba255b06 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -97,7 +97,7 @@ static const struct proto_ops pppoe_ops;
97static struct ppp_channel_ops pppoe_chan_ops; 97static struct ppp_channel_ops pppoe_chan_ops;
98 98
99/* per-net private data for this module */ 99/* per-net private data for this module */
100static int pppoe_net_id; 100static int pppoe_net_id __read_mostly;
101struct pppoe_net { 101struct pppoe_net {
102 /* 102 /*
103 * we could use _single_ hash table for all 103 * we could use _single_ hash table for all
@@ -250,20 +250,19 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
250{ 250{
251 struct net_device *dev; 251 struct net_device *dev;
252 struct pppoe_net *pn; 252 struct pppoe_net *pn;
253 struct pppox_sock *pppox_sock; 253 struct pppox_sock *pppox_sock = NULL;
254 254
255 int ifindex; 255 int ifindex;
256 256
257 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); 257 rcu_read_lock();
258 if (!dev) 258 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
259 return NULL; 259 if (dev) {
260 260 ifindex = dev->ifindex;
261 ifindex = dev->ifindex; 261 pn = net_generic(net, pppoe_net_id);
262 pn = net_generic(net, pppoe_net_id); 262 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
263 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
264 sp->sa_addr.pppoe.remote, ifindex); 263 sp->sa_addr.pppoe.remote, ifindex);
265 dev_put(dev); 264 }
266 265 rcu_read_unlock();
267 return pppox_sock; 266 return pppox_sock;
268} 267}
269 268
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5910df60c93e..c58b50f8ba3b 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -232,7 +232,7 @@ static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
232static const struct proto_ops pppol2tp_ops; 232static const struct proto_ops pppol2tp_ops;
233 233
234/* per-net private data for this module */ 234/* per-net private data for this module */
235static int pppol2tp_net_id; 235static int pppol2tp_net_id __read_mostly;
236struct pppol2tp_net { 236struct pppol2tp_net {
237 struct list_head pppol2tp_tunnel_list; 237 struct list_head pppol2tp_tunnel_list;
238 rwlock_t pppol2tp_tunnel_list_lock; 238 rwlock_t pppol2tp_tunnel_list_lock;
@@ -516,7 +516,7 @@ static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
516 return 0; 516 return 0;
517 517
518 inet = inet_sk(sk); 518 inet = inet_sk(sk);
519 psum = csum_tcpudp_nofold(inet->saddr, inet->daddr, ulen, 519 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
520 IPPROTO_UDP, 0); 520 IPPROTO_UDP, 0);
521 521
522 if ((skb->ip_summed == CHECKSUM_COMPLETE) && 522 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
@@ -949,8 +949,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
949 inet = inet_sk(sk_tun); 949 inet = inet_sk(sk_tun);
950 udp_len = hdr_len + sizeof(ppph) + total_len; 950 udp_len = hdr_len + sizeof(ppph) + total_len;
951 uh = (struct udphdr *) skb->data; 951 uh = (struct udphdr *) skb->data;
952 uh->source = inet->sport; 952 uh->source = inet->inet_sport;
953 uh->dest = inet->dport; 953 uh->dest = inet->inet_dport;
954 uh->len = htons(udp_len); 954 uh->len = htons(udp_len);
955 uh->check = 0; 955 uh->check = 0;
956 skb_put(skb, sizeof(struct udphdr)); 956 skb_put(skb, sizeof(struct udphdr));
@@ -978,7 +978,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
978 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { 978 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
979 skb->ip_summed = CHECKSUM_COMPLETE; 979 skb->ip_summed = CHECKSUM_COMPLETE;
980 csum = skb_checksum(skb, 0, udp_len, 0); 980 csum = skb_checksum(skb, 0, udp_len, 0);
981 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr, 981 uh->check = csum_tcpudp_magic(inet->inet_saddr,
982 inet->inet_daddr,
982 udp_len, IPPROTO_UDP, csum); 983 udp_len, IPPROTO_UDP, csum);
983 if (uh->check == 0) 984 if (uh->check == 0)
984 uh->check = CSUM_MANGLED_0; 985 uh->check = CSUM_MANGLED_0;
@@ -986,7 +987,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
986 skb->ip_summed = CHECKSUM_PARTIAL; 987 skb->ip_summed = CHECKSUM_PARTIAL;
987 skb->csum_start = skb_transport_header(skb) - skb->head; 988 skb->csum_start = skb_transport_header(skb) - skb->head;
988 skb->csum_offset = offsetof(struct udphdr, check); 989 skb->csum_offset = offsetof(struct udphdr, check);
989 uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr, 990 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
991 inet->inet_daddr,
990 udp_len, IPPROTO_UDP, 0); 992 udp_len, IPPROTO_UDP, 0);
991 } 993 }
992 994
@@ -1136,8 +1138,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1136 __skb_push(skb, sizeof(*uh)); 1138 __skb_push(skb, sizeof(*uh));
1137 skb_reset_transport_header(skb); 1139 skb_reset_transport_header(skb);
1138 uh = udp_hdr(skb); 1140 uh = udp_hdr(skb);
1139 uh->source = inet->sport; 1141 uh->source = inet->inet_sport;
1140 uh->dest = inet->dport; 1142 uh->dest = inet->inet_dport;
1141 uh->len = htons(udp_len); 1143 uh->len = htons(udp_len);
1142 uh->check = 0; 1144 uh->check = 0;
1143 1145
@@ -1181,7 +1183,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1181 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { 1183 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
1182 skb->ip_summed = CHECKSUM_COMPLETE; 1184 skb->ip_summed = CHECKSUM_COMPLETE;
1183 csum = skb_checksum(skb, 0, udp_len, 0); 1185 csum = skb_checksum(skb, 0, udp_len, 0);
1184 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr, 1186 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1187 inet->inet_daddr,
1185 udp_len, IPPROTO_UDP, csum); 1188 udp_len, IPPROTO_UDP, csum);
1186 if (uh->check == 0) 1189 if (uh->check == 0)
1187 uh->check = CSUM_MANGLED_0; 1190 uh->check = CSUM_MANGLED_0;
@@ -1189,7 +1192,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1189 skb->ip_summed = CHECKSUM_PARTIAL; 1192 skb->ip_summed = CHECKSUM_PARTIAL;
1190 skb->csum_start = skb_transport_header(skb) - skb->head; 1193 skb->csum_start = skb_transport_header(skb) - skb->head;
1191 skb->csum_offset = offsetof(struct udphdr, check); 1194 skb->csum_offset = offsetof(struct udphdr, check);
1192 uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr, 1195 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1196 inet->inet_daddr,
1193 udp_len, IPPROTO_UDP, 0); 1197 udp_len, IPPROTO_UDP, 0);
1194 } 1198 }
1195 1199
@@ -1533,7 +1537,7 @@ static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
1533 * if the tunnel socket goes away. 1537 * if the tunnel socket goes away.
1534 */ 1538 */
1535 tunnel->old_sk_destruct = sk->sk_destruct; 1539 tunnel->old_sk_destruct = sk->sk_destruct;
1536 sk->sk_destruct = &pppol2tp_tunnel_destruct; 1540 sk->sk_destruct = pppol2tp_tunnel_destruct;
1537 1541
1538 tunnel->sock = sk; 1542 tunnel->sock = sk;
1539 sk->sk_allocation = GFP_ATOMIC; 1543 sk->sk_allocation = GFP_ATOMIC;
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 4f6d33fbc673..ac806b27c658 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -104,7 +104,8 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
104 104
105EXPORT_SYMBOL(pppox_ioctl); 105EXPORT_SYMBOL(pppox_ioctl);
106 106
107static int pppox_create(struct net *net, struct socket *sock, int protocol) 107static int pppox_create(struct net *net, struct socket *sock, int protocol,
108 int kern)
108{ 109{
109 int rc = -EPROTOTYPE; 110 int rc = -EPROTOTYPE;
110 111
@@ -125,7 +126,7 @@ out:
125 return rc; 126 return rc;
126} 127}
127 128
128static struct net_proto_family pppox_proto_family = { 129static const struct net_proto_family pppox_proto_family = {
129 .family = PF_PPPOX, 130 .family = PF_PPPOX,
130 .create = pppox_create, 131 .create = pppox_create,
131 .owner = THIS_MODULE, 132 .owner = THIS_MODULE,
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index c2383adcd527..862c1aaf3860 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00-b3" 19#define DRV_VERSION "v1.00.00.23.00.00-01"
20 20
21#define PFX "qlge: " 21#define PFX "qlge: "
22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \ 22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
@@ -54,8 +54,10 @@
54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ 54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ 55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) 56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
57#define SMALL_BUFFER_SIZE 256 57#define SMALL_BUFFER_SIZE 512
58#define LARGE_BUFFER_SIZE PAGE_SIZE 58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
59#define LARGE_BUFFER_MAX_SIZE 8192
60#define LARGE_BUFFER_MIN_SIZE 2048
59#define MAX_SPLIT_SIZE 1023 61#define MAX_SPLIT_SIZE 1023
60#define QLGE_SB_PAD 32 62#define QLGE_SB_PAD 32
61 63
@@ -795,6 +797,7 @@ enum {
795 MB_WOL_BCAST = (1 << 5), 797 MB_WOL_BCAST = (1 << 5),
796 MB_WOL_LINK_UP = (1 << 6), 798 MB_WOL_LINK_UP = (1 << 6),
797 MB_WOL_LINK_DOWN = (1 << 7), 799 MB_WOL_LINK_DOWN = (1 << 7),
800 MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
798 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ 801 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
799 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ 802 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
800 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ 803 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
@@ -804,12 +807,27 @@ enum {
804 MB_CMD_SET_PORT_CFG = 0x00000122, 807 MB_CMD_SET_PORT_CFG = 0x00000122,
805 MB_CMD_GET_PORT_CFG = 0x00000123, 808 MB_CMD_GET_PORT_CFG = 0x00000123,
806 MB_CMD_GET_LINK_STS = 0x00000124, 809 MB_CMD_GET_LINK_STS = 0x00000124,
810 MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
811 QL_LED_BLINK = 0x03e803e8,
812 MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
807 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ 813 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
808 MB_SET_MPI_TFK_STOP = (1 << 0), 814 MB_SET_MPI_TFK_STOP = (1 << 0),
809 MB_SET_MPI_TFK_RESUME = (1 << 1), 815 MB_SET_MPI_TFK_RESUME = (1 << 1),
810 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ 816 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
811 MB_GET_MPI_TFK_STOPPED = (1 << 0), 817 MB_GET_MPI_TFK_STOPPED = (1 << 0),
812 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), 818 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
819 /* Sub-commands for IDC request.
820 * This describes the reason for the
821 * IDC request.
822 */
823 MB_CMD_IOP_NONE = 0x0000,
824 MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
825 MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
826 MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
827 MB_CMD_IOP_DVR_START = 0x0100,
828 MB_CMD_IOP_FLASH_ACC = 0x0101,
829 MB_CMD_IOP_RESTART_MPI = 0x0102,
830 MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
813 831
814 /* Mailbox Command Status. */ 832 /* Mailbox Command Status. */
815 MB_CMD_STS_GOOD = 0x00004000, /* Success. */ 833 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
@@ -1201,9 +1219,17 @@ struct tx_ring_desc {
1201 struct tx_ring_desc *next; 1219 struct tx_ring_desc *next;
1202}; 1220};
1203 1221
1222struct page_chunk {
1223 struct page *page; /* master page */
1224 char *va; /* virt addr for this chunk */
1225 u64 map; /* mapping for master */
1226 unsigned int offset; /* offset for this chunk */
1227 unsigned int last_flag; /* flag set for last chunk in page */
1228};
1229
1204struct bq_desc { 1230struct bq_desc {
1205 union { 1231 union {
1206 struct page *lbq_page; 1232 struct page_chunk pg_chunk;
1207 struct sk_buff *skb; 1233 struct sk_buff *skb;
1208 } p; 1234 } p;
1209 __le64 *addr; 1235 __le64 *addr;
@@ -1237,6 +1263,9 @@ struct tx_ring {
1237 atomic_t queue_stopped; /* Turns queue off when full. */ 1263 atomic_t queue_stopped; /* Turns queue off when full. */
1238 struct delayed_work tx_work; 1264 struct delayed_work tx_work;
1239 struct ql_adapter *qdev; 1265 struct ql_adapter *qdev;
1266 u64 tx_packets;
1267 u64 tx_bytes;
1268 u64 tx_errors;
1240}; 1269};
1241 1270
1242/* 1271/*
@@ -1272,6 +1301,7 @@ struct rx_ring {
1272 dma_addr_t lbq_base_dma; 1301 dma_addr_t lbq_base_dma;
1273 void *lbq_base_indirect; 1302 void *lbq_base_indirect;
1274 dma_addr_t lbq_base_indirect_dma; 1303 dma_addr_t lbq_base_indirect_dma;
1304 struct page_chunk pg_chunk; /* current page for chunks */
1275 struct bq_desc *lbq; /* array of control blocks */ 1305 struct bq_desc *lbq; /* array of control blocks */
1276 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ 1306 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
1277 u32 lbq_prod_idx; /* current sw prod idx */ 1307 u32 lbq_prod_idx; /* current sw prod idx */
@@ -1302,6 +1332,11 @@ struct rx_ring {
1302 struct napi_struct napi; 1332 struct napi_struct napi;
1303 u8 reserved; 1333 u8 reserved;
1304 struct ql_adapter *qdev; 1334 struct ql_adapter *qdev;
1335 u64 rx_packets;
1336 u64 rx_multicast;
1337 u64 rx_bytes;
1338 u64 rx_dropped;
1339 u64 rx_errors;
1305}; 1340};
1306 1341
1307/* 1342/*
@@ -1363,6 +1398,174 @@ struct nic_stats {
1363 u64 rx_1024_to_1518_pkts; 1398 u64 rx_1024_to_1518_pkts;
1364 u64 rx_1519_to_max_pkts; 1399 u64 rx_1519_to_max_pkts;
1365 u64 rx_len_err_pkts; 1400 u64 rx_len_err_pkts;
1401 /*
1402 * These stats come from offset 500h to 5C8h
1403 * in the XGMAC register.
1404 */
1405 u64 tx_cbfc_pause_frames0;
1406 u64 tx_cbfc_pause_frames1;
1407 u64 tx_cbfc_pause_frames2;
1408 u64 tx_cbfc_pause_frames3;
1409 u64 tx_cbfc_pause_frames4;
1410 u64 tx_cbfc_pause_frames5;
1411 u64 tx_cbfc_pause_frames6;
1412 u64 tx_cbfc_pause_frames7;
1413 u64 rx_cbfc_pause_frames0;
1414 u64 rx_cbfc_pause_frames1;
1415 u64 rx_cbfc_pause_frames2;
1416 u64 rx_cbfc_pause_frames3;
1417 u64 rx_cbfc_pause_frames4;
1418 u64 rx_cbfc_pause_frames5;
1419 u64 rx_cbfc_pause_frames6;
1420 u64 rx_cbfc_pause_frames7;
1421 u64 rx_nic_fifo_drop;
1422};
1423
1424/* Address/Length pairs for the coredump. */
1425enum {
1426 MPI_CORE_REGS_ADDR = 0x00030000,
1427 MPI_CORE_REGS_CNT = 127,
1428 MPI_CORE_SH_REGS_CNT = 16,
1429 TEST_REGS_ADDR = 0x00001000,
1430 TEST_REGS_CNT = 23,
1431 RMII_REGS_ADDR = 0x00001040,
1432 RMII_REGS_CNT = 64,
1433 FCMAC1_REGS_ADDR = 0x00001080,
1434 FCMAC2_REGS_ADDR = 0x000010c0,
1435 FCMAC_REGS_CNT = 64,
1436 FC1_MBX_REGS_ADDR = 0x00001100,
1437 FC2_MBX_REGS_ADDR = 0x00001240,
1438 FC_MBX_REGS_CNT = 64,
1439 IDE_REGS_ADDR = 0x00001140,
1440 IDE_REGS_CNT = 64,
1441 NIC1_MBX_REGS_ADDR = 0x00001180,
1442 NIC2_MBX_REGS_ADDR = 0x00001280,
1443 NIC_MBX_REGS_CNT = 64,
1444 SMBUS_REGS_ADDR = 0x00001200,
1445 SMBUS_REGS_CNT = 64,
1446 I2C_REGS_ADDR = 0x00001fc0,
1447 I2C_REGS_CNT = 64,
1448 MEMC_REGS_ADDR = 0x00003000,
1449 MEMC_REGS_CNT = 256,
1450 PBUS_REGS_ADDR = 0x00007c00,
1451 PBUS_REGS_CNT = 256,
1452 MDE_REGS_ADDR = 0x00010000,
1453 MDE_REGS_CNT = 6,
1454 CODE_RAM_ADDR = 0x00020000,
1455 CODE_RAM_CNT = 0x2000,
1456 MEMC_RAM_ADDR = 0x00100000,
1457 MEMC_RAM_CNT = 0x2000,
1458};
1459
1460#define MPI_COREDUMP_COOKIE 0x5555aaaa
1461struct mpi_coredump_global_header {
1462 u32 cookie;
1463 u8 idString[16];
1464 u32 timeLo;
1465 u32 timeHi;
1466 u32 imageSize;
1467 u32 headerSize;
1468 u8 info[220];
1469};
1470
1471struct mpi_coredump_segment_header {
1472 u32 cookie;
1473 u32 segNum;
1474 u32 segSize;
1475 u32 extra;
1476 u8 description[16];
1477};
1478
1479/* Reg dump segment numbers. */
1480enum {
1481 CORE_SEG_NUM = 1,
1482 TEST_LOGIC_SEG_NUM = 2,
1483 RMII_SEG_NUM = 3,
1484 FCMAC1_SEG_NUM = 4,
1485 FCMAC2_SEG_NUM = 5,
1486 FC1_MBOX_SEG_NUM = 6,
1487 IDE_SEG_NUM = 7,
1488 NIC1_MBOX_SEG_NUM = 8,
1489 SMBUS_SEG_NUM = 9,
1490 FC2_MBOX_SEG_NUM = 10,
1491 NIC2_MBOX_SEG_NUM = 11,
1492 I2C_SEG_NUM = 12,
1493 MEMC_SEG_NUM = 13,
1494 PBUS_SEG_NUM = 14,
1495 MDE_SEG_NUM = 15,
1496 NIC1_CONTROL_SEG_NUM = 16,
1497 NIC2_CONTROL_SEG_NUM = 17,
1498 NIC1_XGMAC_SEG_NUM = 18,
1499 NIC2_XGMAC_SEG_NUM = 19,
1500 WCS_RAM_SEG_NUM = 20,
1501 MEMC_RAM_SEG_NUM = 21,
1502 XAUI_AN_SEG_NUM = 22,
1503 XAUI_HSS_PCS_SEG_NUM = 23,
1504 XFI_AN_SEG_NUM = 24,
1505 XFI_TRAIN_SEG_NUM = 25,
1506 XFI_HSS_PCS_SEG_NUM = 26,
1507 XFI_HSS_TX_SEG_NUM = 27,
1508 XFI_HSS_RX_SEG_NUM = 28,
1509 XFI_HSS_PLL_SEG_NUM = 29,
1510 MISC_NIC_INFO_SEG_NUM = 30,
1511 INTR_STATES_SEG_NUM = 31,
1512 CAM_ENTRIES_SEG_NUM = 32,
1513 ROUTING_WORDS_SEG_NUM = 33,
1514 ETS_SEG_NUM = 34,
1515 PROBE_DUMP_SEG_NUM = 35,
1516 ROUTING_INDEX_SEG_NUM = 36,
1517 MAC_PROTOCOL_SEG_NUM = 37,
1518 XAUI2_AN_SEG_NUM = 38,
1519 XAUI2_HSS_PCS_SEG_NUM = 39,
1520 XFI2_AN_SEG_NUM = 40,
1521 XFI2_TRAIN_SEG_NUM = 41,
1522 XFI2_HSS_PCS_SEG_NUM = 42,
1523 XFI2_HSS_TX_SEG_NUM = 43,
1524 XFI2_HSS_RX_SEG_NUM = 44,
1525 XFI2_HSS_PLL_SEG_NUM = 45,
1526 SEM_REGS_SEG_NUM = 50
1527
1528};
1529
1530struct ql_nic_misc {
1531 u32 rx_ring_count;
1532 u32 tx_ring_count;
1533 u32 intr_count;
1534 u32 function;
1535};
1536
1537struct ql_reg_dump {
1538
1539 /* segment 0 */
1540 struct mpi_coredump_global_header mpi_global_header;
1541
1542 /* segment 16 */
1543 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1544 u32 nic_regs[64];
1545
1546 /* segment 30 */
1547 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1548 struct ql_nic_misc misc_nic_info;
1549
1550 /* segment 31 */
1551 /* one interrupt state for each CQ */
1552 struct mpi_coredump_segment_header intr_states_seg_hdr;
1553 u32 intr_states[MAX_CPUS];
1554
1555 /* segment 32 */
1556 /* 3 cam words each for 16 unicast,
1557 * 2 cam words for each of 32 multicast.
1558 */
1559 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1560 u32 cam_entries[(16 * 3) + (32 * 3)];
1561
1562 /* segment 33 */
1563 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1564 u32 nic_routing_words[16];
1565
1566 /* segment 34 */
1567 struct mpi_coredump_segment_header ets_seg_hdr;
1568 u32 ets[8+2];
1366}; 1569};
1367 1570
1368/* 1571/*
@@ -1398,6 +1601,8 @@ enum {
1398 QL_ALLMULTI = 6, 1601 QL_ALLMULTI = 6,
1399 QL_PORT_CFG = 7, 1602 QL_PORT_CFG = 7,
1400 QL_CAM_RT_SET = 8, 1603 QL_CAM_RT_SET = 8,
1604 QL_SELFTEST = 9,
1605 QL_LB_LINK_UP = 10,
1401}; 1606};
1402 1607
1403/* link_status bit definitions */ 1608/* link_status bit definitions */
@@ -1505,6 +1710,7 @@ struct ql_adapter {
1505 1710
1506 struct rx_ring rx_ring[MAX_RX_RINGS]; 1711 struct rx_ring rx_ring[MAX_RX_RINGS];
1507 struct tx_ring tx_ring[MAX_TX_RINGS]; 1712 struct tx_ring tx_ring[MAX_TX_RINGS];
1713 unsigned int lbq_buf_order;
1508 1714
1509 int rx_csum; 1715 int rx_csum;
1510 u32 default_rx_queue; 1716 u32 default_rx_queue;
@@ -1519,11 +1725,11 @@ struct ql_adapter {
1519 u32 port_init; 1725 u32 port_init;
1520 u32 link_status; 1726 u32 link_status;
1521 u32 link_config; 1727 u32 link_config;
1728 u32 led_config;
1522 u32 max_frame_size; 1729 u32 max_frame_size;
1523 1730
1524 union flash_params flash; 1731 union flash_params flash;
1525 1732
1526 struct net_device_stats stats;
1527 struct workqueue_struct *workqueue; 1733 struct workqueue_struct *workqueue;
1528 struct delayed_work asic_reset_work; 1734 struct delayed_work asic_reset_work;
1529 struct delayed_work mpi_reset_work; 1735 struct delayed_work mpi_reset_work;
@@ -1533,6 +1739,7 @@ struct ql_adapter {
1533 struct completion ide_completion; 1739 struct completion ide_completion;
1534 struct nic_operations *nic_ops; 1740 struct nic_operations *nic_ops;
1535 u16 device_id; 1741 u16 device_id;
1742 atomic_t lb_count;
1536}; 1743};
1537 1744
1538/* 1745/*
@@ -1611,10 +1818,22 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
1611int ql_cam_route_initialize(struct ql_adapter *qdev); 1818int ql_cam_route_initialize(struct ql_adapter *qdev);
1612int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 1819int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1613int ql_mb_about_fw(struct ql_adapter *qdev); 1820int ql_mb_about_fw(struct ql_adapter *qdev);
1821int ql_wol(struct ql_adapter *qdev);
1822int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
1823int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
1824int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
1825int ql_mb_get_led_cfg(struct ql_adapter *qdev);
1614void ql_link_on(struct ql_adapter *qdev); 1826void ql_link_on(struct ql_adapter *qdev);
1615void ql_link_off(struct ql_adapter *qdev); 1827void ql_link_off(struct ql_adapter *qdev);
1616int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); 1828int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
1829int ql_mb_get_port_cfg(struct ql_adapter *qdev);
1830int ql_mb_set_port_cfg(struct ql_adapter *qdev);
1617int ql_wait_fifo_empty(struct ql_adapter *qdev); 1831int ql_wait_fifo_empty(struct ql_adapter *qdev);
1832void ql_gen_reg_dump(struct ql_adapter *qdev,
1833 struct ql_reg_dump *mpi_coredump);
1834netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
1835void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
1836int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
1618 1837
1619#if 1 1838#if 1
1620#define QL_ALL_DUMP 1839#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index aa88cb3f41c7..9f58c4710761 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,185 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3
4static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
5{
6 int status = 0;
7 int i;
8
9 for (i = 0; i < 8; i++, buf++) {
10 ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
11 *buf = ql_read32(qdev, NIC_ETS);
12 }
13
14 for (i = 0; i < 2; i++, buf++) {
15 ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
16 *buf = ql_read32(qdev, CNA_ETS);
17 }
18
19 return status;
20}
21
22static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
23{
24 int i;
25
26 for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
27 ql_write32(qdev, INTR_EN,
28 qdev->intr_context[i].intr_read_mask);
29 *buf = ql_read32(qdev, INTR_EN);
30 }
31}
32
33static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
34{
35 int i, status;
36 u32 value[3];
37
38 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
39 if (status)
40 return status;
41
42 for (i = 0; i < 16; i++) {
43 status = ql_get_mac_addr_reg(qdev,
44 MAC_ADDR_TYPE_CAM_MAC, i, value);
45 if (status) {
46 QPRINTK(qdev, DRV, ERR,
47 "Failed read of mac index register.\n");
48 goto err;
49 }
50 *buf++ = value[0]; /* lower MAC address */
51 *buf++ = value[1]; /* upper MAC address */
52 *buf++ = value[2]; /* output */
53 }
54 for (i = 0; i < 32; i++) {
55 status = ql_get_mac_addr_reg(qdev,
56 MAC_ADDR_TYPE_MULTI_MAC, i, value);
57 if (status) {
58 QPRINTK(qdev, DRV, ERR,
59 "Failed read of mac index register.\n");
60 goto err;
61 }
62 *buf++ = value[0]; /* lower Mcast address */
63 *buf++ = value[1]; /* upper Mcast address */
64 }
65err:
66 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
67 return status;
68}
69
70static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
71{
72 int status;
73 u32 value, i;
74
75 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
76 if (status)
77 return status;
78
79 for (i = 0; i < 16; i++) {
80 status = ql_get_routing_reg(qdev, i, &value);
81 if (status) {
82 QPRINTK(qdev, DRV, ERR,
83 "Failed read of routing index register.\n");
84 goto err;
85 } else {
86 *buf++ = value;
87 }
88 }
89err:
90 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
91 return status;
92}
93
94/* Create a coredump segment header */
95static void ql_build_coredump_seg_header(
96 struct mpi_coredump_segment_header *seg_hdr,
97 u32 seg_number, u32 seg_size, u8 *desc)
98{
99 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
100 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
101 seg_hdr->segNum = seg_number;
102 seg_hdr->segSize = seg_size;
103 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
104}
105
106void ql_gen_reg_dump(struct ql_adapter *qdev,
107 struct ql_reg_dump *mpi_coredump)
108{
109 int i, status;
110
111
112 memset(&(mpi_coredump->mpi_global_header), 0,
113 sizeof(struct mpi_coredump_global_header));
114 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
115 mpi_coredump->mpi_global_header.headerSize =
116 sizeof(struct mpi_coredump_global_header);
117 mpi_coredump->mpi_global_header.imageSize =
118 sizeof(struct ql_reg_dump);
119 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
120 sizeof(mpi_coredump->mpi_global_header.idString));
121
122
123 /* segment 16 */
124 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
125 MISC_NIC_INFO_SEG_NUM,
126 sizeof(struct mpi_coredump_segment_header)
127 + sizeof(mpi_coredump->misc_nic_info),
128 "MISC NIC INFO");
129 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
130 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
131 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
132 mpi_coredump->misc_nic_info.function = qdev->func;
133
134 /* Segment 16, Rev C. Step 18 */
135 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
136 NIC1_CONTROL_SEG_NUM,
137 sizeof(struct mpi_coredump_segment_header)
138 + sizeof(mpi_coredump->nic_regs),
139 "NIC Registers");
140 /* Get generic reg dump */
141 for (i = 0; i < 64; i++)
142 mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
143
144 /* Segment 31 */
145 /* Get indexed register values. */
146 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
147 INTR_STATES_SEG_NUM,
148 sizeof(struct mpi_coredump_segment_header)
149 + sizeof(mpi_coredump->intr_states),
150 "INTR States");
151 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
152
153 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
154 CAM_ENTRIES_SEG_NUM,
155 sizeof(struct mpi_coredump_segment_header)
156 + sizeof(mpi_coredump->cam_entries),
157 "CAM Entries");
158 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
159 if (status)
160 return;
161
162 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
163 ROUTING_WORDS_SEG_NUM,
164 sizeof(struct mpi_coredump_segment_header)
165 + sizeof(mpi_coredump->nic_routing_words),
166 "Routing Words");
167 status = ql_get_routing_entries(qdev,
168 &mpi_coredump->nic_routing_words[0]);
169 if (status)
170 return;
171
172 /* Segment 34 (Rev C. step 23) */
173 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
174 ETS_SEG_NUM,
175 sizeof(struct mpi_coredump_segment_header)
176 + sizeof(mpi_coredump->ets),
177 "ETS Registers");
178 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
179 if (status)
180 return;
181}
182
3#ifdef QL_REG_DUMP 183#ifdef QL_REG_DUMP
4static void ql_dump_intr_states(struct ql_adapter *qdev) 184static void ql_dump_intr_states(struct ql_adapter *qdev)
5{ 185{
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 52073946bce3..058fa0a48c6f 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -36,6 +36,11 @@
36 36
37#include "qlge.h" 37#include "qlge.h"
38 38
39static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
40 "Loopback test (offline)"
41};
42#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
43
39static int ql_update_ring_coalescing(struct ql_adapter *qdev) 44static int ql_update_ring_coalescing(struct ql_adapter *qdev)
40{ 45{
41 int i, status = 0; 46 int i, status = 0;
@@ -132,6 +137,41 @@ static void ql_update_stats(struct ql_adapter *qdev)
132 iter++; 137 iter++;
133 } 138 }
134 139
140 /*
141 * Get Per-priority TX pause frame counter statistics.
142 */
143 for (i = 0x500; i < 0x540; i += 8) {
144 if (ql_read_xgmac_reg64(qdev, i, &data)) {
145 QPRINTK(qdev, DRV, ERR,
146 "Error reading status register 0x%.04x.\n", i);
147 goto end;
148 } else
149 *iter = data;
150 iter++;
151 }
152
153 /*
154 * Get Per-priority RX pause frame counter statistics.
155 */
156 for (i = 0x568; i < 0x5a8; i += 8) {
157 if (ql_read_xgmac_reg64(qdev, i, &data)) {
158 QPRINTK(qdev, DRV, ERR,
159 "Error reading status register 0x%.04x.\n", i);
160 goto end;
161 } else
162 *iter = data;
163 iter++;
164 }
165
166 /*
167 * Get RX NIC FIFO DROP statistics.
168 */
169 if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
170 QPRINTK(qdev, DRV, ERR,
171 "Error reading status register 0x%.04x.\n", i);
172 goto end;
173 } else
174 *iter = data;
135end: 175end:
136 ql_sem_unlock(qdev, qdev->xg_sem_mask); 176 ql_sem_unlock(qdev, qdev->xg_sem_mask);
137quit: 177quit:
@@ -185,6 +225,23 @@ static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
185 {"rx_1024_to_1518_pkts"}, 225 {"rx_1024_to_1518_pkts"},
186 {"rx_1519_to_max_pkts"}, 226 {"rx_1519_to_max_pkts"},
187 {"rx_len_err_pkts"}, 227 {"rx_len_err_pkts"},
228 {"tx_cbfc_pause_frames0"},
229 {"tx_cbfc_pause_frames1"},
230 {"tx_cbfc_pause_frames2"},
231 {"tx_cbfc_pause_frames3"},
232 {"tx_cbfc_pause_frames4"},
233 {"tx_cbfc_pause_frames5"},
234 {"tx_cbfc_pause_frames6"},
235 {"tx_cbfc_pause_frames7"},
236 {"rx_cbfc_pause_frames0"},
237 {"rx_cbfc_pause_frames1"},
238 {"rx_cbfc_pause_frames2"},
239 {"rx_cbfc_pause_frames3"},
240 {"rx_cbfc_pause_frames4"},
241 {"rx_cbfc_pause_frames5"},
242 {"rx_cbfc_pause_frames6"},
243 {"rx_cbfc_pause_frames7"},
244 {"rx_nic_fifo_drop"},
188}; 245};
189 246
190static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 247static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -199,6 +256,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
199static int ql_get_sset_count(struct net_device *dev, int sset) 256static int ql_get_sset_count(struct net_device *dev, int sset)
200{ 257{
201 switch (sset) { 258 switch (sset) {
259 case ETH_SS_TEST:
260 return QLGE_TEST_LEN;
202 case ETH_SS_STATS: 261 case ETH_SS_STATS:
203 return ARRAY_SIZE(ql_stats_str_arr); 262 return ARRAY_SIZE(ql_stats_str_arr);
204 default: 263 default:
@@ -257,6 +316,23 @@ ql_get_ethtool_stats(struct net_device *ndev,
257 *data++ = s->rx_1024_to_1518_pkts; 316 *data++ = s->rx_1024_to_1518_pkts;
258 *data++ = s->rx_1519_to_max_pkts; 317 *data++ = s->rx_1519_to_max_pkts;
259 *data++ = s->rx_len_err_pkts; 318 *data++ = s->rx_len_err_pkts;
319 *data++ = s->tx_cbfc_pause_frames0;
320 *data++ = s->tx_cbfc_pause_frames1;
321 *data++ = s->tx_cbfc_pause_frames2;
322 *data++ = s->tx_cbfc_pause_frames3;
323 *data++ = s->tx_cbfc_pause_frames4;
324 *data++ = s->tx_cbfc_pause_frames5;
325 *data++ = s->tx_cbfc_pause_frames6;
326 *data++ = s->tx_cbfc_pause_frames7;
327 *data++ = s->rx_cbfc_pause_frames0;
328 *data++ = s->rx_cbfc_pause_frames1;
329 *data++ = s->rx_cbfc_pause_frames2;
330 *data++ = s->rx_cbfc_pause_frames3;
331 *data++ = s->rx_cbfc_pause_frames4;
332 *data++ = s->rx_cbfc_pause_frames5;
333 *data++ = s->rx_cbfc_pause_frames6;
334 *data++ = s->rx_cbfc_pause_frames7;
335 *data++ = s->rx_nic_fifo_drop;
260} 336}
261 337
262static int ql_get_settings(struct net_device *ndev, 338static int ql_get_settings(struct net_device *ndev,
@@ -302,6 +378,181 @@ static void ql_get_drvinfo(struct net_device *ndev,
302 drvinfo->eedump_len = 0; 378 drvinfo->eedump_len = 0;
303} 379}
304 380
381static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
382{
383 struct ql_adapter *qdev = netdev_priv(ndev);
384 /* What we support. */
385 wol->supported = WAKE_MAGIC;
386 /* What we've currently got set. */
387 wol->wolopts = qdev->wol;
388}
389
390static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
391{
392 struct ql_adapter *qdev = netdev_priv(ndev);
393 int status;
394
395 if (wol->wolopts & ~WAKE_MAGIC)
396 return -EINVAL;
397 qdev->wol = wol->wolopts;
398
399 QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
400 qdev->wol, ndev->name);
401 if (!qdev->wol) {
402 u32 wol = 0;
403 status = ql_mb_wol_mode(qdev, wol);
404 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
405 (status == 0) ? "cleared sucessfully" : "clear failed",
406 wol, qdev->ndev->name);
407 }
408
409 return 0;
410}
411
412static int ql_phys_id(struct net_device *ndev, u32 data)
413{
414 struct ql_adapter *qdev = netdev_priv(ndev);
415 u32 led_reg, i;
416 int status;
417
418 /* Save the current LED settings */
419 status = ql_mb_get_led_cfg(qdev);
420 if (status)
421 return status;
422 led_reg = qdev->led_config;
423
424 /* Start blinking the led */
425 if (!data || data > 300)
426 data = 300;
427
428 for (i = 0; i < (data * 10); i++)
429 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
430
431 /* Restore LED settings */
432 status = ql_mb_set_led_cfg(qdev, led_reg);
433 if (status)
434 return status;
435
436 return 0;
437}
438
439static int ql_start_loopback(struct ql_adapter *qdev)
440{
441 if (netif_carrier_ok(qdev->ndev)) {
442 set_bit(QL_LB_LINK_UP, &qdev->flags);
443 netif_carrier_off(qdev->ndev);
444 } else
445 clear_bit(QL_LB_LINK_UP, &qdev->flags);
446 qdev->link_config |= CFG_LOOPBACK_PCS;
447 return ql_mb_set_port_cfg(qdev);
448}
449
450static void ql_stop_loopback(struct ql_adapter *qdev)
451{
452 qdev->link_config &= ~CFG_LOOPBACK_PCS;
453 ql_mb_set_port_cfg(qdev);
454 if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
455 netif_carrier_on(qdev->ndev);
456 clear_bit(QL_LB_LINK_UP, &qdev->flags);
457 }
458}
459
460static void ql_create_lb_frame(struct sk_buff *skb,
461 unsigned int frame_size)
462{
463 memset(skb->data, 0xFF, frame_size);
464 frame_size &= ~1;
465 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
466 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
467 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
468}
469
470void ql_check_lb_frame(struct ql_adapter *qdev,
471 struct sk_buff *skb)
472{
473 unsigned int frame_size = skb->len;
474
475 if ((*(skb->data + 3) == 0xFF) &&
476 (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
477 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
478 atomic_dec(&qdev->lb_count);
479 return;
480 }
481}
482
483static int ql_run_loopback_test(struct ql_adapter *qdev)
484{
485 int i;
486 netdev_tx_t rc;
487 struct sk_buff *skb;
488 unsigned int size = SMALL_BUF_MAP_SIZE;
489
490 for (i = 0; i < 64; i++) {
491 skb = netdev_alloc_skb(qdev->ndev, size);
492 if (!skb)
493 return -ENOMEM;
494
495 skb->queue_mapping = 0;
496 skb_put(skb, size);
497 ql_create_lb_frame(skb, size);
498 rc = ql_lb_send(skb, qdev->ndev);
499 if (rc != NETDEV_TX_OK)
500 return -EPIPE;
501 atomic_inc(&qdev->lb_count);
502 }
503
504 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
505 return atomic_read(&qdev->lb_count) ? -EIO : 0;
506}
507
508static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
509{
510 *data = ql_start_loopback(qdev);
511 if (*data)
512 goto out;
513 *data = ql_run_loopback_test(qdev);
514out:
515 ql_stop_loopback(qdev);
516 return *data;
517}
518
519static void ql_self_test(struct net_device *ndev,
520 struct ethtool_test *eth_test, u64 *data)
521{
522 struct ql_adapter *qdev = netdev_priv(ndev);
523
524 if (netif_running(ndev)) {
525 set_bit(QL_SELFTEST, &qdev->flags);
526 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
527 /* Offline tests */
528 if (ql_loopback_test(qdev, &data[0]))
529 eth_test->flags |= ETH_TEST_FL_FAILED;
530
531 } else {
532 /* Online tests */
533 data[0] = 0;
534 }
535 clear_bit(QL_SELFTEST, &qdev->flags);
536 } else {
537 QPRINTK(qdev, DRV, ERR,
538 "%s: is down, Loopback test will fail.\n", ndev->name);
539 eth_test->flags |= ETH_TEST_FL_FAILED;
540 }
541}
542
543static int ql_get_regs_len(struct net_device *ndev)
544{
545 return sizeof(struct ql_reg_dump);
546}
547
548static void ql_get_regs(struct net_device *ndev,
549 struct ethtool_regs *regs, void *p)
550{
551 struct ql_adapter *qdev = netdev_priv(ndev);
552
553 ql_gen_reg_dump(qdev, p);
554}
555
305static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 556static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
306{ 557{
307 struct ql_adapter *qdev = netdev_priv(dev); 558 struct ql_adapter *qdev = netdev_priv(dev);
@@ -355,6 +606,37 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
355 return ql_update_ring_coalescing(qdev); 606 return ql_update_ring_coalescing(qdev);
356} 607}
357 608
609static void ql_get_pauseparam(struct net_device *netdev,
610 struct ethtool_pauseparam *pause)
611{
612 struct ql_adapter *qdev = netdev_priv(netdev);
613
614 ql_mb_get_port_cfg(qdev);
615 if (qdev->link_config & CFG_PAUSE_STD) {
616 pause->rx_pause = 1;
617 pause->tx_pause = 1;
618 }
619}
620
621static int ql_set_pauseparam(struct net_device *netdev,
622 struct ethtool_pauseparam *pause)
623{
624 struct ql_adapter *qdev = netdev_priv(netdev);
625 int status = 0;
626
627 if ((pause->rx_pause) && (pause->tx_pause))
628 qdev->link_config |= CFG_PAUSE_STD;
629 else if (!pause->rx_pause && !pause->tx_pause)
630 qdev->link_config &= ~CFG_PAUSE_STD;
631 else
632 return -EINVAL;
633
634 status = ql_mb_set_port_cfg(qdev);
635 if (status)
636 return status;
637 return status;
638}
639
358static u32 ql_get_rx_csum(struct net_device *netdev) 640static u32 ql_get_rx_csum(struct net_device *netdev)
359{ 641{
360 struct ql_adapter *qdev = netdev_priv(netdev); 642 struct ql_adapter *qdev = netdev_priv(netdev);
@@ -396,9 +678,17 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
396const struct ethtool_ops qlge_ethtool_ops = { 678const struct ethtool_ops qlge_ethtool_ops = {
397 .get_settings = ql_get_settings, 679 .get_settings = ql_get_settings,
398 .get_drvinfo = ql_get_drvinfo, 680 .get_drvinfo = ql_get_drvinfo,
681 .get_wol = ql_get_wol,
682 .set_wol = ql_set_wol,
683 .get_regs_len = ql_get_regs_len,
684 .get_regs = ql_get_regs,
399 .get_msglevel = ql_get_msglevel, 685 .get_msglevel = ql_get_msglevel,
400 .set_msglevel = ql_set_msglevel, 686 .set_msglevel = ql_set_msglevel,
401 .get_link = ethtool_op_get_link, 687 .get_link = ethtool_op_get_link,
688 .phys_id = ql_phys_id,
689 .self_test = ql_self_test,
690 .get_pauseparam = ql_get_pauseparam,
691 .set_pauseparam = ql_set_pauseparam,
402 .get_rx_csum = ql_get_rx_csum, 692 .get_rx_csum = ql_get_rx_csum,
403 .set_rx_csum = ql_set_rx_csum, 693 .set_rx_csum = ql_set_rx_csum,
404 .get_tx_csum = ethtool_op_get_tx_csum, 694 .get_tx_csum = ethtool_op_get_tx_csum,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index a2fc70a0d0cc..7692299e7826 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -69,9 +69,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69#define MSIX_IRQ 0 69#define MSIX_IRQ 0
70#define MSI_IRQ 1 70#define MSI_IRQ 1
71#define LEG_IRQ 2 71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ; 72static int qlge_irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -1025,6 +1025,11 @@ end:
1025 return status; 1025 return status;
1026} 1026}
1027 1027
1028static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1029{
1030 return PAGE_SIZE << qdev->lbq_buf_order;
1031}
1032
1028/* Get the next large buffer. */ 1033/* Get the next large buffer. */
1029static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) 1034static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1030{ 1035{
@@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1036 return lbq_desc; 1041 return lbq_desc;
1037} 1042}
1038 1043
1044static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1045 struct rx_ring *rx_ring)
1046{
1047 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1048
1049 pci_dma_sync_single_for_cpu(qdev->pdev,
1050 pci_unmap_addr(lbq_desc, mapaddr),
1051 rx_ring->lbq_buf_size,
1052 PCI_DMA_FROMDEVICE);
1053
1054 /* If it's the last chunk of our master page then
1055 * we unmap it.
1056 */
1057 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1058 == ql_lbq_block_size(qdev))
1059 pci_unmap_page(qdev->pdev,
1060 lbq_desc->p.pg_chunk.map,
1061 ql_lbq_block_size(qdev),
1062 PCI_DMA_FROMDEVICE);
1063 return lbq_desc;
1064}
1065
1039/* Get the next small buffer. */ 1066/* Get the next small buffer. */
1040static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) 1067static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1041{ 1068{
@@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
1063 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); 1090 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1064} 1091}
1065 1092
1093static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1094 struct bq_desc *lbq_desc)
1095{
1096 if (!rx_ring->pg_chunk.page) {
1097 u64 map;
1098 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1099 GFP_ATOMIC,
1100 qdev->lbq_buf_order);
1101 if (unlikely(!rx_ring->pg_chunk.page)) {
1102 QPRINTK(qdev, DRV, ERR,
1103 "page allocation failed.\n");
1104 return -ENOMEM;
1105 }
1106 rx_ring->pg_chunk.offset = 0;
1107 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1108 0, ql_lbq_block_size(qdev),
1109 PCI_DMA_FROMDEVICE);
1110 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111 __free_pages(rx_ring->pg_chunk.page,
1112 qdev->lbq_buf_order);
1113 QPRINTK(qdev, DRV, ERR,
1114 "PCI mapping failed.\n");
1115 return -ENOMEM;
1116 }
1117 rx_ring->pg_chunk.map = map;
1118 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 }
1120
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1123 */
1124 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1125
1126 /* Adjust the master page chunk for next
1127 * buffer get.
1128 */
1129 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1130 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1131 rx_ring->pg_chunk.page = NULL;
1132 lbq_desc->p.pg_chunk.last_flag = 1;
1133 } else {
1134 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1135 get_page(rx_ring->pg_chunk.page);
1136 lbq_desc->p.pg_chunk.last_flag = 0;
1137 }
1138 return 0;
1139}
1066/* Process (refill) a large buffer queue. */ 1140/* Process (refill) a large buffer queue. */
1067static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) 1141static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1068{ 1142{
@@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1072 u64 map; 1146 u64 map;
1073 int i; 1147 int i;
1074 1148
1075 while (rx_ring->lbq_free_cnt > 16) { 1149 while (rx_ring->lbq_free_cnt > 32) {
1076 for (i = 0; i < 16; i++) { 1150 for (i = 0; i < 16; i++) {
1077 QPRINTK(qdev, RX_STATUS, DEBUG, 1151 QPRINTK(qdev, RX_STATUS, DEBUG,
1078 "lbq: try cleaning clean_idx = %d.\n", 1152 "lbq: try cleaning clean_idx = %d.\n",
1079 clean_idx); 1153 clean_idx);
1080 lbq_desc = &rx_ring->lbq[clean_idx]; 1154 lbq_desc = &rx_ring->lbq[clean_idx];
1081 if (lbq_desc->p.lbq_page == NULL) { 1155 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1082 QPRINTK(qdev, RX_STATUS, DEBUG, 1156 QPRINTK(qdev, IFUP, ERR,
1083 "lbq: getting new page for index %d.\n", 1157 "Could not get a page chunk.\n");
1084 lbq_desc->index);
1085 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1086 if (lbq_desc->p.lbq_page == NULL) {
1087 rx_ring->lbq_clean_idx = clean_idx;
1088 QPRINTK(qdev, RX_STATUS, ERR,
1089 "Couldn't get a page.\n");
1090 return;
1091 }
1092 map = pci_map_page(qdev->pdev,
1093 lbq_desc->p.lbq_page,
1094 0, PAGE_SIZE,
1095 PCI_DMA_FROMDEVICE);
1096 if (pci_dma_mapping_error(qdev->pdev, map)) {
1097 rx_ring->lbq_clean_idx = clean_idx;
1098 put_page(lbq_desc->p.lbq_page);
1099 lbq_desc->p.lbq_page = NULL;
1100 QPRINTK(qdev, RX_STATUS, ERR,
1101 "PCI mapping failed.\n");
1102 return; 1158 return;
1103 } 1159 }
1160
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
1104 pci_unmap_addr_set(lbq_desc, mapaddr, map); 1163 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1105 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); 1164 pci_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
1106 *lbq_desc->addr = cpu_to_le64(map); 1166 *lbq_desc->addr = cpu_to_le64(map);
1107 } 1167
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
1108 clean_idx++; 1171 clean_idx++;
1109 if (clean_idx == rx_ring->lbq_len) 1172 if (clean_idx == rx_ring->lbq_len)
1110 clean_idx = 0; 1173 clean_idx = 0;
@@ -1147,7 +1210,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1147 sbq_desc->index); 1210 sbq_desc->index);
1148 sbq_desc->p.skb = 1211 sbq_desc->p.skb =
1149 netdev_alloc_skb(qdev->ndev, 1212 netdev_alloc_skb(qdev->ndev,
1150 rx_ring->sbq_buf_size); 1213 SMALL_BUFFER_SIZE);
1151 if (sbq_desc->p.skb == NULL) { 1214 if (sbq_desc->p.skb == NULL) {
1152 QPRINTK(qdev, PROBE, ERR, 1215 QPRINTK(qdev, PROBE, ERR,
1153 "Couldn't get an skb.\n"); 1216 "Couldn't get an skb.\n");
@@ -1157,8 +1220,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1157 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); 1220 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1158 map = pci_map_single(qdev->pdev, 1221 map = pci_map_single(qdev->pdev,
1159 sbq_desc->p.skb->data, 1222 sbq_desc->p.skb->data,
1160 rx_ring->sbq_buf_size / 1223 rx_ring->sbq_buf_size,
1161 2, PCI_DMA_FROMDEVICE); 1224 PCI_DMA_FROMDEVICE);
1162 if (pci_dma_mapping_error(qdev->pdev, map)) { 1225 if (pci_dma_mapping_error(qdev->pdev, map)) {
1163 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n"); 1226 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1164 rx_ring->sbq_clean_idx = clean_idx; 1227 rx_ring->sbq_clean_idx = clean_idx;
@@ -1168,7 +1231,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1168 } 1231 }
1169 pci_unmap_addr_set(sbq_desc, mapaddr, map); 1232 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1170 pci_unmap_len_set(sbq_desc, maplen, 1233 pci_unmap_len_set(sbq_desc, maplen,
1171 rx_ring->sbq_buf_size / 2); 1234 rx_ring->sbq_buf_size);
1172 *sbq_desc->addr = cpu_to_le64(map); 1235 *sbq_desc->addr = cpu_to_le64(map);
1173 } 1236 }
1174 1237
@@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1480 * chain it to the header buffer's skb and let 1543 * chain it to the header buffer's skb and let
1481 * it rip. 1544 * it rip.
1482 */ 1545 */
1483 lbq_desc = ql_get_curr_lbuf(rx_ring); 1546 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1484 pci_unmap_page(qdev->pdev,
1485 pci_unmap_addr(lbq_desc,
1486 mapaddr),
1487 pci_unmap_len(lbq_desc, maplen),
1488 PCI_DMA_FROMDEVICE);
1489 QPRINTK(qdev, RX_STATUS, DEBUG, 1547 QPRINTK(qdev, RX_STATUS, DEBUG,
1490 "Chaining page to skb.\n"); 1548 "Chaining page at offset = %d,"
1491 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, 1549 "for %d bytes to skb.\n",
1492 0, length); 1550 lbq_desc->p.pg_chunk.offset, length);
1551 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552 lbq_desc->p.pg_chunk.offset,
1553 length);
1493 skb->len += length; 1554 skb->len += length;
1494 skb->data_len += length; 1555 skb->data_len += length;
1495 skb->truesize += length; 1556 skb->truesize += length;
1496 lbq_desc->p.lbq_page = NULL;
1497 } else { 1557 } else {
1498 /* 1558 /*
1499 * The headers and data are in a single large buffer. We 1559 * The headers and data are in a single large buffer. We
1500 * copy it to a new skb and let it go. This can happen with 1560 * copy it to a new skb and let it go. This can happen with
1501 * jumbo mtu on a non-TCP/UDP frame. 1561 * jumbo mtu on a non-TCP/UDP frame.
1502 */ 1562 */
1503 lbq_desc = ql_get_curr_lbuf(rx_ring); 1563 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1504 skb = netdev_alloc_skb(qdev->ndev, length); 1564 skb = netdev_alloc_skb(qdev->ndev, length);
1505 if (skb == NULL) { 1565 if (skb == NULL) {
1506 QPRINTK(qdev, PROBE, DEBUG, 1566 QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1515 skb_reserve(skb, NET_IP_ALIGN); 1575 skb_reserve(skb, NET_IP_ALIGN);
1516 QPRINTK(qdev, RX_STATUS, DEBUG, 1576 QPRINTK(qdev, RX_STATUS, DEBUG,
1517 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); 1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1518 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, 1578 skb_fill_page_desc(skb, 0,
1519 0, length); 1579 lbq_desc->p.pg_chunk.page,
1580 lbq_desc->p.pg_chunk.offset,
1581 length);
1520 skb->len += length; 1582 skb->len += length;
1521 skb->data_len += length; 1583 skb->data_len += length;
1522 skb->truesize += length; 1584 skb->truesize += length;
1523 length -= length; 1585 length -= length;
1524 lbq_desc->p.lbq_page = NULL;
1525 __pskb_pull_tail(skb, 1586 __pskb_pull_tail(skb,
1526 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1587 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1527 VLAN_ETH_HLEN : ETH_HLEN); 1588 VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1538 * frames. If the MTU goes up we could 1599 * frames. If the MTU goes up we could
1539 * eventually be in trouble. 1600 * eventually be in trouble.
1540 */ 1601 */
1541 int size, offset, i = 0; 1602 int size, i = 0;
1542 __le64 *bq, bq_array[8];
1543 sbq_desc = ql_get_curr_sbuf(rx_ring); 1603 sbq_desc = ql_get_curr_sbuf(rx_ring);
1544 pci_unmap_single(qdev->pdev, 1604 pci_unmap_single(qdev->pdev,
1545 pci_unmap_addr(sbq_desc, mapaddr), 1605 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1558 QPRINTK(qdev, RX_STATUS, DEBUG, 1618 QPRINTK(qdev, RX_STATUS, DEBUG,
1559 "%d bytes of headers & data in chain of large.\n", length); 1619 "%d bytes of headers & data in chain of large.\n", length);
1560 skb = sbq_desc->p.skb; 1620 skb = sbq_desc->p.skb;
1561 bq = &bq_array[0];
1562 memcpy(bq, skb->data, sizeof(bq_array));
1563 sbq_desc->p.skb = NULL; 1621 sbq_desc->p.skb = NULL;
1564 skb_reserve(skb, NET_IP_ALIGN); 1622 skb_reserve(skb, NET_IP_ALIGN);
1565 } else {
1566 QPRINTK(qdev, RX_STATUS, DEBUG,
1567 "Headers in small, %d bytes of data in chain of large.\n", length);
1568 bq = (__le64 *)sbq_desc->p.skb->data;
1569 } 1623 }
1570 while (length > 0) { 1624 while (length > 0) {
1571 lbq_desc = ql_get_curr_lbuf(rx_ring); 1625 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1572 pci_unmap_page(qdev->pdev, 1626 size = (length < rx_ring->lbq_buf_size) ? length :
1573 pci_unmap_addr(lbq_desc, 1627 rx_ring->lbq_buf_size;
1574 mapaddr),
1575 pci_unmap_len(lbq_desc,
1576 maplen),
1577 PCI_DMA_FROMDEVICE);
1578 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1579 offset = 0;
1580 1628
1581 QPRINTK(qdev, RX_STATUS, DEBUG, 1629 QPRINTK(qdev, RX_STATUS, DEBUG,
1582 "Adding page %d to skb for %d bytes.\n", 1630 "Adding page %d to skb for %d bytes.\n",
1583 i, size); 1631 i, size);
1584 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page, 1632 skb_fill_page_desc(skb, i,
1585 offset, size); 1633 lbq_desc->p.pg_chunk.page,
1634 lbq_desc->p.pg_chunk.offset,
1635 size);
1586 skb->len += size; 1636 skb->len += size;
1587 skb->data_len += size; 1637 skb->data_len += size;
1588 skb->truesize += size; 1638 skb->truesize += size;
1589 length -= size; 1639 length -= size;
1590 lbq_desc->p.lbq_page = NULL;
1591 bq++;
1592 i++; 1640 i++;
1593 } 1641 }
1594 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1642 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@ -1613,6 +1661,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1613 if (unlikely(!skb)) { 1661 if (unlikely(!skb)) {
1614 QPRINTK(qdev, RX_STATUS, DEBUG, 1662 QPRINTK(qdev, RX_STATUS, DEBUG,
1615 "No skb available, drop packet.\n"); 1663 "No skb available, drop packet.\n");
1664 rx_ring->rx_dropped++;
1616 return; 1665 return;
1617 } 1666 }
1618 1667
@@ -1621,6 +1670,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1621 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n", 1670 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1622 ib_mac_rsp->flags2); 1671 ib_mac_rsp->flags2);
1623 dev_kfree_skb_any(skb); 1672 dev_kfree_skb_any(skb);
1673 rx_ring->rx_errors++;
1624 return; 1674 return;
1625 } 1675 }
1626 1676
@@ -1629,6 +1679,14 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1629 */ 1679 */
1630 if (skb->len > ndev->mtu + ETH_HLEN) { 1680 if (skb->len > ndev->mtu + ETH_HLEN) {
1631 dev_kfree_skb_any(skb); 1681 dev_kfree_skb_any(skb);
1682 rx_ring->rx_dropped++;
1683 return;
1684 }
1685
1686 /* loopback self test for ethtool */
1687 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1688 ql_check_lb_frame(qdev, skb);
1689 dev_kfree_skb_any(skb);
1632 return; 1690 return;
1633 } 1691 }
1634 1692
@@ -1642,6 +1700,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1642 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", 1700 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1701 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); 1702 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1703 rx_ring->rx_multicast++;
1645 } 1704 }
1646 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { 1705 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1647 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); 1706 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
@@ -1673,8 +1732,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1673 } 1732 }
1674 } 1733 }
1675 1734
1676 qdev->stats.rx_packets++; 1735 rx_ring->rx_packets++;
1677 qdev->stats.rx_bytes += skb->len; 1736 rx_ring->rx_bytes += skb->len;
1678 skb_record_rx_queue(skb, rx_ring->cq_id); 1737 skb_record_rx_queue(skb, rx_ring->cq_id);
1679 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 1738 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1680 if (qdev->vlgrp && 1739 if (qdev->vlgrp &&
@@ -1705,8 +1764,8 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1705 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; 1764 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1706 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; 1765 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1707 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); 1766 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1708 qdev->stats.tx_bytes += (tx_ring_desc->skb)->len; 1767 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
1709 qdev->stats.tx_packets++; 1768 tx_ring->tx_packets++;
1710 dev_kfree_skb(tx_ring_desc->skb); 1769 dev_kfree_skb(tx_ring_desc->skb);
1711 tx_ring_desc->skb = NULL; 1770 tx_ring_desc->skb = NULL;
1712 1771
@@ -1929,7 +1988,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1929 return work_done; 1988 return work_done;
1930} 1989}
1931 1990
1932static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) 1991static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1933{ 1992{
1934 struct ql_adapter *qdev = netdev_priv(ndev); 1993 struct ql_adapter *qdev = netdev_priv(ndev);
1935 1994
@@ -1945,7 +2004,7 @@ static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1945 } 2004 }
1946} 2005}
1947 2006
1948static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) 2007static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1949{ 2008{
1950 struct ql_adapter *qdev = netdev_priv(ndev); 2009 struct ql_adapter *qdev = netdev_priv(ndev);
1951 u32 enable_bit = MAC_ADDR_E; 2010 u32 enable_bit = MAC_ADDR_E;
@@ -1961,7 +2020,7 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1961 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2020 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1962} 2021}
1963 2022
1964static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 2023static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1965{ 2024{
1966 struct ql_adapter *qdev = netdev_priv(ndev); 2025 struct ql_adapter *qdev = netdev_priv(ndev);
1967 u32 enable_bit = 0; 2026 u32 enable_bit = 0;
@@ -2046,12 +2105,12 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2046 */ 2105 */
2047 var = ql_read32(qdev, ISR1); 2106 var = ql_read32(qdev, ISR1);
2048 if (var & intr_context->irq_mask) { 2107 if (var & intr_context->irq_mask) {
2049 QPRINTK(qdev, INTR, INFO, 2108 QPRINTK(qdev, INTR, INFO,
2050 "Waking handler for rx_ring[0].\n"); 2109 "Waking handler for rx_ring[0].\n");
2051 ql_disable_completion_interrupt(qdev, intr_context->intr); 2110 ql_disable_completion_interrupt(qdev, intr_context->intr);
2052 napi_schedule(&rx_ring->napi); 2111 napi_schedule(&rx_ring->napi);
2053 work_done++; 2112 work_done++;
2054 } 2113 }
2055 ql_enable_completion_interrupt(qdev, intr_context->intr); 2114 ql_enable_completion_interrupt(qdev, intr_context->intr);
2056 return work_done ? IRQ_HANDLED : IRQ_NONE; 2115 return work_done ? IRQ_HANDLED : IRQ_NONE;
2057} 2116}
@@ -2149,6 +2208,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2149 __func__, tx_ring_idx); 2208 __func__, tx_ring_idx);
2150 netif_stop_subqueue(ndev, tx_ring->wq_id); 2209 netif_stop_subqueue(ndev, tx_ring->wq_id);
2151 atomic_inc(&tx_ring->queue_stopped); 2210 atomic_inc(&tx_ring->queue_stopped);
2211 tx_ring->tx_errors++;
2152 return NETDEV_TX_BUSY; 2212 return NETDEV_TX_BUSY;
2153 } 2213 }
2154 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; 2214 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
@@ -2183,6 +2243,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2183 NETDEV_TX_OK) { 2243 NETDEV_TX_OK) {
2184 QPRINTK(qdev, TX_QUEUED, ERR, 2244 QPRINTK(qdev, TX_QUEUED, ERR,
2185 "Could not map the segments.\n"); 2245 "Could not map the segments.\n");
2246 tx_ring->tx_errors++;
2186 return NETDEV_TX_BUSY; 2247 return NETDEV_TX_BUSY;
2187 } 2248 }
2188 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); 2249 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
@@ -2199,6 +2260,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2199 return NETDEV_TX_OK; 2260 return NETDEV_TX_OK;
2200} 2261}
2201 2262
2263
2202static void ql_free_shadow_space(struct ql_adapter *qdev) 2264static void ql_free_shadow_space(struct ql_adapter *qdev)
2203{ 2265{
2204 if (qdev->rx_ring_shadow_reg_area) { 2266 if (qdev->rx_ring_shadow_reg_area) {
@@ -2304,20 +2366,29 @@ err:
2304 2366
2305static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2367static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2306{ 2368{
2307 int i;
2308 struct bq_desc *lbq_desc; 2369 struct bq_desc *lbq_desc;
2309 2370
2310 for (i = 0; i < rx_ring->lbq_len; i++) { 2371 uint32_t curr_idx, clean_idx;
2311 lbq_desc = &rx_ring->lbq[i]; 2372
2312 if (lbq_desc->p.lbq_page) { 2373 curr_idx = rx_ring->lbq_curr_idx;
2374 clean_idx = rx_ring->lbq_clean_idx;
2375 while (curr_idx != clean_idx) {
2376 lbq_desc = &rx_ring->lbq[curr_idx];
2377
2378 if (lbq_desc->p.pg_chunk.last_flag) {
2313 pci_unmap_page(qdev->pdev, 2379 pci_unmap_page(qdev->pdev,
2314 pci_unmap_addr(lbq_desc, mapaddr), 2380 lbq_desc->p.pg_chunk.map,
2315 pci_unmap_len(lbq_desc, maplen), 2381 ql_lbq_block_size(qdev),
2316 PCI_DMA_FROMDEVICE); 2382 PCI_DMA_FROMDEVICE);
2317 2383 lbq_desc->p.pg_chunk.last_flag = 0;
2318 put_page(lbq_desc->p.lbq_page);
2319 lbq_desc->p.lbq_page = NULL;
2320 } 2384 }
2385
2386 put_page(lbq_desc->p.pg_chunk.page);
2387 lbq_desc->p.pg_chunk.page = NULL;
2388
2389 if (++curr_idx == rx_ring->lbq_len)
2390 curr_idx = 0;
2391
2321 } 2392 }
2322} 2393}
2323 2394
@@ -2615,6 +2686,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2615 /* Set up the shadow registers for this ring. */ 2686 /* Set up the shadow registers for this ring. */
2616 rx_ring->prod_idx_sh_reg = shadow_reg; 2687 rx_ring->prod_idx_sh_reg = shadow_reg;
2617 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; 2688 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2689 *rx_ring->prod_idx_sh_reg = 0;
2618 shadow_reg += sizeof(u64); 2690 shadow_reg += sizeof(u64);
2619 shadow_reg_dma += sizeof(u64); 2691 shadow_reg_dma += sizeof(u64);
2620 rx_ring->lbq_base_indirect = shadow_reg; 2692 rx_ring->lbq_base_indirect = shadow_reg;
@@ -2692,7 +2764,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2692 cqicb->sbq_addr = 2764 cqicb->sbq_addr =
2693 cpu_to_le64(rx_ring->sbq_base_indirect_dma); 2765 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2694 cqicb->sbq_buf_size = 2766 cqicb->sbq_buf_size =
2695 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2)); 2767 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
2696 bq_len = (rx_ring->sbq_len == 65536) ? 0 : 2768 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2697 (u16) rx_ring->sbq_len; 2769 (u16) rx_ring->sbq_len;
2698 cqicb->sbq_len = cpu_to_le16(bq_len); 2770 cqicb->sbq_len = cpu_to_le16(bq_len);
@@ -2798,7 +2870,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2798 int i, err; 2870 int i, err;
2799 2871
2800 /* Get the MSIX vectors. */ 2872 /* Get the MSIX vectors. */
2801 if (irq_type == MSIX_IRQ) { 2873 if (qlge_irq_type == MSIX_IRQ) {
2802 /* Try to alloc space for the msix struct, 2874 /* Try to alloc space for the msix struct,
2803 * if it fails then go to MSI/legacy. 2875 * if it fails then go to MSI/legacy.
2804 */ 2876 */
@@ -2806,7 +2878,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2806 sizeof(struct msix_entry), 2878 sizeof(struct msix_entry),
2807 GFP_KERNEL); 2879 GFP_KERNEL);
2808 if (!qdev->msi_x_entry) { 2880 if (!qdev->msi_x_entry) {
2809 irq_type = MSI_IRQ; 2881 qlge_irq_type = MSI_IRQ;
2810 goto msi; 2882 goto msi;
2811 } 2883 }
2812 2884
@@ -2829,7 +2901,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2829 QPRINTK(qdev, IFUP, WARNING, 2901 QPRINTK(qdev, IFUP, WARNING,
2830 "MSI-X Enable failed, trying MSI.\n"); 2902 "MSI-X Enable failed, trying MSI.\n");
2831 qdev->intr_count = 1; 2903 qdev->intr_count = 1;
2832 irq_type = MSI_IRQ; 2904 qlge_irq_type = MSI_IRQ;
2833 } else if (err == 0) { 2905 } else if (err == 0) {
2834 set_bit(QL_MSIX_ENABLED, &qdev->flags); 2906 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2835 QPRINTK(qdev, IFUP, INFO, 2907 QPRINTK(qdev, IFUP, INFO,
@@ -2840,7 +2912,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2840 } 2912 }
2841msi: 2913msi:
2842 qdev->intr_count = 1; 2914 qdev->intr_count = 1;
2843 if (irq_type == MSI_IRQ) { 2915 if (qlge_irq_type == MSI_IRQ) {
2844 if (!pci_enable_msi(qdev->pdev)) { 2916 if (!pci_enable_msi(qdev->pdev)) {
2845 set_bit(QL_MSI_ENABLED, &qdev->flags); 2917 set_bit(QL_MSI_ENABLED, &qdev->flags);
2846 QPRINTK(qdev, IFUP, INFO, 2918 QPRINTK(qdev, IFUP, INFO,
@@ -2848,7 +2920,7 @@ msi:
2848 return; 2920 return;
2849 } 2921 }
2850 } 2922 }
2851 irq_type = LEG_IRQ; 2923 qlge_irq_type = LEG_IRQ;
2852 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); 2924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2853} 2925}
2854 2926
@@ -3268,7 +3340,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3268 ql_write32(qdev, FSC, mask | value); 3340 ql_write32(qdev, FSC, mask | value);
3269 3341
3270 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | 3342 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3271 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); 3343 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
3272 3344
3273 /* Set RX packet routing to use port/pci function on which the 3345 /* Set RX packet routing to use port/pci function on which the
3274 * packet arrived on in addition to usual frame routing. 3346 * packet arrived on in addition to usual frame routing.
@@ -3276,6 +3348,22 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3276 * the same MAC address. 3348 * the same MAC address.
3277 */ 3349 */
3278 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); 3350 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3351 /* Reroute all packets to our Interface.
3352 * They may have been routed to MPI firmware
3353 * due to WOL.
3354 */
3355 value = ql_read32(qdev, MGMT_RCV_CFG);
3356 value &= ~MGMT_RCV_CFG_RM;
3357 mask = 0xffff0000;
3358
3359 /* Sticky reg needs clearing due to WOL. */
3360 ql_write32(qdev, MGMT_RCV_CFG, mask);
3361 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3362
3363 /* Default WOL is enable on Mezz cards */
3364 if (qdev->pdev->subsystem_device == 0x0068 ||
3365 qdev->pdev->subsystem_device == 0x0180)
3366 qdev->wol = WAKE_MAGIC;
3279 3367
3280 /* Start up the rx queues. */ 3368 /* Start up the rx queues. */
3281 for (i = 0; i < qdev->rx_ring_count; i++) { 3369 for (i = 0; i < qdev->rx_ring_count; i++) {
@@ -3310,10 +3398,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3310 3398
3311 /* Initialize the port and set the max framesize. */ 3399 /* Initialize the port and set the max framesize. */
3312 status = qdev->nic_ops->port_initialize(qdev); 3400 status = qdev->nic_ops->port_initialize(qdev);
3313 if (status) { 3401 if (status)
3314 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); 3402 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3315 return status;
3316 }
3317 3403
3318 /* Set up the MAC address and frame routing filter. */ 3404 /* Set up the MAC address and frame routing filter. */
3319 status = ql_cam_route_initialize(qdev); 3405 status = ql_cam_route_initialize(qdev);
@@ -3392,6 +3478,52 @@ static void ql_display_dev_info(struct net_device *ndev)
3392 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr); 3478 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3393} 3479}
3394 3480
3481int ql_wol(struct ql_adapter *qdev)
3482{
3483 int status = 0;
3484 u32 wol = MB_WOL_DISABLE;
3485
3486 /* The CAM is still intact after a reset, but if we
3487 * are doing WOL, then we may need to program the
3488 * routing regs. We would also need to issue the mailbox
3489 * commands to instruct the MPI what to do per the ethtool
3490 * settings.
3491 */
3492
3493 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3494 WAKE_MCAST | WAKE_BCAST)) {
3495 QPRINTK(qdev, IFDOWN, ERR,
3496 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3497 qdev->wol);
3498 return -EINVAL;
3499 }
3500
3501 if (qdev->wol & WAKE_MAGIC) {
3502 status = ql_mb_wol_set_magic(qdev, 1);
3503 if (status) {
3504 QPRINTK(qdev, IFDOWN, ERR,
3505 "Failed to set magic packet on %s.\n",
3506 qdev->ndev->name);
3507 return status;
3508 } else
3509 QPRINTK(qdev, DRV, INFO,
3510 "Enabled magic packet successfully on %s.\n",
3511 qdev->ndev->name);
3512
3513 wol |= MB_WOL_MAGIC_PKT;
3514 }
3515
3516 if (qdev->wol) {
3517 wol |= MB_WOL_MODE_ON;
3518 status = ql_mb_wol_mode(qdev, wol);
3519 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3520 (status == 0) ? "Sucessfully set" : "Failed", wol,
3521 qdev->ndev->name);
3522 }
3523
3524 return status;
3525}
3526
3395static int ql_adapter_down(struct ql_adapter *qdev) 3527static int ql_adapter_down(struct ql_adapter *qdev)
3396{ 3528{
3397 int i, status = 0; 3529 int i, status = 0;
@@ -3497,6 +3629,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3497 struct rx_ring *rx_ring; 3629 struct rx_ring *rx_ring;
3498 struct tx_ring *tx_ring; 3630 struct tx_ring *tx_ring;
3499 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); 3631 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3632 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3633 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3634
3635 qdev->lbq_buf_order = get_order(lbq_buf_len);
3500 3636
3501 /* In a perfect world we have one RSS ring for each CPU 3637 /* In a perfect world we have one RSS ring for each CPU
3502 * and each has it's own vector. To do that we ask for 3638 * and each has it's own vector. To do that we ask for
@@ -3544,11 +3680,14 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3544 rx_ring->lbq_len = NUM_LARGE_BUFFERS; 3680 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3545 rx_ring->lbq_size = 3681 rx_ring->lbq_size =
3546 rx_ring->lbq_len * sizeof(__le64); 3682 rx_ring->lbq_len * sizeof(__le64);
3547 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; 3683 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3684 QPRINTK(qdev, IFUP, DEBUG,
3685 "lbq_buf_size %d, order = %d\n",
3686 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
3548 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 3687 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3549 rx_ring->sbq_size = 3688 rx_ring->sbq_size =
3550 rx_ring->sbq_len * sizeof(__le64); 3689 rx_ring->sbq_len * sizeof(__le64);
3551 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; 3690 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
3552 rx_ring->type = RX_Q; 3691 rx_ring->type = RX_Q;
3553 } else { 3692 } else {
3554 /* 3693 /*
@@ -3575,6 +3714,10 @@ static int qlge_open(struct net_device *ndev)
3575 int err = 0; 3714 int err = 0;
3576 struct ql_adapter *qdev = netdev_priv(ndev); 3715 struct ql_adapter *qdev = netdev_priv(ndev);
3577 3716
3717 err = ql_adapter_reset(qdev);
3718 if (err)
3719 return err;
3720
3578 err = ql_configure_rings(qdev); 3721 err = ql_configure_rings(qdev);
3579 if (err) 3722 if (err)
3580 return err; 3723 return err;
@@ -3594,14 +3737,63 @@ error_up:
3594 return err; 3737 return err;
3595} 3738}
3596 3739
3740static int ql_change_rx_buffers(struct ql_adapter *qdev)
3741{
3742 struct rx_ring *rx_ring;
3743 int i, status;
3744 u32 lbq_buf_len;
3745
3746 /* Wait for an oustanding reset to complete. */
3747 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3748 int i = 3;
3749 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3750 QPRINTK(qdev, IFUP, ERR,
3751 "Waiting for adapter UP...\n");
3752 ssleep(1);
3753 }
3754
3755 if (!i) {
3756 QPRINTK(qdev, IFUP, ERR,
3757 "Timed out waiting for adapter UP\n");
3758 return -ETIMEDOUT;
3759 }
3760 }
3761
3762 status = ql_adapter_down(qdev);
3763 if (status)
3764 goto error;
3765
3766 /* Get the new rx buffer size. */
3767 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3768 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3769 qdev->lbq_buf_order = get_order(lbq_buf_len);
3770
3771 for (i = 0; i < qdev->rss_ring_count; i++) {
3772 rx_ring = &qdev->rx_ring[i];
3773 /* Set the new size. */
3774 rx_ring->lbq_buf_size = lbq_buf_len;
3775 }
3776
3777 status = ql_adapter_up(qdev);
3778 if (status)
3779 goto error;
3780
3781 return status;
3782error:
3783 QPRINTK(qdev, IFUP, ALERT,
3784 "Driver up/down cycle failed, closing device.\n");
3785 set_bit(QL_ADAPTER_UP, &qdev->flags);
3786 dev_close(qdev->ndev);
3787 return status;
3788}
3789
3597static int qlge_change_mtu(struct net_device *ndev, int new_mtu) 3790static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3598{ 3791{
3599 struct ql_adapter *qdev = netdev_priv(ndev); 3792 struct ql_adapter *qdev = netdev_priv(ndev);
3793 int status;
3600 3794
3601 if (ndev->mtu == 1500 && new_mtu == 9000) { 3795 if (ndev->mtu == 1500 && new_mtu == 9000) {
3602 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); 3796 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3603 queue_delayed_work(qdev->workqueue,
3604 &qdev->mpi_port_cfg_work, 0);
3605 } else if (ndev->mtu == 9000 && new_mtu == 1500) { 3797 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3606 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); 3798 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3607 } else if ((ndev->mtu == 1500 && new_mtu == 1500) || 3799 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3609,15 +3801,60 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3609 return 0; 3801 return 0;
3610 } else 3802 } else
3611 return -EINVAL; 3803 return -EINVAL;
3804
3805 queue_delayed_work(qdev->workqueue,
3806 &qdev->mpi_port_cfg_work, 3*HZ);
3807
3808 if (!netif_running(qdev->ndev)) {
3809 ndev->mtu = new_mtu;
3810 return 0;
3811 }
3812
3612 ndev->mtu = new_mtu; 3813 ndev->mtu = new_mtu;
3613 return 0; 3814 status = ql_change_rx_buffers(qdev);
3815 if (status) {
3816 QPRINTK(qdev, IFUP, ERR,
3817 "Changing MTU failed.\n");
3818 }
3819
3820 return status;
3614} 3821}
3615 3822
3616static struct net_device_stats *qlge_get_stats(struct net_device 3823static struct net_device_stats *qlge_get_stats(struct net_device
3617 *ndev) 3824 *ndev)
3618{ 3825{
3619 struct ql_adapter *qdev = netdev_priv(ndev); 3826 struct ql_adapter *qdev = netdev_priv(ndev);
3620 return &qdev->stats; 3827 struct rx_ring *rx_ring = &qdev->rx_ring[0];
3828 struct tx_ring *tx_ring = &qdev->tx_ring[0];
3829 unsigned long pkts, mcast, dropped, errors, bytes;
3830 int i;
3831
3832 /* Get RX stats. */
3833 pkts = mcast = dropped = errors = bytes = 0;
3834 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
3835 pkts += rx_ring->rx_packets;
3836 bytes += rx_ring->rx_bytes;
3837 dropped += rx_ring->rx_dropped;
3838 errors += rx_ring->rx_errors;
3839 mcast += rx_ring->rx_multicast;
3840 }
3841 ndev->stats.rx_packets = pkts;
3842 ndev->stats.rx_bytes = bytes;
3843 ndev->stats.rx_dropped = dropped;
3844 ndev->stats.rx_errors = errors;
3845 ndev->stats.multicast = mcast;
3846
3847 /* Get TX stats. */
3848 pkts = errors = bytes = 0;
3849 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
3850 pkts += tx_ring->tx_packets;
3851 bytes += tx_ring->tx_bytes;
3852 errors += tx_ring->tx_errors;
3853 }
3854 ndev->stats.tx_packets = pkts;
3855 ndev->stats.tx_bytes = bytes;
3856 ndev->stats.tx_errors = errors;
3857 return &ndev->stats;
3621} 3858}
3622 3859
3623static void qlge_set_multicast_list(struct net_device *ndev) 3860static void qlge_set_multicast_list(struct net_device *ndev)
@@ -3714,9 +3951,6 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
3714 struct sockaddr *addr = p; 3951 struct sockaddr *addr = p;
3715 int status; 3952 int status;
3716 3953
3717 if (netif_running(ndev))
3718 return -EBUSY;
3719
3720 if (!is_valid_ether_addr(addr->sa_data)) 3954 if (!is_valid_ether_addr(addr->sa_data))
3721 return -EADDRNOTAVAIL; 3955 return -EADDRNOTAVAIL;
3722 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3956 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
@@ -3868,8 +4102,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3868 struct net_device *ndev, int cards_found) 4102 struct net_device *ndev, int cards_found)
3869{ 4103{
3870 struct ql_adapter *qdev = netdev_priv(ndev); 4104 struct ql_adapter *qdev = netdev_priv(ndev);
3871 int pos, err = 0; 4105 int err = 0;
3872 u16 val16;
3873 4106
3874 memset((void *)qdev, 0, sizeof(*qdev)); 4107 memset((void *)qdev, 0, sizeof(*qdev));
3875 err = pci_enable_device(pdev); 4108 err = pci_enable_device(pdev);
@@ -3881,18 +4114,12 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3881 qdev->ndev = ndev; 4114 qdev->ndev = ndev;
3882 qdev->pdev = pdev; 4115 qdev->pdev = pdev;
3883 pci_set_drvdata(pdev, ndev); 4116 pci_set_drvdata(pdev, ndev);
3884 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 4117
3885 if (pos <= 0) { 4118 /* Set PCIe read request size */
3886 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 4119 err = pcie_set_readrq(pdev, 4096);
3887 "aborting.\n"); 4120 if (err) {
3888 return pos; 4121 dev_err(&pdev->dev, "Set readrq failed.\n");
3889 } else { 4122 goto err_out;
3890 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3891 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3892 val16 |= (PCI_EXP_DEVCTL_CERE |
3893 PCI_EXP_DEVCTL_NFERE |
3894 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3895 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3896 } 4123 }
3897 4124
3898 err = pci_request_regions(pdev, DRV_NAME); 4125 err = pci_request_regions(pdev, DRV_NAME);
@@ -3991,7 +4218,6 @@ err_out:
3991 return err; 4218 return err;
3992} 4219}
3993 4220
3994
3995static const struct net_device_ops qlge_netdev_ops = { 4221static const struct net_device_ops qlge_netdev_ops = {
3996 .ndo_open = qlge_open, 4222 .ndo_open = qlge_open,
3997 .ndo_stop = qlge_close, 4223 .ndo_stop = qlge_close,
@@ -4002,9 +4228,9 @@ static const struct net_device_ops qlge_netdev_ops = {
4002 .ndo_set_mac_address = qlge_set_mac_address, 4228 .ndo_set_mac_address = qlge_set_mac_address,
4003 .ndo_validate_addr = eth_validate_addr, 4229 .ndo_validate_addr = eth_validate_addr,
4004 .ndo_tx_timeout = qlge_tx_timeout, 4230 .ndo_tx_timeout = qlge_tx_timeout,
4005 .ndo_vlan_rx_register = ql_vlan_rx_register, 4231 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4006 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid, 4232 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4007 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid, 4233 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4008}; 4234};
4009 4235
4010static int __devinit qlge_probe(struct pci_dev *pdev, 4236static int __devinit qlge_probe(struct pci_dev *pdev,
@@ -4060,10 +4286,21 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4060 } 4286 }
4061 ql_link_off(qdev); 4287 ql_link_off(qdev);
4062 ql_display_dev_info(ndev); 4288 ql_display_dev_info(ndev);
4289 atomic_set(&qdev->lb_count, 0);
4063 cards_found++; 4290 cards_found++;
4064 return 0; 4291 return 0;
4065} 4292}
4066 4293
4294netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4295{
4296 return qlge_send(skb, ndev);
4297}
4298
4299int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4300{
4301 return ql_clean_inbound_rx_ring(rx_ring, budget);
4302}
4303
4067static void __devexit qlge_remove(struct pci_dev *pdev) 4304static void __devexit qlge_remove(struct pci_dev *pdev)
4068{ 4305{
4069 struct net_device *ndev = pci_get_drvdata(pdev); 4306 struct net_device *ndev = pci_get_drvdata(pdev);
@@ -4193,6 +4430,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4193 return err; 4430 return err;
4194 } 4431 }
4195 4432
4433 ql_wol(qdev);
4196 err = pci_save_state(pdev); 4434 err = pci_save_state(pdev);
4197 if (err) 4435 if (err)
4198 return err; 4436 return err;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index aec05f266107..e2b2286102d4 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,25 +1,5 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3static void ql_display_mb_sts(struct ql_adapter *qdev,
4 struct mbox_params *mbcp)
5{
6 int i;
7 static char *err_sts[] = {
8 "Command Complete",
9 "Command Not Supported",
10 "Host Interface Error",
11 "Checksum Error",
12 "Unused Completion Status",
13 "Test Failed",
14 "Command Parameter Error"};
15
16 QPRINTK(qdev, DRV, DEBUG, "%s.\n",
17 err_sts[mbcp->mbox_out[0] & 0x0000000f]);
18 for (i = 0; i < mbcp->out_count; i++)
19 QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
20 i, mbcp->mbox_out[i]);
21}
22
23int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 3int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
24{ 4{
25 int status; 5 int status;
@@ -317,6 +297,7 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
317 } else { 297 } else {
318 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n", 298 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
319 mbcp->mbox_out[1]); 299 mbcp->mbox_out[1]);
300 qdev->fw_rev_id = mbcp->mbox_out[1];
320 status = ql_cam_route_initialize(qdev); 301 status = ql_cam_route_initialize(qdev);
321 if (status) 302 if (status)
322 QPRINTK(qdev, IFUP, ERR, 303 QPRINTK(qdev, IFUP, ERR,
@@ -446,6 +427,9 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
446 ql_aen_lost(qdev, mbcp); 427 ql_aen_lost(qdev, mbcp);
447 break; 428 break;
448 429
430 case AEN_DCBX_CHG:
431 /* Need to support AEN 8110 */
432 break;
449 default: 433 default:
450 QPRINTK(qdev, DRV, ERR, 434 QPRINTK(qdev, DRV, ERR,
451 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); 435 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
@@ -537,7 +521,6 @@ done:
537 MB_CMD_STS_GOOD) && 521 MB_CMD_STS_GOOD) &&
538 ((mbcp->mbox_out[0] & 0x0000f000) != 522 ((mbcp->mbox_out[0] & 0x0000f000) !=
539 MB_CMD_STS_INTRMDT)) { 523 MB_CMD_STS_INTRMDT)) {
540 ql_display_mb_sts(qdev, mbcp);
541 status = -EIO; 524 status = -EIO;
542 } 525 }
543end: 526end:
@@ -655,7 +638,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
655 * for the current port. 638 * for the current port.
656 * Most likely will block. 639 * Most likely will block.
657 */ 640 */
658static int ql_mb_set_port_cfg(struct ql_adapter *qdev) 641int ql_mb_set_port_cfg(struct ql_adapter *qdev)
659{ 642{
660 struct mbox_params mbc; 643 struct mbox_params mbc;
661 struct mbox_params *mbcp = &mbc; 644 struct mbox_params *mbcp = &mbc;
@@ -690,7 +673,7 @@ static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
690 * for the current port. 673 * for the current port.
691 * Most likely will block. 674 * Most likely will block.
692 */ 675 */
693static int ql_mb_get_port_cfg(struct ql_adapter *qdev) 676int ql_mb_get_port_cfg(struct ql_adapter *qdev)
694{ 677{
695 struct mbox_params mbc; 678 struct mbox_params mbc;
696 struct mbox_params *mbcp = &mbc; 679 struct mbox_params *mbcp = &mbc;
@@ -720,6 +703,76 @@ static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
720 return status; 703 return status;
721} 704}
722 705
706int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
707{
708 struct mbox_params mbc;
709 struct mbox_params *mbcp = &mbc;
710 int status;
711
712 memset(mbcp, 0, sizeof(struct mbox_params));
713
714 mbcp->in_count = 2;
715 mbcp->out_count = 1;
716
717 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
718 mbcp->mbox_in[1] = wol;
719
720
721 status = ql_mailbox_command(qdev, mbcp);
722 if (status)
723 return status;
724
725 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
726 QPRINTK(qdev, DRV, ERR,
727 "Failed to set WOL mode.\n");
728 status = -EIO;
729 }
730 return status;
731}
732
733int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
734{
735 struct mbox_params mbc;
736 struct mbox_params *mbcp = &mbc;
737 int status;
738 u8 *addr = qdev->ndev->dev_addr;
739
740 memset(mbcp, 0, sizeof(struct mbox_params));
741
742 mbcp->in_count = 8;
743 mbcp->out_count = 1;
744
745 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
746 if (enable_wol) {
747 mbcp->mbox_in[1] = (u32)addr[0];
748 mbcp->mbox_in[2] = (u32)addr[1];
749 mbcp->mbox_in[3] = (u32)addr[2];
750 mbcp->mbox_in[4] = (u32)addr[3];
751 mbcp->mbox_in[5] = (u32)addr[4];
752 mbcp->mbox_in[6] = (u32)addr[5];
753 mbcp->mbox_in[7] = 0;
754 } else {
755 mbcp->mbox_in[1] = 0;
756 mbcp->mbox_in[2] = 1;
757 mbcp->mbox_in[3] = 1;
758 mbcp->mbox_in[4] = 1;
759 mbcp->mbox_in[5] = 1;
760 mbcp->mbox_in[6] = 1;
761 mbcp->mbox_in[7] = 0;
762 }
763
764 status = ql_mailbox_command(qdev, mbcp);
765 if (status)
766 return status;
767
768 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
769 QPRINTK(qdev, DRV, ERR,
770 "Failed to set WOL mode.\n");
771 status = -EIO;
772 }
773 return status;
774}
775
723/* IDC - Inter Device Communication... 776/* IDC - Inter Device Communication...
724 * Some firmware commands require consent of adjacent FCOE 777 * Some firmware commands require consent of adjacent FCOE
725 * function. This function waits for the OK, or a 778 * function. This function waits for the OK, or a
@@ -769,6 +822,61 @@ static int ql_idc_wait(struct ql_adapter *qdev)
769 return status; 822 return status;
770} 823}
771 824
825int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
826{
827 struct mbox_params mbc;
828 struct mbox_params *mbcp = &mbc;
829 int status;
830
831 memset(mbcp, 0, sizeof(struct mbox_params));
832
833 mbcp->in_count = 2;
834 mbcp->out_count = 1;
835
836 mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
837 mbcp->mbox_in[1] = led_config;
838
839
840 status = ql_mailbox_command(qdev, mbcp);
841 if (status)
842 return status;
843
844 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
845 QPRINTK(qdev, DRV, ERR,
846 "Failed to set LED Configuration.\n");
847 status = -EIO;
848 }
849
850 return status;
851}
852
853int ql_mb_get_led_cfg(struct ql_adapter *qdev)
854{
855 struct mbox_params mbc;
856 struct mbox_params *mbcp = &mbc;
857 int status;
858
859 memset(mbcp, 0, sizeof(struct mbox_params));
860
861 mbcp->in_count = 1;
862 mbcp->out_count = 2;
863
864 mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
865
866 status = ql_mailbox_command(qdev, mbcp);
867 if (status)
868 return status;
869
870 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
871 QPRINTK(qdev, DRV, ERR,
872 "Failed to get LED Configuration.\n");
873 status = -EIO;
874 } else
875 qdev->led_config = mbcp->mbox_out[1];
876
877 return status;
878}
879
772int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) 880int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
773{ 881{
774 struct mbox_params mbc; 882 struct mbox_params mbc;
@@ -930,8 +1038,11 @@ void ql_mpi_idc_work(struct work_struct *work)
930 int status; 1038 int status;
931 struct mbox_params *mbcp = &qdev->idc_mbc; 1039 struct mbox_params *mbcp = &qdev->idc_mbc;
932 u32 aen; 1040 u32 aen;
1041 int timeout;
933 1042
1043 rtnl_lock();
934 aen = mbcp->mbox_out[1] >> 16; 1044 aen = mbcp->mbox_out[1] >> 16;
1045 timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
935 1046
936 switch (aen) { 1047 switch (aen) {
937 default: 1048 default:
@@ -939,22 +1050,61 @@ void ql_mpi_idc_work(struct work_struct *work)
939 "Bug: Unhandled IDC action.\n"); 1050 "Bug: Unhandled IDC action.\n");
940 break; 1051 break;
941 case MB_CMD_PORT_RESET: 1052 case MB_CMD_PORT_RESET:
942 case MB_CMD_SET_PORT_CFG:
943 case MB_CMD_STOP_FW: 1053 case MB_CMD_STOP_FW:
944 ql_link_off(qdev); 1054 ql_link_off(qdev);
1055 case MB_CMD_SET_PORT_CFG:
945 /* Signal the resulting link up AEN 1056 /* Signal the resulting link up AEN
946 * that the frame routing and mac addr 1057 * that the frame routing and mac addr
947 * needs to be set. 1058 * needs to be set.
948 * */ 1059 * */
949 set_bit(QL_CAM_RT_SET, &qdev->flags); 1060 set_bit(QL_CAM_RT_SET, &qdev->flags);
950 rtnl_lock(); 1061 /* Do ACK if required */
951 status = ql_mb_idc_ack(qdev); 1062 if (timeout) {
952 rtnl_unlock(); 1063 status = ql_mb_idc_ack(qdev);
953 if (status) { 1064 if (status)
954 QPRINTK(qdev, DRV, ERR, 1065 QPRINTK(qdev, DRV, ERR,
955 "Bug: No pending IDC!\n"); 1066 "Bug: No pending IDC!\n");
1067 } else {
1068 QPRINTK(qdev, DRV, DEBUG,
1069 "IDC ACK not required\n");
1070 status = 0; /* success */
956 } 1071 }
1072 break;
1073
1074 /* These sub-commands issued by another (FCoE)
1075 * function are requesting to do an operation
1076 * on the shared resource (MPI environment).
1077 * We currently don't issue these so we just
1078 * ACK the request.
1079 */
1080 case MB_CMD_IOP_RESTART_MPI:
1081 case MB_CMD_IOP_PREP_LINK_DOWN:
1082 /* Drop the link, reload the routing
1083 * table when link comes up.
1084 */
1085 ql_link_off(qdev);
1086 set_bit(QL_CAM_RT_SET, &qdev->flags);
1087 /* Fall through. */
1088 case MB_CMD_IOP_DVR_START:
1089 case MB_CMD_IOP_FLASH_ACC:
1090 case MB_CMD_IOP_CORE_DUMP_MPI:
1091 case MB_CMD_IOP_PREP_UPDATE_MPI:
1092 case MB_CMD_IOP_COMP_UPDATE_MPI:
1093 case MB_CMD_IOP_NONE: /* an IDC without params */
1094 /* Do ACK if required */
1095 if (timeout) {
1096 status = ql_mb_idc_ack(qdev);
1097 if (status)
1098 QPRINTK(qdev, DRV, ERR,
1099 "Bug: No pending IDC!\n");
1100 } else {
1101 QPRINTK(qdev, DRV, DEBUG,
1102 "IDC ACK not required\n");
1103 status = 0; /* success */
1104 }
1105 break;
957 } 1106 }
1107 rtnl_unlock();
958} 1108}
959 1109
960void ql_mpi_work(struct work_struct *work) 1110void ql_mpi_work(struct work_struct *work)
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 8b14c6eda7c3..0f30ea4e97ec 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -842,7 +842,7 @@ static int r6040_open(struct net_device *dev)
842 int ret; 842 int ret;
843 843
844 /* Request IRQ and Register interrupt handler */ 844 /* Request IRQ and Register interrupt handler */
845 ret = request_irq(dev->irq, &r6040_interrupt, 845 ret = request_irq(dev->irq, r6040_interrupt,
846 IRQF_SHARED, dev->name, dev); 846 IRQF_SHARED, dev->name, dev);
847 if (ret) 847 if (ret)
848 return ret; 848 return ret;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b9221bdc7184..5dba9fa2bc19 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -794,7 +794,7 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
794 struct rtl8169_private *tp = netdev_priv(dev); 794 struct rtl8169_private *tp = netdev_priv(dev);
795 void __iomem *ioaddr = tp->mmio_addr; 795 void __iomem *ioaddr = tp->mmio_addr;
796 unsigned int i; 796 unsigned int i;
797 static struct { 797 static const struct {
798 u32 opt; 798 u32 opt;
799 u16 reg; 799 u16 reg;
800 u8 mask; 800 u8 mask;
@@ -1277,7 +1277,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1277 * 1277 *
1278 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec 1278 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
1279 */ 1279 */
1280 const struct { 1280 static const struct {
1281 u32 mask; 1281 u32 mask;
1282 u32 val; 1282 u32 val;
1283 int mac_version; 1283 int mac_version;
@@ -1351,7 +1351,7 @@ struct phy_reg {
1351 u16 val; 1351 u16 val;
1352}; 1352};
1353 1353
1354static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len) 1354static void rtl_phy_write(void __iomem *ioaddr, const struct phy_reg *regs, int len)
1355{ 1355{
1356 while (len-- > 0) { 1356 while (len-- > 0) {
1357 mdio_write(ioaddr, regs->reg, regs->val); 1357 mdio_write(ioaddr, regs->reg, regs->val);
@@ -1361,7 +1361,7 @@ static void rtl_phy_write(void __iomem *ioaddr, struct phy_reg *regs, int len)
1361 1361
1362static void rtl8169s_hw_phy_config(void __iomem *ioaddr) 1362static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
1363{ 1363{
1364 struct phy_reg phy_reg_init[] = { 1364 static const struct phy_reg phy_reg_init[] = {
1365 { 0x1f, 0x0001 }, 1365 { 0x1f, 0x0001 },
1366 { 0x06, 0x006e }, 1366 { 0x06, 0x006e },
1367 { 0x08, 0x0708 }, 1367 { 0x08, 0x0708 },
@@ -1428,7 +1428,7 @@ static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
1428 1428
1429static void rtl8169sb_hw_phy_config(void __iomem *ioaddr) 1429static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
1430{ 1430{
1431 struct phy_reg phy_reg_init[] = { 1431 static const struct phy_reg phy_reg_init[] = {
1432 { 0x1f, 0x0002 }, 1432 { 0x1f, 0x0002 },
1433 { 0x01, 0x90d0 }, 1433 { 0x01, 0x90d0 },
1434 { 0x1f, 0x0000 } 1434 { 0x1f, 0x0000 }
@@ -1457,7 +1457,7 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp,
1457static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, 1457static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
1458 void __iomem *ioaddr) 1458 void __iomem *ioaddr)
1459{ 1459{
1460 struct phy_reg phy_reg_init[] = { 1460 static const struct phy_reg phy_reg_init[] = {
1461 { 0x1f, 0x0001 }, 1461 { 0x1f, 0x0001 },
1462 { 0x04, 0x0000 }, 1462 { 0x04, 0x0000 },
1463 { 0x03, 0x00a1 }, 1463 { 0x03, 0x00a1 },
@@ -1504,7 +1504,7 @@ static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
1504 1504
1505static void rtl8169sce_hw_phy_config(void __iomem *ioaddr) 1505static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
1506{ 1506{
1507 struct phy_reg phy_reg_init[] = { 1507 static const struct phy_reg phy_reg_init[] = {
1508 { 0x1f, 0x0001 }, 1508 { 0x1f, 0x0001 },
1509 { 0x04, 0x0000 }, 1509 { 0x04, 0x0000 },
1510 { 0x03, 0x00a1 }, 1510 { 0x03, 0x00a1 },
@@ -1557,7 +1557,7 @@ static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
1557 1557
1558static void rtl8168bb_hw_phy_config(void __iomem *ioaddr) 1558static void rtl8168bb_hw_phy_config(void __iomem *ioaddr)
1559{ 1559{
1560 struct phy_reg phy_reg_init[] = { 1560 static const struct phy_reg phy_reg_init[] = {
1561 { 0x10, 0xf41b }, 1561 { 0x10, 0xf41b },
1562 { 0x1f, 0x0000 } 1562 { 0x1f, 0x0000 }
1563 }; 1563 };
@@ -1570,7 +1570,7 @@ static void rtl8168bb_hw_phy_config(void __iomem *ioaddr)
1570 1570
1571static void rtl8168bef_hw_phy_config(void __iomem *ioaddr) 1571static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
1572{ 1572{
1573 struct phy_reg phy_reg_init[] = { 1573 static const struct phy_reg phy_reg_init[] = {
1574 { 0x1f, 0x0001 }, 1574 { 0x1f, 0x0001 },
1575 { 0x10, 0xf41b }, 1575 { 0x10, 0xf41b },
1576 { 0x1f, 0x0000 } 1576 { 0x1f, 0x0000 }
@@ -1581,7 +1581,7 @@ static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
1581 1581
1582static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr) 1582static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
1583{ 1583{
1584 struct phy_reg phy_reg_init[] = { 1584 static const struct phy_reg phy_reg_init[] = {
1585 { 0x1f, 0x0000 }, 1585 { 0x1f, 0x0000 },
1586 { 0x1d, 0x0f00 }, 1586 { 0x1d, 0x0f00 },
1587 { 0x1f, 0x0002 }, 1587 { 0x1f, 0x0002 },
@@ -1594,7 +1594,7 @@ static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
1594 1594
1595static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr) 1595static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
1596{ 1596{
1597 struct phy_reg phy_reg_init[] = { 1597 static const struct phy_reg phy_reg_init[] = {
1598 { 0x1f, 0x0001 }, 1598 { 0x1f, 0x0001 },
1599 { 0x1d, 0x3d98 }, 1599 { 0x1d, 0x3d98 },
1600 { 0x1f, 0x0000 } 1600 { 0x1f, 0x0000 }
@@ -1609,7 +1609,7 @@ static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
1609 1609
1610static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr) 1610static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
1611{ 1611{
1612 struct phy_reg phy_reg_init[] = { 1612 static const struct phy_reg phy_reg_init[] = {
1613 { 0x1f, 0x0001 }, 1613 { 0x1f, 0x0001 },
1614 { 0x12, 0x2300 }, 1614 { 0x12, 0x2300 },
1615 { 0x1f, 0x0002 }, 1615 { 0x1f, 0x0002 },
@@ -1638,7 +1638,7 @@ static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
1638 1638
1639static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr) 1639static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
1640{ 1640{
1641 struct phy_reg phy_reg_init[] = { 1641 static const struct phy_reg phy_reg_init[] = {
1642 { 0x1f, 0x0001 }, 1642 { 0x1f, 0x0001 },
1643 { 0x12, 0x2300 }, 1643 { 0x12, 0x2300 },
1644 { 0x03, 0x802f }, 1644 { 0x03, 0x802f },
@@ -1666,7 +1666,7 @@ static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
1666 1666
1667static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr) 1667static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr)
1668{ 1668{
1669 struct phy_reg phy_reg_init[] = { 1669 static const struct phy_reg phy_reg_init[] = {
1670 { 0x1f, 0x0001 }, 1670 { 0x1f, 0x0001 },
1671 { 0x12, 0x2300 }, 1671 { 0x12, 0x2300 },
1672 { 0x1d, 0x3d98 }, 1672 { 0x1d, 0x3d98 },
@@ -1693,7 +1693,7 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr)
1693 1693
1694static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr) 1694static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
1695{ 1695{
1696 static struct phy_reg phy_reg_init_0[] = { 1696 static const struct phy_reg phy_reg_init_0[] = {
1697 { 0x1f, 0x0001 }, 1697 { 0x1f, 0x0001 },
1698 { 0x06, 0x4064 }, 1698 { 0x06, 0x4064 },
1699 { 0x07, 0x2863 }, 1699 { 0x07, 0x2863 },
@@ -1712,14 +1712,14 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
1712 { 0x1a, 0x05ad }, 1712 { 0x1a, 0x05ad },
1713 { 0x14, 0x94c0 } 1713 { 0x14, 0x94c0 }
1714 }; 1714 };
1715 static struct phy_reg phy_reg_init_1[] = { 1715 static const struct phy_reg phy_reg_init_1[] = {
1716 { 0x1f, 0x0002 }, 1716 { 0x1f, 0x0002 },
1717 { 0x06, 0x5561 }, 1717 { 0x06, 0x5561 },
1718 { 0x1f, 0x0005 }, 1718 { 0x1f, 0x0005 },
1719 { 0x05, 0x8332 }, 1719 { 0x05, 0x8332 },
1720 { 0x06, 0x5561 } 1720 { 0x06, 0x5561 }
1721 }; 1721 };
1722 static struct phy_reg phy_reg_init_2[] = { 1722 static const struct phy_reg phy_reg_init_2[] = {
1723 { 0x1f, 0x0005 }, 1723 { 0x1f, 0x0005 },
1724 { 0x05, 0xffc2 }, 1724 { 0x05, 0xffc2 },
1725 { 0x1f, 0x0005 }, 1725 { 0x1f, 0x0005 },
@@ -2084,7 +2084,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
2084 rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); 2084 rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1));
2085 2085
2086 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2086 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
2087 struct phy_reg phy_reg_init[] = { 2087 static const struct phy_reg phy_reg_init[] = {
2088 { 0x1f, 0x0002 }, 2088 { 0x1f, 0x0002 },
2089 { 0x05, 0x669a }, 2089 { 0x05, 0x669a },
2090 { 0x1f, 0x0005 }, 2090 { 0x1f, 0x0005 },
@@ -2099,7 +2099,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
2099 val = mdio_read(ioaddr, 0x0d); 2099 val = mdio_read(ioaddr, 0x0d);
2100 2100
2101 if ((val & 0x00ff) != 0x006c) { 2101 if ((val & 0x00ff) != 0x006c) {
2102 u32 set[] = { 2102 static const u32 set[] = {
2103 0x0065, 0x0066, 0x0067, 0x0068, 2103 0x0065, 0x0066, 0x0067, 0x0068,
2104 0x0069, 0x006a, 0x006b, 0x006c 2104 0x0069, 0x006a, 0x006b, 0x006c
2105 }; 2105 };
@@ -2112,7 +2112,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
2112 mdio_write(ioaddr, 0x0d, val | set[i]); 2112 mdio_write(ioaddr, 0x0d, val | set[i]);
2113 } 2113 }
2114 } else { 2114 } else {
2115 struct phy_reg phy_reg_init[] = { 2115 static const struct phy_reg phy_reg_init[] = {
2116 { 0x1f, 0x0002 }, 2116 { 0x1f, 0x0002 },
2117 { 0x05, 0x6662 }, 2117 { 0x05, 0x6662 },
2118 { 0x1f, 0x0005 }, 2118 { 0x1f, 0x0005 },
@@ -2136,7 +2136,7 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
2136 2136
2137static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr) 2137static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2138{ 2138{
2139 static struct phy_reg phy_reg_init_0[] = { 2139 static const struct phy_reg phy_reg_init_0[] = {
2140 { 0x1f, 0x0001 }, 2140 { 0x1f, 0x0001 },
2141 { 0x06, 0x4064 }, 2141 { 0x06, 0x4064 },
2142 { 0x07, 0x2863 }, 2142 { 0x07, 0x2863 },
@@ -2161,7 +2161,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2161 { 0x05, 0x8332 }, 2161 { 0x05, 0x8332 },
2162 { 0x06, 0x5561 } 2162 { 0x06, 0x5561 }
2163 }; 2163 };
2164 static struct phy_reg phy_reg_init_1[] = { 2164 static const struct phy_reg phy_reg_init_1[] = {
2165 { 0x1f, 0x0005 }, 2165 { 0x1f, 0x0005 },
2166 { 0x05, 0xffc2 }, 2166 { 0x05, 0xffc2 },
2167 { 0x1f, 0x0005 }, 2167 { 0x1f, 0x0005 },
@@ -2477,7 +2477,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2477 rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2477 rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2478 2478
2479 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2479 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
2480 struct phy_reg phy_reg_init[] = { 2480 static const struct phy_reg phy_reg_init[] = {
2481 { 0x1f, 0x0002 }, 2481 { 0x1f, 0x0002 },
2482 { 0x05, 0x669a }, 2482 { 0x05, 0x669a },
2483 { 0x1f, 0x0005 }, 2483 { 0x1f, 0x0005 },
@@ -2505,7 +2505,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2505 mdio_write(ioaddr, 0x0d, val | set[i]); 2505 mdio_write(ioaddr, 0x0d, val | set[i]);
2506 } 2506 }
2507 } else { 2507 } else {
2508 struct phy_reg phy_reg_init[] = { 2508 static const struct phy_reg phy_reg_init[] = {
2509 { 0x1f, 0x0002 }, 2509 { 0x1f, 0x0002 },
2510 { 0x05, 0x2642 }, 2510 { 0x05, 0x2642 },
2511 { 0x1f, 0x0005 }, 2511 { 0x1f, 0x0005 },
@@ -2531,7 +2531,7 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2531 2531
2532static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr) 2532static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
2533{ 2533{
2534 struct phy_reg phy_reg_init[] = { 2534 static const struct phy_reg phy_reg_init[] = {
2535 { 0x1f, 0x0002 }, 2535 { 0x1f, 0x0002 },
2536 { 0x10, 0x0008 }, 2536 { 0x10, 0x0008 },
2537 { 0x0d, 0x006c }, 2537 { 0x0d, 0x006c },
@@ -2592,7 +2592,7 @@ static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
2592 2592
2593static void rtl8102e_hw_phy_config(void __iomem *ioaddr) 2593static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
2594{ 2594{
2595 struct phy_reg phy_reg_init[] = { 2595 static const struct phy_reg phy_reg_init[] = {
2596 { 0x1f, 0x0003 }, 2596 { 0x1f, 0x0003 },
2597 { 0x08, 0x441d }, 2597 { 0x08, 0x441d },
2598 { 0x01, 0x9100 }, 2598 { 0x01, 0x9100 },
@@ -3384,7 +3384,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
3384 3384
3385static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 3385static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3386{ 3386{
3387 struct { 3387 static const struct {
3388 u32 mac_version; 3388 u32 mac_version;
3389 u32 clk; 3389 u32 clk;
3390 u32 val; 3390 u32 val;
@@ -3508,7 +3508,7 @@ struct ephy_info {
3508 u16 bits; 3508 u16 bits;
3509}; 3509};
3510 3510
3511static void rtl_ephy_init(void __iomem *ioaddr, struct ephy_info *e, int len) 3511static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
3512{ 3512{
3513 u16 w; 3513 u16 w;
3514 3514
@@ -3579,7 +3579,7 @@ static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
3579 3579
3580static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) 3580static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
3581{ 3581{
3582 static struct ephy_info e_info_8168cp[] = { 3582 static const struct ephy_info e_info_8168cp[] = {
3583 { 0x01, 0, 0x0001 }, 3583 { 0x01, 0, 0x0001 },
3584 { 0x02, 0x0800, 0x1000 }, 3584 { 0x02, 0x0800, 0x1000 },
3585 { 0x03, 0, 0x0042 }, 3585 { 0x03, 0, 0x0042 },
@@ -3623,7 +3623,7 @@ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
3623 3623
3624static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) 3624static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
3625{ 3625{
3626 static struct ephy_info e_info_8168c_1[] = { 3626 static const struct ephy_info e_info_8168c_1[] = {
3627 { 0x02, 0x0800, 0x1000 }, 3627 { 0x02, 0x0800, 0x1000 },
3628 { 0x03, 0, 0x0002 }, 3628 { 0x03, 0, 0x0002 },
3629 { 0x06, 0x0080, 0x0000 } 3629 { 0x06, 0x0080, 0x0000 }
@@ -3640,7 +3640,7 @@ static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
3640 3640
3641static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) 3641static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
3642{ 3642{
3643 static struct ephy_info e_info_8168c_2[] = { 3643 static const struct ephy_info e_info_8168c_2[] = {
3644 { 0x01, 0, 0x0001 }, 3644 { 0x01, 0, 0x0001 },
3645 { 0x03, 0x0400, 0x0220 } 3645 { 0x03, 0x0400, 0x0220 }
3646 }; 3646 };
@@ -3783,7 +3783,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
3783 3783
3784static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) 3784static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3785{ 3785{
3786 static struct ephy_info e_info_8102e_1[] = { 3786 static const struct ephy_info e_info_8102e_1[] = {
3787 { 0x01, 0, 0x6e65 }, 3787 { 0x01, 0, 0x6e65 },
3788 { 0x02, 0, 0x091f }, 3788 { 0x02, 0, 0x091f },
3789 { 0x03, 0, 0xc2f9 }, 3789 { 0x03, 0, 0xc2f9 },
@@ -4443,13 +4443,12 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
4443 if (pkt_size >= rx_copybreak) 4443 if (pkt_size >= rx_copybreak)
4444 goto out; 4444 goto out;
4445 4445
4446 skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN); 4446 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
4447 if (!skb) 4447 if (!skb)
4448 goto out; 4448 goto out;
4449 4449
4450 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, 4450 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
4451 PCI_DMA_FROMDEVICE); 4451 PCI_DMA_FROMDEVICE);
4452 skb_reserve(skb, NET_IP_ALIGN);
4453 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); 4452 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
4454 *sk_buff = skb; 4453 *sk_buff = skb;
4455 done = true; 4454 done = true;
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 4525cbe8dd69..d81706e91aa7 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -984,7 +984,7 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
984 pd->rx_dma = DMA_MASK_DMAC(i); 984 pd->rx_dma = DMA_MASK_DMAC(i);
985 pd->rx_chan = DMA_INDEX_CHNL(i); 985 pd->rx_chan = DMA_INDEX_CHNL(i);
986 pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start; 986 pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
987 res = request_irq(dev->irq, &s6gmac_interrupt, 0, dev->name, dev); 987 res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
988 if (res) { 988 if (res) {
989 printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq); 989 printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
990 goto errirq; 990 goto errirq;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index c9c70ab0cce0..9f83a1197375 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -973,7 +973,7 @@ sb1000_open(struct net_device *dev)
973 lp->rx_frame_id[1] = 0; 973 lp->rx_frame_id[1] = 0;
974 lp->rx_frame_id[2] = 0; 974 lp->rx_frame_id[2] = 0;
975 lp->rx_frame_id[3] = 0; 975 lp->rx_frame_id[3] = 0;
976 if (request_irq(dev->irq, &sb1000_interrupt, 0, "sb1000", dev)) { 976 if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
977 return -EAGAIN; 977 return -EAGAIN;
978 } 978 }
979 979
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 508551f1b3fc..564d4d7f855b 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1476,7 +1476,6 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1476 V_MAC_TX_RL_THRSH(4) | 1476 V_MAC_TX_RL_THRSH(4) |
1477 V_MAC_RX_PL_THRSH(4) | 1477 V_MAC_RX_PL_THRSH(4) |
1478 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ 1478 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1479 V_MAC_RX_PL_THRSH(4) |
1480 V_MAC_RX_RL_THRSH(8) | 1479 V_MAC_RX_RL_THRSH(8) |
1481 0; 1480 0;
1482 1481
@@ -2411,7 +2410,7 @@ static int sbmac_open(struct net_device *dev)
2411 */ 2410 */
2412 2411
2413 __raw_readq(sc->sbm_isr); 2412 __raw_readq(sc->sbm_isr);
2414 err = request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev); 2413 err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev);
2415 if (err) { 2414 if (err) {
2416 printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, 2415 printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
2417 dev->irq); 2416 dev->irq);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8d6030022d14..b7e0eb40a8bd 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -793,7 +793,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
793 793
794 rx_len -= rx_size_align + 4; 794 rx_len -= rx_size_align + 4;
795 795
796 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN); 796 skb = netdev_alloc_skb_ip_align(dev, pkt_size);
797 if (unlikely(!skb)) { 797 if (unlikely(!skb)) {
798 if (printk_ratelimit()) 798 if (printk_ratelimit())
799 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", 799 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -801,8 +801,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
801 goto next; 801 goto next;
802 } 802 }
803 803
804 skb_reserve(skb, NET_IP_ALIGN);
805
806 if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) { 804 if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
807 memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset), 805 memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
808 rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset); 806 rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 39246d457ac2..fe806bd9b95f 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -335,7 +335,7 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
335 335
336#if 0 336#if 0
337 { 337 {
338 int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev); 338 int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
339 if (irqval) { 339 if (irqval) {
340 printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name, 340 printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
341 dev->irq, irqval); 341 dev->irq, irqval);
@@ -367,7 +367,7 @@ static int seeq8005_open(struct net_device *dev)
367 struct net_local *lp = netdev_priv(dev); 367 struct net_local *lp = netdev_priv(dev);
368 368
369 { 369 {
370 int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev); 370 int irqval = request_irq(dev->irq, seeq8005_interrupt, 0, "seeq8005", dev);
371 if (irqval) { 371 if (irqval) {
372 printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name, 372 printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
373 dev->irq, irqval); 373 dev->irq, irqval);
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index b89f9be3cb13..7b52fe10d38f 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,6 +1,6 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \ 1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \
2 falcon_xmac.o selftest.o ethtool.o xfp_phy.o \ 2 falcon_xmac.o selftest.o ethtool.o qt202x_phy.o \
3 mdio_10g.o tenxpress.o boards.o sfe4001.o 3 mdio_10g.o tenxpress.o falcon_boards.o
4sfc-$(CONFIG_SFC_MTD) += mtd.o 4sfc-$(CONFIG_SFC_MTD) += mtd.o
5 5
6obj-$(CONFIG_SFC) += sfc.o 6obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d54d84c267b9..6ad909bba957 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -520,19 +520,6 @@ typedef union efx_oword {
520#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 520#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
521#endif 521#endif
522 522
523#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
524 if (falcon_rev(efx) >= FALCON_REV_B0) { \
525 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
526 } else { \
527 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
528 } \
529} while (0)
530
531#define EFX_QWORD_FIELD_VER(efx, qword, field) \
532 (falcon_rev(efx) >= FALCON_REV_B0 ? \
533 EFX_QWORD_FIELD((qword), field##_B0) : \
534 EFX_QWORD_FIELD((qword), field##_A1))
535
536/* Used to avoid compiler warnings about shift range exceeding width 523/* Used to avoid compiler warnings about shift range exceeding width
537 * of the data types when dma_addr_t is only 32 bits wide. 524 * of the data types when dma_addr_t is only 32 bits wide.
538 */ 525 */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
deleted file mode 100644
index 4a4c74c891b7..000000000000
--- a/drivers/net/sfc/boards.c
+++ /dev/null
@@ -1,328 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "phy.h"
12#include "boards.h"
13#include "efx.h"
14#include "workarounds.h"
15
16/* Macros for unpacking the board revision */
17/* The revision info is in host byte order. */
18#define BOARD_TYPE(_rev) (_rev >> 8)
19#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
20#define BOARD_MINOR(_rev) (_rev & 0xf)
21
22/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
23#define BLINK_INTERVAL (HZ/2)
24
25static void blink_led_timer(unsigned long context)
26{
27 struct efx_nic *efx = (struct efx_nic *)context;
28 struct efx_blinker *bl = &efx->board_info.blinker;
29 efx->board_info.set_id_led(efx, bl->state);
30 bl->state = !bl->state;
31 if (bl->resubmit)
32 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
33}
34
35static void board_blink(struct efx_nic *efx, bool blink)
36{
37 struct efx_blinker *blinker = &efx->board_info.blinker;
38
39 /* The rtnl mutex serialises all ethtool ioctls, so
40 * nothing special needs doing here. */
41 if (blink) {
42 blinker->resubmit = true;
43 blinker->state = false;
44 setup_timer(&blinker->timer, blink_led_timer,
45 (unsigned long)efx);
46 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
47 } else {
48 blinker->resubmit = false;
49 if (blinker->timer.function)
50 del_timer_sync(&blinker->timer);
51 efx->board_info.init_leds(efx);
52 }
53}
54
55/*****************************************************************************
56 * Support for LM87 sensor chip used on several boards
57 */
58#define LM87_REG_ALARMS1 0x41
59#define LM87_REG_ALARMS2 0x42
60#define LM87_IN_LIMITS(nr, _min, _max) \
61 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
62#define LM87_AIN_LIMITS(nr, _min, _max) \
63 0x3B + (nr), _max, 0x1A + (nr), _min
64#define LM87_TEMP_INT_LIMITS(_min, _max) \
65 0x39, _max, 0x3A, _min
66#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
67 0x37, _max, 0x38, _min
68
69#define LM87_ALARM_TEMP_INT 0x10
70#define LM87_ALARM_TEMP_EXT1 0x20
71
72#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
73
74static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
75 const u8 *reg_values)
76{
77 struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
78 int rc;
79
80 if (!client)
81 return -EIO;
82
83 while (*reg_values) {
84 u8 reg = *reg_values++;
85 u8 value = *reg_values++;
86 rc = i2c_smbus_write_byte_data(client, reg, value);
87 if (rc)
88 goto err;
89 }
90
91 efx->board_info.hwmon_client = client;
92 return 0;
93
94err:
95 i2c_unregister_device(client);
96 return rc;
97}
98
99static void efx_fini_lm87(struct efx_nic *efx)
100{
101 i2c_unregister_device(efx->board_info.hwmon_client);
102}
103
104static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
105{
106 struct i2c_client *client = efx->board_info.hwmon_client;
107 s32 alarms1, alarms2;
108
109 /* If link is up then do not monitor temperature */
110 if (EFX_WORKAROUND_7884(efx) && efx->link_up)
111 return 0;
112
113 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
114 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
115 if (alarms1 < 0)
116 return alarms1;
117 if (alarms2 < 0)
118 return alarms2;
119 alarms1 &= mask;
120 alarms2 &= mask >> 8;
121 if (alarms1 || alarms2) {
122 EFX_ERR(efx,
123 "LM87 detected a hardware failure (status %02x:%02x)"
124 "%s%s\n",
125 alarms1, alarms2,
126 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
127 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
128 return -ERANGE;
129 }
130
131 return 0;
132}
133
134#else /* !CONFIG_SENSORS_LM87 */
135
136static inline int
137efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
138 const u8 *reg_values)
139{
140 return 0;
141}
142static inline void efx_fini_lm87(struct efx_nic *efx)
143{
144}
145static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
146{
147 return 0;
148}
149
150#endif /* CONFIG_SENSORS_LM87 */
151
152/*****************************************************************************
153 * Support for the SFE4002
154 *
155 */
156static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
157
158static const u8 sfe4002_lm87_regs[] = {
159 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
160 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
161 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
162 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
163 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
164 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
165 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
166 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
167 LM87_TEMP_INT_LIMITS(10, 60), /* board */
168 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
169 0
170};
171
172static struct i2c_board_info sfe4002_hwmon_info = {
173 I2C_BOARD_INFO("lm87", 0x2e),
174 .platform_data = &sfe4002_lm87_channel,
175};
176
177/****************************************************************************/
178/* LED allocations. Note that on rev A0 boards the schematic and the reality
179 * differ: red and green are swapped. Below is the fixed (A1) layout (there
180 * are only 3 A0 boards in existence, so no real reason to make this
181 * conditional).
182 */
183#define SFE4002_FAULT_LED (2) /* Red */
184#define SFE4002_RX_LED (0) /* Green */
185#define SFE4002_TX_LED (1) /* Amber */
186
187static void sfe4002_init_leds(struct efx_nic *efx)
188{
189 /* Set the TX and RX LEDs to reflect status and activity, and the
190 * fault LED off */
191 xfp_set_led(efx, SFE4002_TX_LED,
192 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
193 xfp_set_led(efx, SFE4002_RX_LED,
194 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
195 xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
196}
197
198static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
199{
200 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
201 QUAKE_LED_OFF);
202}
203
204static int sfe4002_check_hw(struct efx_nic *efx)
205{
206 /* A0 board rev. 4002s report a temperature fault the whole time
207 * (bad sensor) so we mask it out. */
208 unsigned alarm_mask =
209 (efx->board_info.major == 0 && efx->board_info.minor == 0) ?
210 ~LM87_ALARM_TEMP_EXT1 : ~0;
211
212 return efx_check_lm87(efx, alarm_mask);
213}
214
215static int sfe4002_init(struct efx_nic *efx)
216{
217 int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
218 if (rc)
219 return rc;
220 efx->board_info.monitor = sfe4002_check_hw;
221 efx->board_info.init_leds = sfe4002_init_leds;
222 efx->board_info.set_id_led = sfe4002_set_id_led;
223 efx->board_info.blink = board_blink;
224 efx->board_info.fini = efx_fini_lm87;
225 return 0;
226}
227
228/*****************************************************************************
229 * Support for the SFN4112F
230 *
231 */
232static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
233
234static const u8 sfn4112f_lm87_regs[] = {
235 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
236 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
237 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
238 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
239 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
240 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
241 LM87_TEMP_INT_LIMITS(10, 60), /* board */
242 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
243 0
244};
245
246static struct i2c_board_info sfn4112f_hwmon_info = {
247 I2C_BOARD_INFO("lm87", 0x2e),
248 .platform_data = &sfn4112f_lm87_channel,
249};
250
251#define SFN4112F_ACT_LED 0
252#define SFN4112F_LINK_LED 1
253
254static void sfn4112f_init_leds(struct efx_nic *efx)
255{
256 xfp_set_led(efx, SFN4112F_ACT_LED,
257 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
258 xfp_set_led(efx, SFN4112F_LINK_LED,
259 QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
260}
261
262static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
263{
264 xfp_set_led(efx, SFN4112F_LINK_LED,
265 state ? QUAKE_LED_ON : QUAKE_LED_OFF);
266}
267
268static int sfn4112f_check_hw(struct efx_nic *efx)
269{
270 /* Mask out unused sensors */
271 return efx_check_lm87(efx, ~0x48);
272}
273
274static int sfn4112f_init(struct efx_nic *efx)
275{
276 int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
277 if (rc)
278 return rc;
279 efx->board_info.monitor = sfn4112f_check_hw;
280 efx->board_info.init_leds = sfn4112f_init_leds;
281 efx->board_info.set_id_led = sfn4112f_set_id_led;
282 efx->board_info.blink = board_blink;
283 efx->board_info.fini = efx_fini_lm87;
284 return 0;
285}
286
287/* This will get expanded as board-specific details get moved out of the
288 * PHY drivers. */
289struct efx_board_data {
290 enum efx_board_type type;
291 const char *ref_model;
292 const char *gen_type;
293 int (*init) (struct efx_nic *nic);
294};
295
296
297static struct efx_board_data board_data[] = {
298 { EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
299 { EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
300 { EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
301 sfn4111t_init },
302 { EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
303 sfn4112f_init },
304};
305
306void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
307{
308 struct efx_board_data *data = NULL;
309 int i;
310
311 efx->board_info.type = BOARD_TYPE(revision_info);
312 efx->board_info.major = BOARD_MAJOR(revision_info);
313 efx->board_info.minor = BOARD_MINOR(revision_info);
314
315 for (i = 0; i < ARRAY_SIZE(board_data); i++)
316 if (board_data[i].type == efx->board_info.type)
317 data = &board_data[i];
318
319 if (data) {
320 EFX_INFO(efx, "board is %s rev %c%d\n",
321 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
322 ? data->ref_model : data->gen_type,
323 'A' + efx->board_info.major, efx->board_info.minor);
324 efx->board_info.init = data->init;
325 } else {
326 EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
327 }
328}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
deleted file mode 100644
index 44942de0e080..000000000000
--- a/drivers/net/sfc/boards.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_BOARDS_H
11#define EFX_BOARDS_H
12
13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type {
15 EFX_BOARD_SFE4001 = 1,
16 EFX_BOARD_SFE4002 = 2,
17 EFX_BOARD_SFN4111T = 0x51,
18 EFX_BOARD_SFN4112F = 0x52,
19};
20
21extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
22
23/* SFE4001 (10GBASE-T) */
24extern int sfe4001_init(struct efx_nic *efx);
25/* SFN4111T (100/1000/10GBASE-T) */
26extern int sfn4111t_init(struct efx_nic *efx);
27
28#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f99989d..f5e81114270a 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -21,13 +21,54 @@
21#include <linux/ethtool.h> 21#include <linux/ethtool.h>
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include "net_driver.h" 23#include "net_driver.h"
24#include "ethtool.h"
25#include "tx.h"
26#include "rx.h"
27#include "efx.h" 24#include "efx.h"
28#include "mdio_10g.h" 25#include "mdio_10g.h"
29#include "falcon.h" 26#include "falcon.h"
30 27
28/**************************************************************************
29 *
30 * Type name strings
31 *
32 **************************************************************************
33 */
34
35/* Loopback mode names (see LOOPBACK_MODE()) */
36const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
37const char *efx_loopback_mode_names[] = {
38 [LOOPBACK_NONE] = "NONE",
39 [LOOPBACK_GMAC] = "GMAC",
40 [LOOPBACK_XGMII] = "XGMII",
41 [LOOPBACK_XGXS] = "XGXS",
42 [LOOPBACK_XAUI] = "XAUI",
43 [LOOPBACK_GPHY] = "GPHY",
44 [LOOPBACK_PHYXS] = "PHYXS",
45 [LOOPBACK_PCS] = "PCS",
46 [LOOPBACK_PMAPMD] = "PMA/PMD",
47 [LOOPBACK_NETWORK] = "NETWORK",
48};
49
50/* Interrupt mode names (see INT_MODE())) */
51const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
52const char *efx_interrupt_mode_names[] = {
53 [EFX_INT_MODE_MSIX] = "MSI-X",
54 [EFX_INT_MODE_MSI] = "MSI",
55 [EFX_INT_MODE_LEGACY] = "legacy",
56};
57
58const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
59const char *efx_reset_type_names[] = {
60 [RESET_TYPE_INVISIBLE] = "INVISIBLE",
61 [RESET_TYPE_ALL] = "ALL",
62 [RESET_TYPE_WORLD] = "WORLD",
63 [RESET_TYPE_DISABLE] = "DISABLE",
64 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
65 [RESET_TYPE_INT_ERROR] = "INT_ERROR",
66 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
67 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
68 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
69 [RESET_TYPE_TX_SKIP] = "TX_SKIP",
70};
71
31#define EFX_MAX_MTU (9 * 1024) 72#define EFX_MAX_MTU (9 * 1024)
32 73
33/* RX slow fill workqueue. If memory allocation fails in the fast path, 74/* RX slow fill workqueue. If memory allocation fails in the fast path,
@@ -145,7 +186,8 @@ static void efx_fini_channels(struct efx_nic *efx);
145 186
146#define EFX_ASSERT_RESET_SERIALISED(efx) \ 187#define EFX_ASSERT_RESET_SERIALISED(efx) \
147 do { \ 188 do { \
148 if (efx->state == STATE_RUNNING) \ 189 if ((efx->state == STATE_RUNNING) || \
190 (efx->state == STATE_DISABLED)) \
149 ASSERT_RTNL(); \ 191 ASSERT_RTNL(); \
150 } while (0) 192 } while (0)
151 193
@@ -228,26 +270,20 @@ static int efx_poll(struct napi_struct *napi, int budget)
228 if (channel->used_flags & EFX_USED_BY_RX && 270 if (channel->used_flags & EFX_USED_BY_RX &&
229 efx->irq_rx_adaptive && 271 efx->irq_rx_adaptive &&
230 unlikely(++channel->irq_count == 1000)) { 272 unlikely(++channel->irq_count == 1000)) {
231 unsigned old_irq_moderation = channel->irq_moderation;
232
233 if (unlikely(channel->irq_mod_score < 273 if (unlikely(channel->irq_mod_score <
234 irq_adapt_low_thresh)) { 274 irq_adapt_low_thresh)) {
235 channel->irq_moderation = 275 if (channel->irq_moderation > 1) {
236 max_t(int, 276 channel->irq_moderation -= 1;
237 channel->irq_moderation - 277 falcon_set_int_moderation(channel);
238 FALCON_IRQ_MOD_RESOLUTION, 278 }
239 FALCON_IRQ_MOD_RESOLUTION);
240 } else if (unlikely(channel->irq_mod_score > 279 } else if (unlikely(channel->irq_mod_score >
241 irq_adapt_high_thresh)) { 280 irq_adapt_high_thresh)) {
242 channel->irq_moderation = 281 if (channel->irq_moderation <
243 min(channel->irq_moderation + 282 efx->irq_rx_moderation) {
244 FALCON_IRQ_MOD_RESOLUTION, 283 channel->irq_moderation += 1;
245 efx->irq_rx_moderation); 284 falcon_set_int_moderation(channel);
285 }
246 } 286 }
247
248 if (channel->irq_moderation != old_irq_moderation)
249 falcon_set_int_moderation(channel);
250
251 channel->irq_count = 0; 287 channel->irq_count = 0;
252 channel->irq_mod_score = 0; 288 channel->irq_mod_score = 0;
253 } 289 }
@@ -290,7 +326,7 @@ void efx_process_channel_now(struct efx_channel *channel)
290 napi_disable(&channel->napi_str); 326 napi_disable(&channel->napi_str);
291 327
292 /* Poll the channel */ 328 /* Poll the channel */
293 efx_process_channel(channel, efx->type->evq_size); 329 efx_process_channel(channel, EFX_EVQ_SIZE);
294 330
295 /* Ack the eventq. This may cause an interrupt to be generated 331 /* Ack the eventq. This may cause an interrupt to be generated
296 * when they are reenabled */ 332 * when they are reenabled */
@@ -547,8 +583,10 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
547 * netif_carrier_on/off) of the link status, and also maintains the 583 * netif_carrier_on/off) of the link status, and also maintains the
548 * link status's stop on the port's TX queue. 584 * link status's stop on the port's TX queue.
549 */ 585 */
550static void efx_link_status_changed(struct efx_nic *efx) 586void efx_link_status_changed(struct efx_nic *efx)
551{ 587{
588 struct efx_link_state *link_state = &efx->link_state;
589
552 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure 590 /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
553 * that no events are triggered between unregister_netdev() and the 591 * that no events are triggered between unregister_netdev() and the
554 * driver unloading. A more general condition is that NETDEV_CHANGE 592 * driver unloading. A more general condition is that NETDEV_CHANGE
@@ -561,19 +599,19 @@ static void efx_link_status_changed(struct efx_nic *efx)
561 return; 599 return;
562 } 600 }
563 601
564 if (efx->link_up != netif_carrier_ok(efx->net_dev)) { 602 if (link_state->up != netif_carrier_ok(efx->net_dev)) {
565 efx->n_link_state_changes++; 603 efx->n_link_state_changes++;
566 604
567 if (efx->link_up) 605 if (link_state->up)
568 netif_carrier_on(efx->net_dev); 606 netif_carrier_on(efx->net_dev);
569 else 607 else
570 netif_carrier_off(efx->net_dev); 608 netif_carrier_off(efx->net_dev);
571 } 609 }
572 610
573 /* Status message for kernel log */ 611 /* Status message for kernel log */
574 if (efx->link_up) { 612 if (link_state->up) {
575 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n", 613 EFX_INFO(efx, "link up at %uMbps %s-duplex (MTU %d)%s\n",
576 efx->link_speed, efx->link_fd ? "full" : "half", 614 link_state->speed, link_state->fd ? "full" : "half",
577 efx->net_dev->mtu, 615 efx->net_dev->mtu,
578 (efx->promiscuous ? " [PROMISC]" : "")); 616 (efx->promiscuous ? " [PROMISC]" : ""));
579 } else { 617 } else {
@@ -599,6 +637,7 @@ void __efx_reconfigure_port(struct efx_nic *efx)
599 netif_addr_unlock_bh(efx->net_dev); 637 netif_addr_unlock_bh(efx->net_dev);
600 } 638 }
601 639
640 falcon_stop_nic_stats(efx);
602 falcon_deconfigure_mac_wrapper(efx); 641 falcon_deconfigure_mac_wrapper(efx);
603 642
604 /* Reconfigure the PHY, disabling transmit in mac level loopback. */ 643 /* Reconfigure the PHY, disabling transmit in mac level loopback. */
@@ -613,6 +652,8 @@ void __efx_reconfigure_port(struct efx_nic *efx)
613 652
614 efx->mac_op->reconfigure(efx); 653 efx->mac_op->reconfigure(efx);
615 654
655 falcon_start_nic_stats(efx);
656
616 /* Inform kernel of loss/gain of carrier */ 657 /* Inform kernel of loss/gain of carrier */
617 efx_link_status_changed(efx); 658 efx_link_status_changed(efx);
618 return; 659 return;
@@ -634,26 +675,18 @@ void efx_reconfigure_port(struct efx_nic *efx)
634 mutex_unlock(&efx->mac_lock); 675 mutex_unlock(&efx->mac_lock);
635} 676}
636 677
637/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all() 678/* Asynchronous work item for changing MAC promiscuity and multicast
638 * we don't efx_reconfigure_port() if the port is disabled. Care is taken 679 * hash. Avoid a drain/rx_ingress enable by reconfiguring the current
639 * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */ 680 * MAC directly. */
640static void efx_phy_work(struct work_struct *data)
641{
642 struct efx_nic *efx = container_of(data, struct efx_nic, phy_work);
643
644 mutex_lock(&efx->mac_lock);
645 if (efx->port_enabled)
646 __efx_reconfigure_port(efx);
647 mutex_unlock(&efx->mac_lock);
648}
649
650static void efx_mac_work(struct work_struct *data) 681static void efx_mac_work(struct work_struct *data)
651{ 682{
652 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); 683 struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
653 684
654 mutex_lock(&efx->mac_lock); 685 mutex_lock(&efx->mac_lock);
655 if (efx->port_enabled) 686 if (efx->port_enabled) {
656 efx->mac_op->irq(efx); 687 falcon_push_multicast_hash(efx);
688 efx->mac_op->reconfigure(efx);
689 }
657 mutex_unlock(&efx->mac_lock); 690 mutex_unlock(&efx->mac_lock);
658} 691}
659 692
@@ -699,29 +732,29 @@ static int efx_init_port(struct efx_nic *efx)
699 732
700 EFX_LOG(efx, "init port\n"); 733 EFX_LOG(efx, "init port\n");
701 734
735 mutex_lock(&efx->mac_lock);
736
702 rc = efx->phy_op->init(efx); 737 rc = efx->phy_op->init(efx);
703 if (rc) 738 if (rc)
704 return rc; 739 goto fail1;
705 mutex_lock(&efx->mac_lock);
706 efx->phy_op->reconfigure(efx); 740 efx->phy_op->reconfigure(efx);
707 rc = falcon_switch_mac(efx); 741 rc = falcon_switch_mac(efx);
708 mutex_unlock(&efx->mac_lock);
709 if (rc) 742 if (rc)
710 goto fail; 743 goto fail2;
711 efx->mac_op->reconfigure(efx); 744 efx->mac_op->reconfigure(efx);
712 745
713 efx->port_initialized = true; 746 efx->port_initialized = true;
714 efx_stats_enable(efx); 747
748 mutex_unlock(&efx->mac_lock);
715 return 0; 749 return 0;
716 750
717fail: 751fail2:
718 efx->phy_op->fini(efx); 752 efx->phy_op->fini(efx);
753fail1:
754 mutex_unlock(&efx->mac_lock);
719 return rc; 755 return rc;
720} 756}
721 757
722/* Allow efx_reconfigure_port() to be scheduled, and close the window
723 * between efx_stop_port and efx_flush_all whereby a previously scheduled
724 * efx_phy_work()/efx_mac_work() may have been cancelled */
725static void efx_start_port(struct efx_nic *efx) 758static void efx_start_port(struct efx_nic *efx)
726{ 759{
727 EFX_LOG(efx, "start port\n"); 760 EFX_LOG(efx, "start port\n");
@@ -729,15 +762,16 @@ static void efx_start_port(struct efx_nic *efx)
729 762
730 mutex_lock(&efx->mac_lock); 763 mutex_lock(&efx->mac_lock);
731 efx->port_enabled = true; 764 efx->port_enabled = true;
732 __efx_reconfigure_port(efx); 765
733 efx->mac_op->irq(efx); 766 /* efx_mac_work() might have been scheduled after efx_stop_port(),
767 * and then cancelled by efx_flush_all() */
768 falcon_push_multicast_hash(efx);
769 efx->mac_op->reconfigure(efx);
770
734 mutex_unlock(&efx->mac_lock); 771 mutex_unlock(&efx->mac_lock);
735} 772}
736 773
737/* Prevent efx_phy_work, efx_mac_work, and efx_monitor() from executing, 774/* Prevent efx_mac_work() and efx_monitor() from working */
738 * and efx_set_multicast_list() from scheduling efx_phy_work. efx_phy_work
739 * and efx_mac_work may still be scheduled via NAPI processing until
740 * efx_flush_all() is called */
741static void efx_stop_port(struct efx_nic *efx) 775static void efx_stop_port(struct efx_nic *efx)
742{ 776{
743 EFX_LOG(efx, "stop port\n"); 777 EFX_LOG(efx, "stop port\n");
@@ -760,11 +794,10 @@ static void efx_fini_port(struct efx_nic *efx)
760 if (!efx->port_initialized) 794 if (!efx->port_initialized)
761 return; 795 return;
762 796
763 efx_stats_disable(efx);
764 efx->phy_op->fini(efx); 797 efx->phy_op->fini(efx);
765 efx->port_initialized = false; 798 efx->port_initialized = false;
766 799
767 efx->link_up = false; 800 efx->link_state.up = false;
768 efx_link_status_changed(efx); 801 efx_link_status_changed(efx);
769} 802}
770 803
@@ -824,9 +857,8 @@ static int efx_init_io(struct efx_nic *efx)
824 goto fail2; 857 goto fail2;
825 } 858 }
826 859
827 efx->membase_phys = pci_resource_start(efx->pci_dev, 860 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
828 efx->type->mem_bar); 861 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
829 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
830 if (rc) { 862 if (rc) {
831 EFX_ERR(efx, "request for memory BAR failed\n"); 863 EFX_ERR(efx, "request for memory BAR failed\n");
832 rc = -EIO; 864 rc = -EIO;
@@ -835,21 +867,20 @@ static int efx_init_io(struct efx_nic *efx)
835 efx->membase = ioremap_nocache(efx->membase_phys, 867 efx->membase = ioremap_nocache(efx->membase_phys,
836 efx->type->mem_map_size); 868 efx->type->mem_map_size);
837 if (!efx->membase) { 869 if (!efx->membase) {
838 EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", 870 EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
839 efx->type->mem_bar,
840 (unsigned long long)efx->membase_phys, 871 (unsigned long long)efx->membase_phys,
841 efx->type->mem_map_size); 872 efx->type->mem_map_size);
842 rc = -ENOMEM; 873 rc = -ENOMEM;
843 goto fail4; 874 goto fail4;
844 } 875 }
845 EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", 876 EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
846 efx->type->mem_bar, (unsigned long long)efx->membase_phys, 877 (unsigned long long)efx->membase_phys,
847 efx->type->mem_map_size, efx->membase); 878 efx->type->mem_map_size, efx->membase);
848 879
849 return 0; 880 return 0;
850 881
851 fail4: 882 fail4:
852 pci_release_region(efx->pci_dev, efx->type->mem_bar); 883 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
853 fail3: 884 fail3:
854 efx->membase_phys = 0; 885 efx->membase_phys = 0;
855 fail2: 886 fail2:
@@ -868,7 +899,7 @@ static void efx_fini_io(struct efx_nic *efx)
868 } 899 }
869 900
870 if (efx->membase_phys) { 901 if (efx->membase_phys) {
871 pci_release_region(efx->pci_dev, efx->type->mem_bar); 902 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
872 efx->membase_phys = 0; 903 efx->membase_phys = 0;
873 } 904 }
874 905
@@ -1118,6 +1149,8 @@ static void efx_start_all(struct efx_nic *efx)
1118 if (efx->state == STATE_RUNNING) 1149 if (efx->state == STATE_RUNNING)
1119 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1150 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1120 efx_monitor_interval); 1151 efx_monitor_interval);
1152
1153 falcon_start_nic_stats(efx);
1121} 1154}
1122 1155
1123/* Flush all delayed work. Should only be called when no more delayed work 1156/* Flush all delayed work. Should only be called when no more delayed work
@@ -1136,8 +1169,6 @@ static void efx_flush_all(struct efx_nic *efx)
1136 1169
1137 /* Stop scheduled port reconfigurations */ 1170 /* Stop scheduled port reconfigurations */
1138 cancel_work_sync(&efx->mac_work); 1171 cancel_work_sync(&efx->mac_work);
1139 cancel_work_sync(&efx->phy_work);
1140
1141} 1172}
1142 1173
1143/* Quiesce hardware and software without bringing the link down. 1174/* Quiesce hardware and software without bringing the link down.
@@ -1155,6 +1186,8 @@ static void efx_stop_all(struct efx_nic *efx)
1155 if (!efx->port_enabled) 1186 if (!efx->port_enabled)
1156 return; 1187 return;
1157 1188
1189 falcon_stop_nic_stats(efx);
1190
1158 /* Disable interrupts and wait for ISR to complete */ 1191 /* Disable interrupts and wait for ISR to complete */
1159 falcon_disable_interrupts(efx); 1192 falcon_disable_interrupts(efx);
1160 if (efx->legacy_irq) 1193 if (efx->legacy_irq)
@@ -1173,7 +1206,7 @@ static void efx_stop_all(struct efx_nic *efx)
1173 * window to loose phy events */ 1206 * window to loose phy events */
1174 efx_stop_port(efx); 1207 efx_stop_port(efx);
1175 1208
1176 /* Flush efx_phy_work, efx_mac_work, refill_workqueue, monitor_work */ 1209 /* Flush efx_mac_work(), refill_workqueue, monitor_work */
1177 efx_flush_all(efx); 1210 efx_flush_all(efx);
1178 1211
1179 /* Isolate the MAC from the TX and RX engines, so that queue 1212 /* Isolate the MAC from the TX and RX engines, so that queue
@@ -1201,41 +1234,39 @@ static void efx_remove_all(struct efx_nic *efx)
1201 efx_remove_nic(efx); 1234 efx_remove_nic(efx);
1202} 1235}
1203 1236
1204/* A convinience function to safely flush all the queues */
1205void efx_flush_queues(struct efx_nic *efx)
1206{
1207 EFX_ASSERT_RESET_SERIALISED(efx);
1208
1209 efx_stop_all(efx);
1210
1211 efx_fini_channels(efx);
1212 efx_init_channels(efx);
1213
1214 efx_start_all(efx);
1215}
1216
1217/************************************************************************** 1237/**************************************************************************
1218 * 1238 *
1219 * Interrupt moderation 1239 * Interrupt moderation
1220 * 1240 *
1221 **************************************************************************/ 1241 **************************************************************************/
1222 1242
1243static unsigned irq_mod_ticks(int usecs, int resolution)
1244{
1245 if (usecs <= 0)
1246 return 0; /* cannot receive interrupts ahead of time :-) */
1247 if (usecs < resolution)
1248 return 1; /* never round down to 0 */
1249 return usecs / resolution;
1250}
1251
1223/* Set interrupt moderation parameters */ 1252/* Set interrupt moderation parameters */
1224void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, 1253void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1225 bool rx_adaptive) 1254 bool rx_adaptive)
1226{ 1255{
1227 struct efx_tx_queue *tx_queue; 1256 struct efx_tx_queue *tx_queue;
1228 struct efx_rx_queue *rx_queue; 1257 struct efx_rx_queue *rx_queue;
1258 unsigned tx_ticks = irq_mod_ticks(tx_usecs, FALCON_IRQ_MOD_RESOLUTION);
1259 unsigned rx_ticks = irq_mod_ticks(rx_usecs, FALCON_IRQ_MOD_RESOLUTION);
1229 1260
1230 EFX_ASSERT_RESET_SERIALISED(efx); 1261 EFX_ASSERT_RESET_SERIALISED(efx);
1231 1262
1232 efx_for_each_tx_queue(tx_queue, efx) 1263 efx_for_each_tx_queue(tx_queue, efx)
1233 tx_queue->channel->irq_moderation = tx_usecs; 1264 tx_queue->channel->irq_moderation = tx_ticks;
1234 1265
1235 efx->irq_rx_adaptive = rx_adaptive; 1266 efx->irq_rx_adaptive = rx_adaptive;
1236 efx->irq_rx_moderation = rx_usecs; 1267 efx->irq_rx_moderation = rx_ticks;
1237 efx_for_each_rx_queue(rx_queue, efx) 1268 efx_for_each_rx_queue(rx_queue, efx)
1238 rx_queue->channel->irq_moderation = rx_usecs; 1269 rx_queue->channel->irq_moderation = rx_ticks;
1239} 1270}
1240 1271
1241/************************************************************************** 1272/**************************************************************************
@@ -1250,7 +1281,6 @@ static void efx_monitor(struct work_struct *data)
1250{ 1281{
1251 struct efx_nic *efx = container_of(data, struct efx_nic, 1282 struct efx_nic *efx = container_of(data, struct efx_nic,
1252 monitor_work.work); 1283 monitor_work.work);
1253 int rc;
1254 1284
1255 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", 1285 EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
1256 raw_smp_processor_id()); 1286 raw_smp_processor_id());
@@ -1262,15 +1292,7 @@ static void efx_monitor(struct work_struct *data)
1262 goto out_requeue; 1292 goto out_requeue;
1263 if (!efx->port_enabled) 1293 if (!efx->port_enabled)
1264 goto out_unlock; 1294 goto out_unlock;
1265 rc = efx->board_info.monitor(efx); 1295 falcon_monitor(efx);
1266 if (rc) {
1267 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
1268 (rc == -ERANGE) ? "reported fault" : "failed");
1269 efx->phy_mode |= PHY_MODE_LOW_POWER;
1270 falcon_sim_phy_event(efx);
1271 }
1272 efx->phy_op->poll(efx);
1273 efx->mac_op->poll(efx);
1274 1296
1275out_unlock: 1297out_unlock:
1276 mutex_unlock(&efx->mac_lock); 1298 mutex_unlock(&efx->mac_lock);
@@ -1400,20 +1422,6 @@ static int efx_net_stop(struct net_device *net_dev)
1400 return 0; 1422 return 0;
1401} 1423}
1402 1424
1403void efx_stats_disable(struct efx_nic *efx)
1404{
1405 spin_lock(&efx->stats_lock);
1406 ++efx->stats_disable_count;
1407 spin_unlock(&efx->stats_lock);
1408}
1409
1410void efx_stats_enable(struct efx_nic *efx)
1411{
1412 spin_lock(&efx->stats_lock);
1413 --efx->stats_disable_count;
1414 spin_unlock(&efx->stats_lock);
1415}
1416
1417/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1425/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1418static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1426static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1419{ 1427{
@@ -1421,17 +1429,9 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1421 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1429 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1422 struct net_device_stats *stats = &net_dev->stats; 1430 struct net_device_stats *stats = &net_dev->stats;
1423 1431
1424 /* Update stats if possible, but do not wait if another thread 1432 spin_lock_bh(&efx->stats_lock);
1425 * is updating them or if MAC stats fetches are temporarily 1433 falcon_update_nic_stats(efx);
1426 * disabled; slightly stale stats are acceptable. 1434 spin_unlock_bh(&efx->stats_lock);
1427 */
1428 if (!spin_trylock(&efx->stats_lock))
1429 return stats;
1430 if (!efx->stats_disable_count) {
1431 efx->mac_op->update_stats(efx);
1432 falcon_update_nic_stats(efx);
1433 }
1434 spin_unlock(&efx->stats_lock);
1435 1435
1436 stats->rx_packets = mac_stats->rx_packets; 1436 stats->rx_packets = mac_stats->rx_packets;
1437 stats->tx_packets = mac_stats->tx_packets; 1437 stats->tx_packets = mac_stats->tx_packets;
@@ -1525,16 +1525,14 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1525 struct efx_nic *efx = netdev_priv(net_dev); 1525 struct efx_nic *efx = netdev_priv(net_dev);
1526 struct dev_mc_list *mc_list = net_dev->mc_list; 1526 struct dev_mc_list *mc_list = net_dev->mc_list;
1527 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1527 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1528 bool promiscuous = !!(net_dev->flags & IFF_PROMISC);
1529 bool changed = (efx->promiscuous != promiscuous);
1530 u32 crc; 1528 u32 crc;
1531 int bit; 1529 int bit;
1532 int i; 1530 int i;
1533 1531
1534 efx->promiscuous = promiscuous; 1532 efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
1535 1533
1536 /* Build multicast hash table */ 1534 /* Build multicast hash table */
1537 if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) { 1535 if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
1538 memset(mc_hash, 0xff, sizeof(*mc_hash)); 1536 memset(mc_hash, 0xff, sizeof(*mc_hash));
1539 } else { 1537 } else {
1540 memset(mc_hash, 0x00, sizeof(*mc_hash)); 1538 memset(mc_hash, 0x00, sizeof(*mc_hash));
@@ -1544,17 +1542,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1544 set_bit_le(bit, mc_hash->byte); 1542 set_bit_le(bit, mc_hash->byte);
1545 mc_list = mc_list->next; 1543 mc_list = mc_list->next;
1546 } 1544 }
1547 }
1548
1549 if (!efx->port_enabled)
1550 /* Delay pushing settings until efx_start_port() */
1551 return;
1552 1545
1553 if (changed) 1546 /* Broadcast packets go through the multicast hash filter.
1554 queue_work(efx->workqueue, &efx->phy_work); 1547 * ether_crc_le() of the broadcast address is 0xbe2612ff
1548 * so we always add bit 0xff to the mask.
1549 */
1550 set_bit_le(0xff, mc_hash->byte);
1551 }
1555 1552
1556 /* Create and activate new global multicast hash table */ 1553 if (efx->port_enabled)
1557 falcon_set_multicast_hash(efx); 1554 queue_work(efx->workqueue, &efx->mac_work);
1555 /* Otherwise efx_start_port() will do this */
1558} 1556}
1559 1557
1560static const struct net_device_ops efx_netdev_ops = { 1558static const struct net_device_ops efx_netdev_ops = {
@@ -1688,7 +1686,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method,
1688{ 1686{
1689 EFX_ASSERT_RESET_SERIALISED(efx); 1687 EFX_ASSERT_RESET_SERIALISED(efx);
1690 1688
1691 efx_stats_disable(efx);
1692 efx_stop_all(efx); 1689 efx_stop_all(efx);
1693 mutex_lock(&efx->mac_lock); 1690 mutex_lock(&efx->mac_lock);
1694 mutex_lock(&efx->spi_lock); 1691 mutex_lock(&efx->spi_lock);
@@ -1738,10 +1735,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method,
1738 mutex_unlock(&efx->spi_lock); 1735 mutex_unlock(&efx->spi_lock);
1739 mutex_unlock(&efx->mac_lock); 1736 mutex_unlock(&efx->mac_lock);
1740 1737
1741 if (ok) { 1738 if (ok)
1742 efx_start_all(efx); 1739 efx_start_all(efx);
1743 efx_stats_enable(efx);
1744 }
1745 return rc; 1740 return rc;
1746} 1741}
1747 1742
@@ -1770,7 +1765,7 @@ static int efx_reset(struct efx_nic *efx)
1770 goto out_unlock; 1765 goto out_unlock;
1771 } 1766 }
1772 1767
1773 EFX_INFO(efx, "resetting (%d)\n", method); 1768 EFX_INFO(efx, "resetting (%s)\n", RESET_TYPE(method));
1774 1769
1775 efx_reset_down(efx, method, &ecmd); 1770 efx_reset_down(efx, method, &ecmd);
1776 1771
@@ -1849,9 +1844,10 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1849 } 1844 }
1850 1845
1851 if (method != type) 1846 if (method != type)
1852 EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method); 1847 EFX_LOG(efx, "scheduling %s reset for %s\n",
1848 RESET_TYPE(method), RESET_TYPE(type));
1853 else 1849 else
1854 EFX_LOG(efx, "scheduling reset (%d)\n", method); 1850 EFX_LOG(efx, "scheduling %s reset\n", RESET_TYPE(method));
1855 1851
1856 efx->reset_pending = method; 1852 efx->reset_pending = method;
1857 1853
@@ -1867,15 +1863,15 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
1867/* PCI device ID table */ 1863/* PCI device ID table */
1868static struct pci_device_id efx_pci_table[] __devinitdata = { 1864static struct pci_device_id efx_pci_table[] __devinitdata = {
1869 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), 1865 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
1870 .driver_data = (unsigned long) &falcon_a_nic_type}, 1866 .driver_data = (unsigned long) &falcon_a1_nic_type},
1871 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), 1867 {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
1872 .driver_data = (unsigned long) &falcon_b_nic_type}, 1868 .driver_data = (unsigned long) &falcon_b0_nic_type},
1873 {0} /* end of list */ 1869 {0} /* end of list */
1874}; 1870};
1875 1871
1876/************************************************************************** 1872/**************************************************************************
1877 * 1873 *
1878 * Dummy PHY/MAC/Board operations 1874 * Dummy PHY/MAC operations
1879 * 1875 *
1880 * Can be used for some unimplemented operations 1876 * Can be used for some unimplemented operations
1881 * Needed so all function pointers are valid and do not have to be tested 1877 * Needed so all function pointers are valid and do not have to be tested
@@ -1887,29 +1883,19 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
1887 return 0; 1883 return 0;
1888} 1884}
1889void efx_port_dummy_op_void(struct efx_nic *efx) {} 1885void efx_port_dummy_op_void(struct efx_nic *efx) {}
1890void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink) {} 1886void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
1891 1887{
1892static struct efx_mac_operations efx_dummy_mac_operations = { 1888}
1893 .reconfigure = efx_port_dummy_op_void, 1889bool efx_port_dummy_op_poll(struct efx_nic *efx)
1894 .poll = efx_port_dummy_op_void, 1890{
1895 .irq = efx_port_dummy_op_void, 1891 return false;
1896}; 1892}
1897 1893
1898static struct efx_phy_operations efx_dummy_phy_operations = { 1894static struct efx_phy_operations efx_dummy_phy_operations = {
1899 .init = efx_port_dummy_op_int, 1895 .init = efx_port_dummy_op_int,
1900 .reconfigure = efx_port_dummy_op_void, 1896 .reconfigure = efx_port_dummy_op_void,
1901 .poll = efx_port_dummy_op_void, 1897 .poll = efx_port_dummy_op_poll,
1902 .fini = efx_port_dummy_op_void, 1898 .fini = efx_port_dummy_op_void,
1903 .clear_interrupt = efx_port_dummy_op_void,
1904};
1905
1906static struct efx_board efx_dummy_board_info = {
1907 .init = efx_port_dummy_op_int,
1908 .init_leds = efx_port_dummy_op_void,
1909 .set_id_led = efx_port_dummy_op_blink,
1910 .monitor = efx_port_dummy_op_int,
1911 .blink = efx_port_dummy_op_blink,
1912 .fini = efx_port_dummy_op_void,
1913}; 1899};
1914 1900
1915/************************************************************************** 1901/**************************************************************************
@@ -1932,7 +1918,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1932 /* Initialise common structures */ 1918 /* Initialise common structures */
1933 memset(efx, 0, sizeof(*efx)); 1919 memset(efx, 0, sizeof(*efx));
1934 spin_lock_init(&efx->biu_lock); 1920 spin_lock_init(&efx->biu_lock);
1935 spin_lock_init(&efx->phy_lock); 1921 mutex_init(&efx->mdio_lock);
1936 mutex_init(&efx->spi_lock); 1922 mutex_init(&efx->spi_lock);
1937 INIT_WORK(&efx->reset_work, efx_reset_work); 1923 INIT_WORK(&efx->reset_work, efx_reset_work);
1938 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); 1924 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
@@ -1940,18 +1926,15 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1940 efx->state = STATE_INIT; 1926 efx->state = STATE_INIT;
1941 efx->reset_pending = RESET_TYPE_NONE; 1927 efx->reset_pending = RESET_TYPE_NONE;
1942 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); 1928 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
1943 efx->board_info = efx_dummy_board_info;
1944 1929
1945 efx->net_dev = net_dev; 1930 efx->net_dev = net_dev;
1946 efx->rx_checksum_enabled = true; 1931 efx->rx_checksum_enabled = true;
1947 spin_lock_init(&efx->netif_stop_lock); 1932 spin_lock_init(&efx->netif_stop_lock);
1948 spin_lock_init(&efx->stats_lock); 1933 spin_lock_init(&efx->stats_lock);
1949 efx->stats_disable_count = 1;
1950 mutex_init(&efx->mac_lock); 1934 mutex_init(&efx->mac_lock);
1951 efx->mac_op = &efx_dummy_mac_operations; 1935 efx->mac_op = type->default_mac_ops;
1952 efx->phy_op = &efx_dummy_phy_operations; 1936 efx->phy_op = &efx_dummy_phy_operations;
1953 efx->mdio.dev = net_dev; 1937 efx->mdio.dev = net_dev;
1954 INIT_WORK(&efx->phy_work, efx_phy_work);
1955 INIT_WORK(&efx->mac_work, efx_mac_work); 1938 INIT_WORK(&efx->mac_work, efx_mac_work);
1956 atomic_set(&efx->netif_stop_count, 1); 1939 atomic_set(&efx->netif_stop_count, 1);
1957 1940
@@ -1981,17 +1964,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1981 1964
1982 efx->type = type; 1965 efx->type = type;
1983 1966
1984 /* Sanity-check NIC type */
1985 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1986 (efx->type->txd_ring_mask + 1));
1987 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1988 (efx->type->rxd_ring_mask + 1));
1989 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1990 (efx->type->evq_size - 1));
1991 /* As close as we can get to guaranteeing that we don't overflow */ 1967 /* As close as we can get to guaranteeing that we don't overflow */
1992 EFX_BUG_ON_PARANOID(efx->type->evq_size < 1968 BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
1993 (efx->type->txd_ring_mask + 1 + 1969
1994 efx->type->rxd_ring_mask + 1));
1995 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 1970 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1996 1971
1997 /* Higher numbered interrupt modes are less capable! */ 1972 /* Higher numbered interrupt modes are less capable! */
@@ -2027,19 +2002,9 @@ static void efx_fini_struct(struct efx_nic *efx)
2027 */ 2002 */
2028static void efx_pci_remove_main(struct efx_nic *efx) 2003static void efx_pci_remove_main(struct efx_nic *efx)
2029{ 2004{
2030 EFX_ASSERT_RESET_SERIALISED(efx); 2005 falcon_fini_interrupt(efx);
2031
2032 /* Skip everything if we never obtained a valid membase */
2033 if (!efx->membase)
2034 return;
2035
2036 efx_fini_channels(efx); 2006 efx_fini_channels(efx);
2037 efx_fini_port(efx); 2007 efx_fini_port(efx);
2038
2039 /* Shutdown the board, then the NIC and board state */
2040 efx->board_info.fini(efx);
2041 falcon_fini_interrupt(efx);
2042
2043 efx_fini_napi(efx); 2008 efx_fini_napi(efx);
2044 efx_remove_all(efx); 2009 efx_remove_all(efx);
2045} 2010}
@@ -2063,9 +2028,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2063 /* Allow any queued efx_resets() to complete */ 2028 /* Allow any queued efx_resets() to complete */
2064 rtnl_unlock(); 2029 rtnl_unlock();
2065 2030
2066 if (efx->membase == NULL)
2067 goto out;
2068
2069 efx_unregister_netdev(efx); 2031 efx_unregister_netdev(efx);
2070 2032
2071 efx_mtd_remove(efx); 2033 efx_mtd_remove(efx);
@@ -2078,7 +2040,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2078 2040
2079 efx_pci_remove_main(efx); 2041 efx_pci_remove_main(efx);
2080 2042
2081out:
2082 efx_fini_io(efx); 2043 efx_fini_io(efx);
2083 EFX_LOG(efx, "shutdown successful\n"); 2044 EFX_LOG(efx, "shutdown successful\n");
2084 2045
@@ -2103,39 +2064,30 @@ static int efx_pci_probe_main(struct efx_nic *efx)
2103 if (rc) 2064 if (rc)
2104 goto fail2; 2065 goto fail2;
2105 2066
2106 /* Initialise the board */
2107 rc = efx->board_info.init(efx);
2108 if (rc) {
2109 EFX_ERR(efx, "failed to initialise board\n");
2110 goto fail3;
2111 }
2112
2113 rc = falcon_init_nic(efx); 2067 rc = falcon_init_nic(efx);
2114 if (rc) { 2068 if (rc) {
2115 EFX_ERR(efx, "failed to initialise NIC\n"); 2069 EFX_ERR(efx, "failed to initialise NIC\n");
2116 goto fail4; 2070 goto fail3;
2117 } 2071 }
2118 2072
2119 rc = efx_init_port(efx); 2073 rc = efx_init_port(efx);
2120 if (rc) { 2074 if (rc) {
2121 EFX_ERR(efx, "failed to initialise port\n"); 2075 EFX_ERR(efx, "failed to initialise port\n");
2122 goto fail5; 2076 goto fail4;
2123 } 2077 }
2124 2078
2125 efx_init_channels(efx); 2079 efx_init_channels(efx);
2126 2080
2127 rc = falcon_init_interrupt(efx); 2081 rc = falcon_init_interrupt(efx);
2128 if (rc) 2082 if (rc)
2129 goto fail6; 2083 goto fail5;
2130 2084
2131 return 0; 2085 return 0;
2132 2086
2133 fail6: 2087 fail5:
2134 efx_fini_channels(efx); 2088 efx_fini_channels(efx);
2135 efx_fini_port(efx); 2089 efx_fini_port(efx);
2136 fail5:
2137 fail4: 2090 fail4:
2138 efx->board_info.fini(efx);
2139 fail3: 2091 fail3:
2140 efx_fini_napi(efx); 2092 efx_fini_napi(efx);
2141 fail2: 2093 fail2:
@@ -2219,18 +2171,19 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2219 goto fail4; 2171 goto fail4;
2220 } 2172 }
2221 2173
2222 /* Switch to the running state before we expose the device to 2174 /* Switch to the running state before we expose the device to the OS,
2223 * the OS. This is to ensure that the initial gathering of 2175 * so that dev_open()|efx_start_all() will actually start the device */
2224 * MAC stats succeeds. */
2225 efx->state = STATE_RUNNING; 2176 efx->state = STATE_RUNNING;
2226 2177
2227 efx_mtd_probe(efx); /* allowed to fail */
2228
2229 rc = efx_register_netdev(efx); 2178 rc = efx_register_netdev(efx);
2230 if (rc) 2179 if (rc)
2231 goto fail5; 2180 goto fail5;
2232 2181
2233 EFX_LOG(efx, "initialisation successful\n"); 2182 EFX_LOG(efx, "initialisation successful\n");
2183
2184 rtnl_lock();
2185 efx_mtd_probe(efx); /* allowed to fail */
2186 rtnl_unlock();
2234 return 0; 2187 return 0;
2235 2188
2236 fail5: 2189 fail5:
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index aecaf62f4929..15edda2a2242 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -19,29 +19,57 @@
19#define FALCON_A_S_DEVID 0x6703 19#define FALCON_A_S_DEVID 0x6703
20#define FALCON_B_P_DEVID 0x0710 20#define FALCON_B_P_DEVID 0x0710
21 21
22/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
23#define EFX_MEM_BAR 2
24
22/* TX */ 25/* TX */
23extern netdev_tx_t efx_xmit(struct efx_nic *efx, 26extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
24 struct efx_tx_queue *tx_queue, 27extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
25 struct sk_buff *skb); 28extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
29extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
30extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
31extern netdev_tx_t
32efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
33extern netdev_tx_t
34efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
35extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
26extern void efx_stop_queue(struct efx_nic *efx); 36extern void efx_stop_queue(struct efx_nic *efx);
27extern void efx_wake_queue(struct efx_nic *efx); 37extern void efx_wake_queue(struct efx_nic *efx);
38#define EFX_TXQ_SIZE 1024
39#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
28 40
29/* RX */ 41/* RX */
30extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 42extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
43extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
44extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
45extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
46extern void efx_rx_strategy(struct efx_channel *channel);
47extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
48extern void efx_rx_work(struct work_struct *data);
49extern void __efx_rx_packet(struct efx_channel *channel,
50 struct efx_rx_buffer *rx_buf, bool checksummed);
31extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 51extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
32 unsigned int len, bool checksummed, bool discard); 52 unsigned int len, bool checksummed, bool discard);
33extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 53extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
54#define EFX_RXQ_SIZE 1024
55#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
34 56
35/* Channels */ 57/* Channels */
36extern void efx_process_channel_now(struct efx_channel *channel); 58extern void efx_process_channel_now(struct efx_channel *channel);
37extern void efx_flush_queues(struct efx_nic *efx); 59#define EFX_EVQ_SIZE 4096
60#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
38 61
39/* Ports */ 62/* Ports */
40extern void efx_stats_disable(struct efx_nic *efx);
41extern void efx_stats_enable(struct efx_nic *efx);
42extern void efx_reconfigure_port(struct efx_nic *efx); 63extern void efx_reconfigure_port(struct efx_nic *efx);
43extern void __efx_reconfigure_port(struct efx_nic *efx); 64extern void __efx_reconfigure_port(struct efx_nic *efx);
44 65
66/* Ethtool support */
67extern int efx_ethtool_get_settings(struct net_device *net_dev,
68 struct ethtool_cmd *ecmd);
69extern int efx_ethtool_set_settings(struct net_device *net_dev,
70 struct ethtool_cmd *ecmd);
71extern const struct ethtool_ops efx_ethtool_ops;
72
45/* Reset handling */ 73/* Reset handling */
46extern void efx_reset_down(struct efx_nic *efx, enum reset_type method, 74extern void efx_reset_down(struct efx_nic *efx, enum reset_type method,
47 struct ethtool_cmd *ecmd); 75 struct ethtool_cmd *ecmd);
@@ -60,7 +88,9 @@ extern void efx_hex_dump(const u8 *, unsigned int, const char *);
60/* Dummy PHY ops for PHY drivers */ 88/* Dummy PHY ops for PHY drivers */
61extern int efx_port_dummy_op_int(struct efx_nic *efx); 89extern int efx_port_dummy_op_int(struct efx_nic *efx);
62extern void efx_port_dummy_op_void(struct efx_nic *efx); 90extern void efx_port_dummy_op_void(struct efx_nic *efx);
63extern void efx_port_dummy_op_blink(struct efx_nic *efx, bool blink); 91extern void
92efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
93extern bool efx_port_dummy_op_poll(struct efx_nic *efx);
64 94
65/* MTD */ 95/* MTD */
66#ifdef CONFIG_SFC_MTD 96#ifdef CONFIG_SFC_MTD
@@ -84,4 +114,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
84 napi_schedule(&channel->napi_str); 114 napi_schedule(&channel->napi_str);
85} 115}
86 116
117extern void efx_link_status_changed(struct efx_nic *efx);
118
87#endif /* EFX_EFX_H */ 119#endif /* EFX_EFX_H */
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
index 60cbc6e1e66b..fcd14b73f24d 100644
--- a/drivers/net/sfc/enum.h
+++ b/drivers/net/sfc/enum.h
@@ -40,12 +40,6 @@ enum efx_loopback_mode {
40 40
41#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD 41#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
42 42
43extern const char *efx_loopback_mode_names[];
44#define LOOPBACK_MODE_NAME(mode) \
45 STRING_TABLE_LOOKUP(mode, efx_loopback_mode)
46#define LOOPBACK_MODE(efx) \
47 LOOPBACK_MODE_NAME(efx->loopback_mode)
48
49/* These loopbacks occur within the controller */ 43/* These loopbacks occur within the controller */
50#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_GMAC) | \ 44#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_GMAC) | \
51 (1 << LOOPBACK_XGMII)| \ 45 (1 << LOOPBACK_XGMII)| \
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 45018f283ffa..d3da360f09bc 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -16,24 +16,10 @@
16#include "workarounds.h" 16#include "workarounds.h"
17#include "selftest.h" 17#include "selftest.h"
18#include "efx.h" 18#include "efx.h"
19#include "ethtool.h"
20#include "falcon.h" 19#include "falcon.h"
21#include "spi.h" 20#include "spi.h"
22#include "mdio_10g.h" 21#include "mdio_10g.h"
23 22
24const char *efx_loopback_mode_names[] = {
25 [LOOPBACK_NONE] = "NONE",
26 [LOOPBACK_GMAC] = "GMAC",
27 [LOOPBACK_XGMII] = "XGMII",
28 [LOOPBACK_XGXS] = "XGXS",
29 [LOOPBACK_XAUI] = "XAUI",
30 [LOOPBACK_GPHY] = "GPHY",
31 [LOOPBACK_PHYXS] = "PHYXS",
32 [LOOPBACK_PCS] = "PCS",
33 [LOOPBACK_PMAPMD] = "PMA/PMD",
34 [LOOPBACK_NETWORK] = "NETWORK",
35};
36
37struct ethtool_string { 23struct ethtool_string {
38 char name[ETH_GSTRING_LEN]; 24 char name[ETH_GSTRING_LEN];
39}; 25};
@@ -167,6 +153,7 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
167 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), 153 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
168 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), 154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
169 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), 155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
170 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 157 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
171}; 158};
172 159
@@ -187,13 +174,15 @@ static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count)
187{ 174{
188 struct efx_nic *efx = netdev_priv(net_dev); 175 struct efx_nic *efx = netdev_priv(net_dev);
189 176
190 efx->board_info.blink(efx, 1); 177 do {
191 set_current_state(TASK_INTERRUPTIBLE); 178 falcon_board(efx)->type->set_id_led(efx, EFX_LED_ON);
192 if (count) 179 schedule_timeout_interruptible(HZ / 2);
193 schedule_timeout(count * HZ); 180
194 else 181 falcon_board(efx)->type->set_id_led(efx, EFX_LED_OFF);
195 schedule(); 182 schedule_timeout_interruptible(HZ / 2);
196 efx->board_info.blink(efx, 0); 183 } while (!signal_pending(current) && --count != 0);
184
185 falcon_board(efx)->type->set_id_led(efx, EFX_LED_DEFAULT);
197 return 0; 186 return 0;
198} 187}
199 188
@@ -289,7 +278,7 @@ static void efx_fill_test(unsigned int test_index,
289#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue 278#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
290#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue 279#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
291#define EFX_LOOPBACK_NAME(_mode, _counter) \ 280#define EFX_LOOPBACK_NAME(_mode, _counter) \
292 "loopback.%s." _counter, LOOPBACK_MODE_NAME(mode) 281 "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
293 282
294/** 283/**
295 * efx_fill_loopback_test - fill in a block of loopback self-test entries 284 * efx_fill_loopback_test - fill in a block of loopback self-test entries
@@ -537,7 +526,7 @@ static u32 efx_ethtool_get_link(struct net_device *net_dev)
537{ 526{
538 struct efx_nic *efx = netdev_priv(net_dev); 527 struct efx_nic *efx = netdev_priv(net_dev);
539 528
540 return efx->link_up; 529 return efx->link_state.up;
541} 530}
542 531
543static int efx_ethtool_get_eeprom_len(struct net_device *net_dev) 532static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
@@ -618,6 +607,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
618 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; 607 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
619 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; 608 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
620 609
610 coalesce->tx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
611 coalesce->rx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
612
621 return 0; 613 return 0;
622} 614}
623 615
@@ -656,11 +648,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
656 } 648 }
657 649
658 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); 650 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
659
660 /* Reset channel to pick up new moderation value. Note that
661 * this may change the value of the irq_moderation field
662 * (e.g. to allow for hardware timer granularity).
663 */
664 efx_for_each_channel(channel, efx) 651 efx_for_each_channel(channel, efx)
665 falcon_set_int_moderation(channel); 652 falcon_set_int_moderation(channel);
666 653
@@ -697,9 +684,9 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
697 * and fix it be cycling transmit flow control on this end. */ 684 * and fix it be cycling transmit flow control on this end. */
698 reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); 685 reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
699 if (EFX_WORKAROUND_11482(efx) && reset) { 686 if (EFX_WORKAROUND_11482(efx) && reset) {
700 if (falcon_rev(efx) >= FALCON_REV_B0) { 687 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
701 /* Recover by resetting the EM block */ 688 /* Recover by resetting the EM block */
702 if (efx->link_up) 689 if (efx->link_state.up)
703 falcon_drain_tx_fifo(efx); 690 falcon_drain_tx_fifo(efx);
704 } else { 691 } else {
705 /* Schedule a reset to recover */ 692 /* Schedule a reset to recover */
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h
deleted file mode 100644
index 295ead403356..000000000000
--- a/drivers/net/sfc/ethtool.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005 Fen Systems Ltd.
4 * Copyright 2006 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_ETHTOOL_H
12#define EFX_ETHTOOL_H
13
14#include "net_driver.h"
15
16/*
17 * Ethtool support
18 */
19
20extern int efx_ethtool_get_settings(struct net_device *net_dev,
21 struct ethtool_cmd *ecmd);
22extern int efx_ethtool_set_settings(struct net_device *net_dev,
23 struct ethtool_cmd *ecmd);
24
25extern const struct ethtool_ops efx_ethtool_ops;
26
27#endif /* EFX_ETHTOOL_H */
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c049364aec46..2f219ce61392 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -14,7 +14,6 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/seq_file.h> 15#include <linux/seq_file.h>
16#include <linux/i2c.h> 16#include <linux/i2c.h>
17#include <linux/i2c-algo-bit.h>
18#include <linux/mii.h> 17#include <linux/mii.h>
19#include "net_driver.h" 18#include "net_driver.h"
20#include "bitfield.h" 19#include "bitfield.h"
@@ -22,34 +21,13 @@
22#include "mac.h" 21#include "mac.h"
23#include "spi.h" 22#include "spi.h"
24#include "falcon.h" 23#include "falcon.h"
25#include "falcon_hwdefs.h" 24#include "regs.h"
26#include "falcon_io.h" 25#include "io.h"
27#include "mdio_10g.h" 26#include "mdio_10g.h"
28#include "phy.h" 27#include "phy.h"
29#include "boards.h"
30#include "workarounds.h" 28#include "workarounds.h"
31 29
32/* Falcon hardware control. 30/* Hardware control for SFC4000 (aka Falcon). */
33 * Falcon is the internal codename for the SFC4000 controller that is
34 * present in SFE400X evaluation boards
35 */
36
37/**
38 * struct falcon_nic_data - Falcon NIC state
39 * @next_buffer_table: First available buffer table id
40 * @pci_dev2: The secondary PCI device if present
41 * @i2c_data: Operations and state for I2C bit-bashing algorithm
42 * @int_error_count: Number of internal errors seen recently
43 * @int_error_expire: Time at which error count will be expired
44 */
45struct falcon_nic_data {
46 unsigned next_buffer_table;
47 struct pci_dev *pci_dev2;
48 struct i2c_algo_bit_data i2c_data;
49
50 unsigned int_error_count;
51 unsigned long int_error_expire;
52};
53 31
54/************************************************************************** 32/**************************************************************************
55 * 33 *
@@ -58,8 +36,6 @@ struct falcon_nic_data {
58 ************************************************************************** 36 **************************************************************************
59 */ 37 */
60 38
61static int disable_dma_stats;
62
63/* This is set to 16 for a good reason. In summary, if larger than 39/* This is set to 16 for a good reason. In summary, if larger than
64 * 16, the descriptor cache holds more than a default socket 40 * 16, the descriptor cache holds more than a default socket
65 * buffer's worth of packets (for UDP we can only have at most one 41 * buffer's worth of packets (for UDP we can only have at most one
@@ -68,12 +44,10 @@ static int disable_dma_stats;
68 * goes idle. 44 * goes idle.
69 */ 45 */
70#define TX_DC_ENTRIES 16 46#define TX_DC_ENTRIES 16
71#define TX_DC_ENTRIES_ORDER 0 47#define TX_DC_ENTRIES_ORDER 1
72#define TX_DC_BASE 0x130000
73 48
74#define RX_DC_ENTRIES 64 49#define RX_DC_ENTRIES 64
75#define RX_DC_ENTRIES_ORDER 2 50#define RX_DC_ENTRIES_ORDER 3
76#define RX_DC_BASE 0x100000
77 51
78static const unsigned int 52static const unsigned int
79/* "Large" EEPROM device: Atmel AT25640 or similar 53/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -109,21 +83,6 @@ static int rx_xon_thresh_bytes = -1;
109module_param(rx_xon_thresh_bytes, int, 0644); 83module_param(rx_xon_thresh_bytes, int, 0644);
110MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); 84MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
111 85
112/* TX descriptor ring size - min 512 max 4k */
113#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
114#define FALCON_TXD_RING_SIZE 1024
115#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
116
117/* RX descriptor ring size - min 512 max 4k */
118#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
119#define FALCON_RXD_RING_SIZE 1024
120#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
121
122/* Event queue size - max 32k */
123#define FALCON_EVQ_ORDER EVQ_SIZE_4K
124#define FALCON_EVQ_SIZE 4096
125#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
126
127/* If FALCON_MAX_INT_ERRORS internal errors occur within 86/* If FALCON_MAX_INT_ERRORS internal errors occur within
128 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 87 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
129 * disable it. 88 * disable it.
@@ -143,20 +102,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
143 ************************************************************************** 102 **************************************************************************
144 */ 103 */
145 104
146/* DMA address mask */
147#define FALCON_DMA_MASK DMA_BIT_MASK(46)
148
149/* TX DMA length mask (13-bit) */
150#define FALCON_TX_DMA_MASK (4096 - 1)
151
152/* Size and alignment of special buffers (4KB) */ 105/* Size and alignment of special buffers (4KB) */
153#define FALCON_BUF_SIZE 4096 106#define FALCON_BUF_SIZE 4096
154 107
155/* Dummy SRAM size code */ 108/* Depth of RX flush request fifo */
156#define SRM_NB_BSZ_ONCHIP_ONLY (-1) 109#define FALCON_RX_FLUSH_COUNT 4
157 110
158#define FALCON_IS_DUAL_FUNC(efx) \ 111#define FALCON_IS_DUAL_FUNC(efx) \
159 (falcon_rev(efx) < FALCON_REV_B0) 112 (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
160 113
161/************************************************************************** 114/**************************************************************************
162 * 115 *
@@ -164,6 +117,13 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
164 * 117 *
165 **************************************************************************/ 118 **************************************************************************/
166 119
120static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
121 unsigned int index)
122{
123 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
124 value, index);
125}
126
167/* Read the current event from the event queue */ 127/* Read the current event from the event queue */
168static inline efx_qword_t *falcon_event(struct efx_channel *channel, 128static inline efx_qword_t *falcon_event(struct efx_channel *channel,
169 unsigned int index) 129 unsigned int index)
@@ -200,9 +160,9 @@ static void falcon_setsda(void *data, int state)
200 struct efx_nic *efx = (struct efx_nic *)data; 160 struct efx_nic *efx = (struct efx_nic *)data;
201 efx_oword_t reg; 161 efx_oword_t reg;
202 162
203 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 163 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
204 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state); 164 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
205 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 165 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
206} 166}
207 167
208static void falcon_setscl(void *data, int state) 168static void falcon_setscl(void *data, int state)
@@ -210,9 +170,9 @@ static void falcon_setscl(void *data, int state)
210 struct efx_nic *efx = (struct efx_nic *)data; 170 struct efx_nic *efx = (struct efx_nic *)data;
211 efx_oword_t reg; 171 efx_oword_t reg;
212 172
213 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 173 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
214 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state); 174 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
215 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 175 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
216} 176}
217 177
218static int falcon_getsda(void *data) 178static int falcon_getsda(void *data)
@@ -220,8 +180,8 @@ static int falcon_getsda(void *data)
220 struct efx_nic *efx = (struct efx_nic *)data; 180 struct efx_nic *efx = (struct efx_nic *)data;
221 efx_oword_t reg; 181 efx_oword_t reg;
222 182
223 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 183 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
224 return EFX_OWORD_FIELD(reg, GPIO3_IN); 184 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
225} 185}
226 186
227static int falcon_getscl(void *data) 187static int falcon_getscl(void *data)
@@ -229,8 +189,8 @@ static int falcon_getscl(void *data)
229 struct efx_nic *efx = (struct efx_nic *)data; 189 struct efx_nic *efx = (struct efx_nic *)data;
230 efx_oword_t reg; 190 efx_oword_t reg;
231 191
232 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 192 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
233 return EFX_OWORD_FIELD(reg, GPIO0_IN); 193 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
234} 194}
235 195
236static struct i2c_algo_bit_data falcon_i2c_bit_operations = { 196static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
@@ -275,12 +235,11 @@ falcon_init_special_buffer(struct efx_nic *efx,
275 dma_addr = buffer->dma_addr + (i * 4096); 235 dma_addr = buffer->dma_addr + (i * 4096);
276 EFX_LOG(efx, "mapping special buffer %d at %llx\n", 236 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
277 index, (unsigned long long)dma_addr); 237 index, (unsigned long long)dma_addr);
278 EFX_POPULATE_QWORD_4(buf_desc, 238 EFX_POPULATE_QWORD_3(buf_desc,
279 IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K, 239 FRF_AZ_BUF_ADR_REGION, 0,
280 BUF_ADR_REGION, 0, 240 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
281 BUF_ADR_FBUF, (dma_addr >> 12), 241 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
282 BUF_OWNER_ID_FBUF, 0); 242 falcon_write_buf_tbl(efx, &buf_desc, index);
283 falcon_write_sram(efx, &buf_desc, index);
284 } 243 }
285} 244}
286 245
@@ -300,11 +259,11 @@ falcon_fini_special_buffer(struct efx_nic *efx,
300 buffer->index, buffer->index + buffer->entries - 1); 259 buffer->index, buffer->index + buffer->entries - 1);
301 260
302 EFX_POPULATE_OWORD_4(buf_tbl_upd, 261 EFX_POPULATE_OWORD_4(buf_tbl_upd,
303 BUF_UPD_CMD, 0, 262 FRF_AZ_BUF_UPD_CMD, 0,
304 BUF_CLR_CMD, 1, 263 FRF_AZ_BUF_CLR_CMD, 1,
305 BUF_CLR_END_ID, end, 264 FRF_AZ_BUF_CLR_END_ID, end,
306 BUF_CLR_START_ID, start); 265 FRF_AZ_BUF_CLR_START_ID, start);
307 falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER); 266 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
308} 267}
309 268
310/* 269/*
@@ -320,8 +279,6 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
320 struct efx_special_buffer *buffer, 279 struct efx_special_buffer *buffer,
321 unsigned int len) 280 unsigned int len)
322{ 281{
323 struct falcon_nic_data *nic_data = efx->nic_data;
324
325 len = ALIGN(len, FALCON_BUF_SIZE); 282 len = ALIGN(len, FALCON_BUF_SIZE);
326 283
327 buffer->addr = pci_alloc_consistent(efx->pci_dev, len, 284 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
@@ -336,8 +293,8 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
336 memset(buffer->addr, 0xff, len); 293 memset(buffer->addr, 0xff, len);
337 294
338 /* Select new buffer ID */ 295 /* Select new buffer ID */
339 buffer->index = nic_data->next_buffer_table; 296 buffer->index = efx->next_buffer_table;
340 nic_data->next_buffer_table += buffer->entries; 297 efx->next_buffer_table += buffer->entries;
341 298
342 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " 299 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
343 "(virt %p phys %llx)\n", buffer->index, 300 "(virt %p phys %llx)\n", buffer->index,
@@ -415,10 +372,10 @@ static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
415 unsigned write_ptr; 372 unsigned write_ptr;
416 efx_dword_t reg; 373 efx_dword_t reg;
417 374
418 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; 375 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
419 EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr); 376 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
420 falcon_writel_page(tx_queue->efx, &reg, 377 efx_writed_page(tx_queue->efx, &reg,
421 TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue); 378 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
422} 379}
423 380
424 381
@@ -436,18 +393,17 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
436 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 393 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
437 394
438 do { 395 do {
439 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; 396 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
440 buffer = &tx_queue->buffer[write_ptr]; 397 buffer = &tx_queue->buffer[write_ptr];
441 txd = falcon_tx_desc(tx_queue, write_ptr); 398 txd = falcon_tx_desc(tx_queue, write_ptr);
442 ++tx_queue->write_count; 399 ++tx_queue->write_count;
443 400
444 /* Create TX descriptor ring entry */ 401 /* Create TX descriptor ring entry */
445 EFX_POPULATE_QWORD_5(*txd, 402 EFX_POPULATE_QWORD_4(*txd,
446 TX_KER_PORT, 0, 403 FSF_AZ_TX_KER_CONT, buffer->continuation,
447 TX_KER_CONT, buffer->continuation, 404 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
448 TX_KER_BYTE_CNT, buffer->len, 405 FSF_AZ_TX_KER_BUF_REGION, 0,
449 TX_KER_BUF_REGION, 0, 406 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
450 TX_KER_BUF_ADR, buffer->dma_addr);
451 } while (tx_queue->write_count != tx_queue->insert_count); 407 } while (tx_queue->write_count != tx_queue->insert_count);
452 408
453 wmb(); /* Ensure descriptors are written before they are fetched */ 409 wmb(); /* Ensure descriptors are written before they are fetched */
@@ -458,9 +414,10 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
458int falcon_probe_tx(struct efx_tx_queue *tx_queue) 414int falcon_probe_tx(struct efx_tx_queue *tx_queue)
459{ 415{
460 struct efx_nic *efx = tx_queue->efx; 416 struct efx_nic *efx = tx_queue->efx;
417 BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
418 EFX_TXQ_SIZE & EFX_TXQ_MASK);
461 return falcon_alloc_special_buffer(efx, &tx_queue->txd, 419 return falcon_alloc_special_buffer(efx, &tx_queue->txd,
462 FALCON_TXD_RING_SIZE * 420 EFX_TXQ_SIZE * sizeof(efx_qword_t));
463 sizeof(efx_qword_t));
464} 421}
465 422
466void falcon_init_tx(struct efx_tx_queue *tx_queue) 423void falcon_init_tx(struct efx_tx_queue *tx_queue)
@@ -468,45 +425,48 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
468 efx_oword_t tx_desc_ptr; 425 efx_oword_t tx_desc_ptr;
469 struct efx_nic *efx = tx_queue->efx; 426 struct efx_nic *efx = tx_queue->efx;
470 427
471 tx_queue->flushed = false; 428 tx_queue->flushed = FLUSH_NONE;
472 429
473 /* Pin TX descriptor ring */ 430 /* Pin TX descriptor ring */
474 falcon_init_special_buffer(efx, &tx_queue->txd); 431 falcon_init_special_buffer(efx, &tx_queue->txd);
475 432
476 /* Push TX descriptor ring to card */ 433 /* Push TX descriptor ring to card */
477 EFX_POPULATE_OWORD_10(tx_desc_ptr, 434 EFX_POPULATE_OWORD_10(tx_desc_ptr,
478 TX_DESCQ_EN, 1, 435 FRF_AZ_TX_DESCQ_EN, 1,
479 TX_ISCSI_DDIG_EN, 0, 436 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
480 TX_ISCSI_HDIG_EN, 0, 437 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
481 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 438 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
482 TX_DESCQ_EVQ_ID, tx_queue->channel->channel, 439 FRF_AZ_TX_DESCQ_EVQ_ID,
483 TX_DESCQ_OWNER_ID, 0, 440 tx_queue->channel->channel,
484 TX_DESCQ_LABEL, tx_queue->queue, 441 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
485 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, 442 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
486 TX_DESCQ_TYPE, 0, 443 FRF_AZ_TX_DESCQ_SIZE,
487 TX_NON_IP_DROP_DIS_B0, 1); 444 __ffs(tx_queue->txd.entries),
488 445 FRF_AZ_TX_DESCQ_TYPE, 0,
489 if (falcon_rev(efx) >= FALCON_REV_B0) { 446 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
447
448 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
490 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; 449 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
491 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum); 450 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
492 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum); 451 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
452 !csum);
493 } 453 }
494 454
495 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 455 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
496 tx_queue->queue); 456 tx_queue->queue);
497 457
498 if (falcon_rev(efx) < FALCON_REV_B0) { 458 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
499 efx_oword_t reg; 459 efx_oword_t reg;
500 460
501 /* Only 128 bits in this register */ 461 /* Only 128 bits in this register */
502 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); 462 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
503 463
504 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 464 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
505 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) 465 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
506 clear_bit_le(tx_queue->queue, (void *)&reg); 466 clear_bit_le(tx_queue->queue, (void *)&reg);
507 else 467 else
508 set_bit_le(tx_queue->queue, (void *)&reg); 468 set_bit_le(tx_queue->queue, (void *)&reg);
509 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 469 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
510 } 470 }
511} 471}
512 472
@@ -515,11 +475,13 @@ static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
515 struct efx_nic *efx = tx_queue->efx; 475 struct efx_nic *efx = tx_queue->efx;
516 efx_oword_t tx_flush_descq; 476 efx_oword_t tx_flush_descq;
517 477
478 tx_queue->flushed = FLUSH_PENDING;
479
518 /* Post a flush command */ 480 /* Post a flush command */
519 EFX_POPULATE_OWORD_2(tx_flush_descq, 481 EFX_POPULATE_OWORD_2(tx_flush_descq,
520 TX_FLUSH_DESCQ_CMD, 1, 482 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
521 TX_FLUSH_DESCQ, tx_queue->queue); 483 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
522 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); 484 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
523} 485}
524 486
525void falcon_fini_tx(struct efx_tx_queue *tx_queue) 487void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -528,12 +490,12 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
528 efx_oword_t tx_desc_ptr; 490 efx_oword_t tx_desc_ptr;
529 491
530 /* The queue should have been flushed */ 492 /* The queue should have been flushed */
531 WARN_ON(!tx_queue->flushed); 493 WARN_ON(tx_queue->flushed != FLUSH_DONE);
532 494
533 /* Remove TX descriptor ring from card */ 495 /* Remove TX descriptor ring from card */
534 EFX_ZERO_OWORD(tx_desc_ptr); 496 EFX_ZERO_OWORD(tx_desc_ptr);
535 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 497 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
536 tx_queue->queue); 498 tx_queue->queue);
537 499
538 /* Unpin TX descriptor ring */ 500 /* Unpin TX descriptor ring */
539 falcon_fini_special_buffer(efx, &tx_queue->txd); 501 falcon_fini_special_buffer(efx, &tx_queue->txd);
@@ -568,11 +530,11 @@ static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
568 rxd = falcon_rx_desc(rx_queue, index); 530 rxd = falcon_rx_desc(rx_queue, index);
569 rx_buf = efx_rx_buffer(rx_queue, index); 531 rx_buf = efx_rx_buffer(rx_queue, index);
570 EFX_POPULATE_QWORD_3(*rxd, 532 EFX_POPULATE_QWORD_3(*rxd,
571 RX_KER_BUF_SIZE, 533 FSF_AZ_RX_KER_BUF_SIZE,
572 rx_buf->len - 534 rx_buf->len -
573 rx_queue->efx->type->rx_buffer_padding, 535 rx_queue->efx->type->rx_buffer_padding,
574 RX_KER_BUF_REGION, 0, 536 FSF_AZ_RX_KER_BUF_REGION, 0,
575 RX_KER_BUF_ADR, rx_buf->dma_addr); 537 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
576} 538}
577 539
578/* This writes to the RX_DESC_WPTR register for the specified receive 540/* This writes to the RX_DESC_WPTR register for the specified receive
@@ -586,56 +548,59 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
586 while (rx_queue->notified_count != rx_queue->added_count) { 548 while (rx_queue->notified_count != rx_queue->added_count) {
587 falcon_build_rx_desc(rx_queue, 549 falcon_build_rx_desc(rx_queue,
588 rx_queue->notified_count & 550 rx_queue->notified_count &
589 FALCON_RXD_RING_MASK); 551 EFX_RXQ_MASK);
590 ++rx_queue->notified_count; 552 ++rx_queue->notified_count;
591 } 553 }
592 554
593 wmb(); 555 wmb();
594 write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK; 556 write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
595 EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr); 557 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
596 falcon_writel_page(rx_queue->efx, &reg, 558 efx_writed_page(rx_queue->efx, &reg,
597 RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue); 559 FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
598} 560}
599 561
600int falcon_probe_rx(struct efx_rx_queue *rx_queue) 562int falcon_probe_rx(struct efx_rx_queue *rx_queue)
601{ 563{
602 struct efx_nic *efx = rx_queue->efx; 564 struct efx_nic *efx = rx_queue->efx;
565 BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
566 EFX_RXQ_SIZE & EFX_RXQ_MASK);
603 return falcon_alloc_special_buffer(efx, &rx_queue->rxd, 567 return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
604 FALCON_RXD_RING_SIZE * 568 EFX_RXQ_SIZE * sizeof(efx_qword_t));
605 sizeof(efx_qword_t));
606} 569}
607 570
608void falcon_init_rx(struct efx_rx_queue *rx_queue) 571void falcon_init_rx(struct efx_rx_queue *rx_queue)
609{ 572{
610 efx_oword_t rx_desc_ptr; 573 efx_oword_t rx_desc_ptr;
611 struct efx_nic *efx = rx_queue->efx; 574 struct efx_nic *efx = rx_queue->efx;
612 bool is_b0 = falcon_rev(efx) >= FALCON_REV_B0; 575 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
613 bool iscsi_digest_en = is_b0; 576 bool iscsi_digest_en = is_b0;
614 577
615 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", 578 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
616 rx_queue->queue, rx_queue->rxd.index, 579 rx_queue->queue, rx_queue->rxd.index,
617 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 580 rx_queue->rxd.index + rx_queue->rxd.entries - 1);
618 581
619 rx_queue->flushed = false; 582 rx_queue->flushed = FLUSH_NONE;
620 583
621 /* Pin RX descriptor ring */ 584 /* Pin RX descriptor ring */
622 falcon_init_special_buffer(efx, &rx_queue->rxd); 585 falcon_init_special_buffer(efx, &rx_queue->rxd);
623 586
624 /* Push RX descriptor ring to card */ 587 /* Push RX descriptor ring to card */
625 EFX_POPULATE_OWORD_10(rx_desc_ptr, 588 EFX_POPULATE_OWORD_10(rx_desc_ptr,
626 RX_ISCSI_DDIG_EN, iscsi_digest_en, 589 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
627 RX_ISCSI_HDIG_EN, iscsi_digest_en, 590 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
628 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 591 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
629 RX_DESCQ_EVQ_ID, rx_queue->channel->channel, 592 FRF_AZ_RX_DESCQ_EVQ_ID,
630 RX_DESCQ_OWNER_ID, 0, 593 rx_queue->channel->channel,
631 RX_DESCQ_LABEL, rx_queue->queue, 594 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
632 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, 595 FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
633 RX_DESCQ_TYPE, 0 /* kernel queue */ , 596 FRF_AZ_RX_DESCQ_SIZE,
597 __ffs(rx_queue->rxd.entries),
598 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
634 /* For >=B0 this is scatter so disable */ 599 /* For >=B0 this is scatter so disable */
635 RX_DESCQ_JUMBO, !is_b0, 600 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
636 RX_DESCQ_EN, 1); 601 FRF_AZ_RX_DESCQ_EN, 1);
637 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 602 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
638 rx_queue->queue); 603 rx_queue->queue);
639} 604}
640 605
641static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 606static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -643,11 +608,13 @@ static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
643 struct efx_nic *efx = rx_queue->efx; 608 struct efx_nic *efx = rx_queue->efx;
644 efx_oword_t rx_flush_descq; 609 efx_oword_t rx_flush_descq;
645 610
611 rx_queue->flushed = FLUSH_PENDING;
612
646 /* Post a flush command */ 613 /* Post a flush command */
647 EFX_POPULATE_OWORD_2(rx_flush_descq, 614 EFX_POPULATE_OWORD_2(rx_flush_descq,
648 RX_FLUSH_DESCQ_CMD, 1, 615 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
649 RX_FLUSH_DESCQ, rx_queue->queue); 616 FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
650 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); 617 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
651} 618}
652 619
653void falcon_fini_rx(struct efx_rx_queue *rx_queue) 620void falcon_fini_rx(struct efx_rx_queue *rx_queue)
@@ -656,12 +623,12 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
656 struct efx_nic *efx = rx_queue->efx; 623 struct efx_nic *efx = rx_queue->efx;
657 624
658 /* The queue should already have been flushed */ 625 /* The queue should already have been flushed */
659 WARN_ON(!rx_queue->flushed); 626 WARN_ON(rx_queue->flushed != FLUSH_DONE);
660 627
661 /* Remove RX descriptor ring from card */ 628 /* Remove RX descriptor ring from card */
662 EFX_ZERO_OWORD(rx_desc_ptr); 629 EFX_ZERO_OWORD(rx_desc_ptr);
663 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 630 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
664 rx_queue->queue); 631 rx_queue->queue);
665 632
666 /* Unpin RX descriptor ring */ 633 /* Unpin RX descriptor ring */
667 falcon_fini_special_buffer(efx, &rx_queue->rxd); 634 falcon_fini_special_buffer(efx, &rx_queue->rxd);
@@ -694,8 +661,8 @@ void falcon_eventq_read_ack(struct efx_channel *channel)
694 efx_dword_t reg; 661 efx_dword_t reg;
695 struct efx_nic *efx = channel->efx; 662 struct efx_nic *efx = channel->efx;
696 663
697 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); 664 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
698 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base, 665 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
699 channel->channel); 666 channel->channel);
700} 667}
701 668
@@ -704,11 +671,14 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
704{ 671{
705 efx_oword_t drv_ev_reg; 672 efx_oword_t drv_ev_reg;
706 673
707 EFX_POPULATE_OWORD_2(drv_ev_reg, 674 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
708 DRV_EV_QID, channel->channel, 675 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
709 DRV_EV_DATA, 676 drv_ev_reg.u32[0] = event->u32[0];
710 EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); 677 drv_ev_reg.u32[1] = event->u32[1];
711 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); 678 drv_ev_reg.u32[2] = 0;
679 drv_ev_reg.u32[3] = 0;
680 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
681 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
712} 682}
713 683
714/* Handle a transmit completion event 684/* Handle a transmit completion event
@@ -724,18 +694,18 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
724 struct efx_tx_queue *tx_queue; 694 struct efx_tx_queue *tx_queue;
725 struct efx_nic *efx = channel->efx; 695 struct efx_nic *efx = channel->efx;
726 696
727 if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) { 697 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
728 /* Transmit completion */ 698 /* Transmit completion */
729 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR); 699 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
730 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 700 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
731 tx_queue = &efx->tx_queue[tx_ev_q_label]; 701 tx_queue = &efx->tx_queue[tx_ev_q_label];
732 channel->irq_mod_score += 702 channel->irq_mod_score +=
733 (tx_ev_desc_ptr - tx_queue->read_count) & 703 (tx_ev_desc_ptr - tx_queue->read_count) &
734 efx->type->txd_ring_mask; 704 EFX_TXQ_MASK;
735 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 705 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
736 } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) { 706 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
737 /* Rewrite the FIFO write pointer */ 707 /* Rewrite the FIFO write pointer */
738 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 708 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
739 tx_queue = &efx->tx_queue[tx_ev_q_label]; 709 tx_queue = &efx->tx_queue[tx_ev_q_label];
740 710
741 if (efx_dev_registered(efx)) 711 if (efx_dev_registered(efx))
@@ -743,7 +713,7 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
743 falcon_notify_tx_desc(tx_queue); 713 falcon_notify_tx_desc(tx_queue);
744 if (efx_dev_registered(efx)) 714 if (efx_dev_registered(efx))
745 netif_tx_unlock(efx->net_dev); 715 netif_tx_unlock(efx->net_dev);
746 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 716 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
747 EFX_WORKAROUND_10727(efx)) { 717 EFX_WORKAROUND_10727(efx)) {
748 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 718 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
749 } else { 719 } else {
@@ -764,25 +734,24 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
764 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 734 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
765 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 735 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
766 bool rx_ev_other_err, rx_ev_pause_frm; 736 bool rx_ev_other_err, rx_ev_pause_frm;
767 bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; 737 bool rx_ev_hdr_type, rx_ev_mcast_pkt;
768 unsigned rx_ev_pkt_type; 738 unsigned rx_ev_pkt_type;
769 739
770 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 740 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
771 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 741 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
772 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC); 742 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
773 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE); 743 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
774 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 744 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
775 RX_EV_BUF_OWNER_ID_ERR); 745 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
776 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
777 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 746 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
778 RX_EV_IP_HDR_CHKSUM_ERR); 747 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
779 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 748 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
780 RX_EV_TCP_UDP_CHKSUM_ERR); 749 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
781 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 750 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
782 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 751 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
783 rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? 752 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
784 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 753 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
785 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 754 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
786 755
787 /* Every error apart from tobe_disc and pause_frm */ 756 /* Every error apart from tobe_disc and pause_frm */
788 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 757 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
@@ -801,8 +770,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
801 else if (rx_ev_tcp_udp_chksum_err) 770 else if (rx_ev_tcp_udp_chksum_err)
802 ++rx_queue->channel->n_rx_tcp_udp_chksum_err; 771 ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
803 } 772 }
804 if (rx_ev_ip_frag_err)
805 ++rx_queue->channel->n_rx_ip_frag_err;
806 773
807 /* The frame must be discarded if any of these are true. */ 774 /* The frame must be discarded if any of these are true. */
808 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | 775 *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
@@ -838,9 +805,8 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
838 struct efx_nic *efx = rx_queue->efx; 805 struct efx_nic *efx = rx_queue->efx;
839 unsigned expected, dropped; 806 unsigned expected, dropped;
840 807
841 expected = rx_queue->removed_count & FALCON_RXD_RING_MASK; 808 expected = rx_queue->removed_count & EFX_RXQ_MASK;
842 dropped = ((index + FALCON_RXD_RING_SIZE - expected) & 809 dropped = (index - expected) & EFX_RXQ_MASK;
843 FALCON_RXD_RING_MASK);
844 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", 810 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
845 dropped, index, expected); 811 dropped, index, expected);
846 812
@@ -866,17 +832,18 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
866 struct efx_nic *efx = channel->efx; 832 struct efx_nic *efx = channel->efx;
867 833
868 /* Basic packet information */ 834 /* Basic packet information */
869 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT); 835 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
870 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK); 836 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
871 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 837 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
872 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); 838 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
873 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); 839 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
874 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel); 840 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
841 channel->channel);
875 842
876 rx_queue = &efx->rx_queue[channel->channel]; 843 rx_queue = &efx->rx_queue[channel->channel];
877 844
878 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); 845 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
879 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; 846 expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
880 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 847 if (unlikely(rx_ev_desc_ptr != expected_ptr))
881 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 848 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
882 849
@@ -884,7 +851,10 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
884 /* If packet is marked as OK and packet type is TCP/IPv4 or 851 /* If packet is marked as OK and packet type is TCP/IPv4 or
885 * UDP/IPv4, then we can rely on the hardware checksum. 852 * UDP/IPv4, then we can rely on the hardware checksum.
886 */ 853 */
887 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); 854 checksummed =
855 likely(efx->rx_checksum_enabled) &&
856 (rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
857 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
888 } else { 858 } else {
889 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, 859 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
890 &discard); 860 &discard);
@@ -892,13 +862,15 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
892 } 862 }
893 863
894 /* Detect multicast packets that didn't match the filter */ 864 /* Detect multicast packets that didn't match the filter */
895 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 865 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
896 if (rx_ev_mcast_pkt) { 866 if (rx_ev_mcast_pkt) {
897 unsigned int rx_ev_mcast_hash_match = 867 unsigned int rx_ev_mcast_hash_match =
898 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); 868 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
899 869
900 if (unlikely(!rx_ev_mcast_hash_match)) 870 if (unlikely(!rx_ev_mcast_hash_match)) {
871 ++channel->n_rx_mcast_mismatch;
901 discard = true; 872 discard = true;
873 }
902 } 874 }
903 875
904 channel->irq_mod_score += 2; 876 channel->irq_mod_score += 2;
@@ -915,22 +887,22 @@ static void falcon_handle_global_event(struct efx_channel *channel,
915 struct efx_nic *efx = channel->efx; 887 struct efx_nic *efx = channel->efx;
916 bool handled = false; 888 bool handled = false;
917 889
918 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || 890 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
919 EFX_QWORD_FIELD(*event, G_PHY1_INTR) || 891 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
920 EFX_QWORD_FIELD(*event, XG_PHY_INTR) || 892 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
921 EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) { 893 /* Ignored */
922 efx->phy_op->clear_interrupt(efx);
923 queue_work(efx->workqueue, &efx->phy_work);
924 handled = true; 894 handled = true;
925 } 895 }
926 896
927 if ((falcon_rev(efx) >= FALCON_REV_B0) && 897 if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
928 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) { 898 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
929 queue_work(efx->workqueue, &efx->mac_work); 899 efx->xmac_poll_required = true;
930 handled = true; 900 handled = true;
931 } 901 }
932 902
933 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { 903 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
904 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
905 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
934 EFX_ERR(efx, "channel %d seen global RX_RESET " 906 EFX_ERR(efx, "channel %d seen global RX_RESET "
935 "event. Resetting.\n", channel->channel); 907 "event. Resetting.\n", channel->channel);
936 908
@@ -953,35 +925,35 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
953 unsigned int ev_sub_code; 925 unsigned int ev_sub_code;
954 unsigned int ev_sub_data; 926 unsigned int ev_sub_data;
955 927
956 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 928 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
957 ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA); 929 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
958 930
959 switch (ev_sub_code) { 931 switch (ev_sub_code) {
960 case TX_DESCQ_FLS_DONE_EV_DECODE: 932 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
961 EFX_TRACE(efx, "channel %d TXQ %d flushed\n", 933 EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
962 channel->channel, ev_sub_data); 934 channel->channel, ev_sub_data);
963 break; 935 break;
964 case RX_DESCQ_FLS_DONE_EV_DECODE: 936 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
965 EFX_TRACE(efx, "channel %d RXQ %d flushed\n", 937 EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
966 channel->channel, ev_sub_data); 938 channel->channel, ev_sub_data);
967 break; 939 break;
968 case EVQ_INIT_DONE_EV_DECODE: 940 case FSE_AZ_EVQ_INIT_DONE_EV:
969 EFX_LOG(efx, "channel %d EVQ %d initialised\n", 941 EFX_LOG(efx, "channel %d EVQ %d initialised\n",
970 channel->channel, ev_sub_data); 942 channel->channel, ev_sub_data);
971 break; 943 break;
972 case SRM_UPD_DONE_EV_DECODE: 944 case FSE_AZ_SRM_UPD_DONE_EV:
973 EFX_TRACE(efx, "channel %d SRAM update done\n", 945 EFX_TRACE(efx, "channel %d SRAM update done\n",
974 channel->channel); 946 channel->channel);
975 break; 947 break;
976 case WAKE_UP_EV_DECODE: 948 case FSE_AZ_WAKE_UP_EV:
977 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", 949 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
978 channel->channel, ev_sub_data); 950 channel->channel, ev_sub_data);
979 break; 951 break;
980 case TIMER_EV_DECODE: 952 case FSE_AZ_TIMER_EV:
981 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", 953 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
982 channel->channel, ev_sub_data); 954 channel->channel, ev_sub_data);
983 break; 955 break;
984 case RX_RECOVERY_EV_DECODE: 956 case FSE_AA_RX_RECOVER_EV:
985 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 957 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
986 "Resetting.\n", channel->channel); 958 "Resetting.\n", channel->channel);
987 atomic_inc(&efx->rx_reset); 959 atomic_inc(&efx->rx_reset);
@@ -990,12 +962,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
990 RESET_TYPE_RX_RECOVERY : 962 RESET_TYPE_RX_RECOVERY :
991 RESET_TYPE_DISABLE); 963 RESET_TYPE_DISABLE);
992 break; 964 break;
993 case RX_DSC_ERROR_EV_DECODE: 965 case FSE_BZ_RX_DSC_ERROR_EV:
994 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." 966 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
995 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 967 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
996 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 968 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
997 break; 969 break;
998 case TX_DSC_ERROR_EV_DECODE: 970 case FSE_BZ_TX_DSC_ERROR_EV:
999 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." 971 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
1000 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 972 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1001 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 973 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -1031,27 +1003,27 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1031 /* Clear this event by marking it all ones */ 1003 /* Clear this event by marking it all ones */
1032 EFX_SET_QWORD(*p_event); 1004 EFX_SET_QWORD(*p_event);
1033 1005
1034 ev_code = EFX_QWORD_FIELD(event, EV_CODE); 1006 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1035 1007
1036 switch (ev_code) { 1008 switch (ev_code) {
1037 case RX_IP_EV_DECODE: 1009 case FSE_AZ_EV_CODE_RX_EV:
1038 falcon_handle_rx_event(channel, &event); 1010 falcon_handle_rx_event(channel, &event);
1039 ++rx_packets; 1011 ++rx_packets;
1040 break; 1012 break;
1041 case TX_IP_EV_DECODE: 1013 case FSE_AZ_EV_CODE_TX_EV:
1042 falcon_handle_tx_event(channel, &event); 1014 falcon_handle_tx_event(channel, &event);
1043 break; 1015 break;
1044 case DRV_GEN_EV_DECODE: 1016 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1045 channel->eventq_magic 1017 channel->eventq_magic = EFX_QWORD_FIELD(
1046 = EFX_QWORD_FIELD(event, EVQ_MAGIC); 1018 event, FSF_AZ_DRV_GEN_EV_MAGIC);
1047 EFX_LOG(channel->efx, "channel %d received generated " 1019 EFX_LOG(channel->efx, "channel %d received generated "
1048 "event "EFX_QWORD_FMT"\n", channel->channel, 1020 "event "EFX_QWORD_FMT"\n", channel->channel,
1049 EFX_QWORD_VAL(event)); 1021 EFX_QWORD_VAL(event));
1050 break; 1022 break;
1051 case GLOBAL_EV_DECODE: 1023 case FSE_AZ_EV_CODE_GLOBAL_EV:
1052 falcon_handle_global_event(channel, &event); 1024 falcon_handle_global_event(channel, &event);
1053 break; 1025 break;
1054 case DRIVER_EV_DECODE: 1026 case FSE_AZ_EV_CODE_DRIVER_EV:
1055 falcon_handle_driver_event(channel, &event); 1027 falcon_handle_driver_event(channel, &event);
1056 break; 1028 break;
1057 default: 1029 default:
@@ -1061,7 +1033,7 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1061 } 1033 }
1062 1034
1063 /* Increment read pointer */ 1035 /* Increment read pointer */
1064 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1036 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1065 1037
1066 } while (rx_packets < rx_quota); 1038 } while (rx_packets < rx_quota);
1067 1039
@@ -1076,26 +1048,20 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1076 1048
1077 /* Set timer register */ 1049 /* Set timer register */
1078 if (channel->irq_moderation) { 1050 if (channel->irq_moderation) {
1079 /* Round to resolution supported by hardware. The value we
1080 * program is based at 0. So actual interrupt moderation
1081 * achieved is ((x + 1) * res).
1082 */
1083 channel->irq_moderation -= (channel->irq_moderation %
1084 FALCON_IRQ_MOD_RESOLUTION);
1085 if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
1086 channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
1087 EFX_POPULATE_DWORD_2(timer_cmd, 1051 EFX_POPULATE_DWORD_2(timer_cmd,
1088 TIMER_MODE, TIMER_MODE_INT_HLDOFF, 1052 FRF_AB_TC_TIMER_MODE,
1089 TIMER_VAL, 1053 FFE_BB_TIMER_MODE_INT_HLDOFF,
1090 channel->irq_moderation / 1054 FRF_AB_TC_TIMER_VAL,
1091 FALCON_IRQ_MOD_RESOLUTION - 1); 1055 channel->irq_moderation - 1);
1092 } else { 1056 } else {
1093 EFX_POPULATE_DWORD_2(timer_cmd, 1057 EFX_POPULATE_DWORD_2(timer_cmd,
1094 TIMER_MODE, TIMER_MODE_DIS, 1058 FRF_AB_TC_TIMER_MODE,
1095 TIMER_VAL, 0); 1059 FFE_BB_TIMER_MODE_DIS,
1060 FRF_AB_TC_TIMER_VAL, 0);
1096 } 1061 }
1097 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, 1062 BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
1098 channel->channel); 1063 efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
1064 channel->channel);
1099 1065
1100} 1066}
1101 1067
@@ -1103,10 +1069,10 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1103int falcon_probe_eventq(struct efx_channel *channel) 1069int falcon_probe_eventq(struct efx_channel *channel)
1104{ 1070{
1105 struct efx_nic *efx = channel->efx; 1071 struct efx_nic *efx = channel->efx;
1106 unsigned int evq_size; 1072 BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
1107 1073 EFX_EVQ_SIZE & EFX_EVQ_MASK);
1108 evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t); 1074 return falcon_alloc_special_buffer(efx, &channel->eventq,
1109 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); 1075 EFX_EVQ_SIZE * sizeof(efx_qword_t));
1110} 1076}
1111 1077
1112void falcon_init_eventq(struct efx_channel *channel) 1078void falcon_init_eventq(struct efx_channel *channel)
@@ -1126,11 +1092,11 @@ void falcon_init_eventq(struct efx_channel *channel)
1126 1092
1127 /* Push event queue to card */ 1093 /* Push event queue to card */
1128 EFX_POPULATE_OWORD_3(evq_ptr, 1094 EFX_POPULATE_OWORD_3(evq_ptr,
1129 EVQ_EN, 1, 1095 FRF_AZ_EVQ_EN, 1,
1130 EVQ_SIZE, FALCON_EVQ_ORDER, 1096 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1131 EVQ_BUF_BASE_ID, channel->eventq.index); 1097 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1132 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, 1098 efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1133 channel->channel); 1099 channel->channel);
1134 1100
1135 falcon_set_int_moderation(channel); 1101 falcon_set_int_moderation(channel);
1136} 1102}
@@ -1142,8 +1108,8 @@ void falcon_fini_eventq(struct efx_channel *channel)
1142 1108
1143 /* Remove event queue from card */ 1109 /* Remove event queue from card */
1144 EFX_ZERO_OWORD(eventq_ptr); 1110 EFX_ZERO_OWORD(eventq_ptr);
1145 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, 1111 efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1146 channel->channel); 1112 channel->channel);
1147 1113
1148 /* Unpin event queue */ 1114 /* Unpin event queue */
1149 falcon_fini_special_buffer(efx, &channel->eventq); 1115 falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1164,25 +1130,12 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1164{ 1130{
1165 efx_qword_t test_event; 1131 efx_qword_t test_event;
1166 1132
1167 EFX_POPULATE_QWORD_2(test_event, 1133 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1168 EV_CODE, DRV_GEN_EV_DECODE, 1134 FSE_AZ_EV_CODE_DRV_GEN_EV,
1169 EVQ_MAGIC, magic); 1135 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1170 falcon_generate_event(channel, &test_event); 1136 falcon_generate_event(channel, &test_event);
1171} 1137}
1172 1138
1173void falcon_sim_phy_event(struct efx_nic *efx)
1174{
1175 efx_qword_t phy_event;
1176
1177 EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE);
1178 if (EFX_IS10G(efx))
1179 EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1);
1180 else
1181 EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1);
1182
1183 falcon_generate_event(&efx->channel[0], &phy_event);
1184}
1185
1186/************************************************************************** 1139/**************************************************************************
1187 * 1140 *
1188 * Flush handling 1141 * Flush handling
@@ -1196,7 +1149,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1196 struct efx_tx_queue *tx_queue; 1149 struct efx_tx_queue *tx_queue;
1197 struct efx_rx_queue *rx_queue; 1150 struct efx_rx_queue *rx_queue;
1198 unsigned int read_ptr = channel->eventq_read_ptr; 1151 unsigned int read_ptr = channel->eventq_read_ptr;
1199 unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK; 1152 unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
1200 1153
1201 do { 1154 do {
1202 efx_qword_t *event = falcon_event(channel, read_ptr); 1155 efx_qword_t *event = falcon_event(channel, read_ptr);
@@ -1206,35 +1159,48 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1206 if (!falcon_event_present(event)) 1159 if (!falcon_event_present(event))
1207 break; 1160 break;
1208 1161
1209 ev_code = EFX_QWORD_FIELD(*event, EV_CODE); 1162 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1210 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 1163 ev_sub_code = EFX_QWORD_FIELD(*event,
1211 if (ev_code == DRIVER_EV_DECODE && 1164 FSF_AZ_DRIVER_EV_SUBCODE);
1212 ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) { 1165 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1166 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1213 ev_queue = EFX_QWORD_FIELD(*event, 1167 ev_queue = EFX_QWORD_FIELD(*event,
1214 DRIVER_EV_TX_DESCQ_ID); 1168 FSF_AZ_DRIVER_EV_SUBDATA);
1215 if (ev_queue < EFX_TX_QUEUE_COUNT) { 1169 if (ev_queue < EFX_TX_QUEUE_COUNT) {
1216 tx_queue = efx->tx_queue + ev_queue; 1170 tx_queue = efx->tx_queue + ev_queue;
1217 tx_queue->flushed = true; 1171 tx_queue->flushed = FLUSH_DONE;
1218 } 1172 }
1219 } else if (ev_code == DRIVER_EV_DECODE && 1173 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1220 ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) { 1174 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1221 ev_queue = EFX_QWORD_FIELD(*event, 1175 ev_queue = EFX_QWORD_FIELD(
1222 DRIVER_EV_RX_DESCQ_ID); 1176 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1223 ev_failed = EFX_QWORD_FIELD(*event, 1177 ev_failed = EFX_QWORD_FIELD(
1224 DRIVER_EV_RX_FLUSH_FAIL); 1178 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1225 if (ev_queue < efx->n_rx_queues) { 1179 if (ev_queue < efx->n_rx_queues) {
1226 rx_queue = efx->rx_queue + ev_queue; 1180 rx_queue = efx->rx_queue + ev_queue;
1227 1181 rx_queue->flushed =
1228 /* retry the rx flush */ 1182 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1229 if (ev_failed)
1230 falcon_flush_rx_queue(rx_queue);
1231 else
1232 rx_queue->flushed = true;
1233 } 1183 }
1234 } 1184 }
1235 1185
1236 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1186 /* We're about to destroy the queue anyway, so
1187 * it's ok to throw away every non-flush event */
1188 EFX_SET_QWORD(*event);
1189
1190 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1237 } while (read_ptr != end_ptr); 1191 } while (read_ptr != end_ptr);
1192
1193 channel->eventq_read_ptr = read_ptr;
1194}
1195
1196static void falcon_prepare_flush(struct efx_nic *efx)
1197{
1198 falcon_deconfigure_mac_wrapper(efx);
1199
1200 /* Wait for the tx and rx fifo's to get to the next packet boundary
1201 * (~1ms without back-pressure), then to drain the remainder of the
1202 * fifo's at data path speeds (negligible), with a healthy margin. */
1203 msleep(10);
1238} 1204}
1239 1205
1240/* Handle tx and rx flushes at the same time, since they run in 1206/* Handle tx and rx flushes at the same time, since they run in
@@ -1244,50 +1210,56 @@ int falcon_flush_queues(struct efx_nic *efx)
1244{ 1210{
1245 struct efx_rx_queue *rx_queue; 1211 struct efx_rx_queue *rx_queue;
1246 struct efx_tx_queue *tx_queue; 1212 struct efx_tx_queue *tx_queue;
1247 int i; 1213 int i, tx_pending, rx_pending;
1248 bool outstanding;
1249 1214
1250 /* Issue flush requests */ 1215 falcon_prepare_flush(efx);
1251 efx_for_each_tx_queue(tx_queue, efx) { 1216
1252 tx_queue->flushed = false; 1217 /* Flush all tx queues in parallel */
1218 efx_for_each_tx_queue(tx_queue, efx)
1253 falcon_flush_tx_queue(tx_queue); 1219 falcon_flush_tx_queue(tx_queue);
1254 }
1255 efx_for_each_rx_queue(rx_queue, efx) {
1256 rx_queue->flushed = false;
1257 falcon_flush_rx_queue(rx_queue);
1258 }
1259 1220
1260 /* Poll the evq looking for flush completions. Since we're not pushing 1221 /* The hardware supports four concurrent rx flushes, each of which may
1261 * any more rx or tx descriptors at this point, we're in no danger of 1222 * need to be retried if there is an outstanding descriptor fetch */
1262 * overflowing the evq whilst we wait */
1263 for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) { 1223 for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) {
1264 msleep(FALCON_FLUSH_INTERVAL); 1224 rx_pending = tx_pending = 0;
1265 falcon_poll_flush_events(efx); 1225 efx_for_each_rx_queue(rx_queue, efx) {
1226 if (rx_queue->flushed == FLUSH_PENDING)
1227 ++rx_pending;
1228 }
1229 efx_for_each_rx_queue(rx_queue, efx) {
1230 if (rx_pending == FALCON_RX_FLUSH_COUNT)
1231 break;
1232 if (rx_queue->flushed == FLUSH_FAILED ||
1233 rx_queue->flushed == FLUSH_NONE) {
1234 falcon_flush_rx_queue(rx_queue);
1235 ++rx_pending;
1236 }
1237 }
1238 efx_for_each_tx_queue(tx_queue, efx) {
1239 if (tx_queue->flushed != FLUSH_DONE)
1240 ++tx_pending;
1241 }
1266 1242
1267 /* Check if every queue has been succesfully flushed */ 1243 if (rx_pending == 0 && tx_pending == 0)
1268 outstanding = false;
1269 efx_for_each_tx_queue(tx_queue, efx)
1270 outstanding |= !tx_queue->flushed;
1271 efx_for_each_rx_queue(rx_queue, efx)
1272 outstanding |= !rx_queue->flushed;
1273 if (!outstanding)
1274 return 0; 1244 return 0;
1245
1246 msleep(FALCON_FLUSH_INTERVAL);
1247 falcon_poll_flush_events(efx);
1275 } 1248 }
1276 1249
1277 /* Mark the queues as all flushed. We're going to return failure 1250 /* Mark the queues as all flushed. We're going to return failure
1278 * leading to a reset, or fake up success anyway. "flushed" now 1251 * leading to a reset, or fake up success anyway */
1279 * indicates that we tried to flush. */
1280 efx_for_each_tx_queue(tx_queue, efx) { 1252 efx_for_each_tx_queue(tx_queue, efx) {
1281 if (!tx_queue->flushed) 1253 if (tx_queue->flushed != FLUSH_DONE)
1282 EFX_ERR(efx, "tx queue %d flush command timed out\n", 1254 EFX_ERR(efx, "tx queue %d flush command timed out\n",
1283 tx_queue->queue); 1255 tx_queue->queue);
1284 tx_queue->flushed = true; 1256 tx_queue->flushed = FLUSH_DONE;
1285 } 1257 }
1286 efx_for_each_rx_queue(rx_queue, efx) { 1258 efx_for_each_rx_queue(rx_queue, efx) {
1287 if (!rx_queue->flushed) 1259 if (rx_queue->flushed != FLUSH_DONE)
1288 EFX_ERR(efx, "rx queue %d flush command timed out\n", 1260 EFX_ERR(efx, "rx queue %d flush command timed out\n",
1289 rx_queue->queue); 1261 rx_queue->queue);
1290 rx_queue->flushed = true; 1262 rx_queue->flushed = FLUSH_DONE;
1291 } 1263 }
1292 1264
1293 if (EFX_WORKAROUND_7803(efx)) 1265 if (EFX_WORKAROUND_7803(efx))
@@ -1311,9 +1283,9 @@ static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
1311 efx_oword_t int_en_reg_ker; 1283 efx_oword_t int_en_reg_ker;
1312 1284
1313 EFX_POPULATE_OWORD_2(int_en_reg_ker, 1285 EFX_POPULATE_OWORD_2(int_en_reg_ker,
1314 KER_INT_KER, force, 1286 FRF_AZ_KER_INT_KER, force,
1315 DRV_INT_EN_KER, enabled); 1287 FRF_AZ_DRV_INT_EN_KER, enabled);
1316 falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER); 1288 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1317} 1289}
1318 1290
1319void falcon_enable_interrupts(struct efx_nic *efx) 1291void falcon_enable_interrupts(struct efx_nic *efx)
@@ -1326,9 +1298,10 @@ void falcon_enable_interrupts(struct efx_nic *efx)
1326 1298
1327 /* Program address */ 1299 /* Program address */
1328 EFX_POPULATE_OWORD_2(int_adr_reg_ker, 1300 EFX_POPULATE_OWORD_2(int_adr_reg_ker,
1329 NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx), 1301 FRF_AZ_NORM_INT_VEC_DIS_KER,
1330 INT_ADR_KER, efx->irq_status.dma_addr); 1302 EFX_INT_MODE_USE_MSI(efx),
1331 falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER); 1303 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1304 efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);
1332 1305
1333 /* Enable interrupts */ 1306 /* Enable interrupts */
1334 falcon_interrupts(efx, 1, 0); 1307 falcon_interrupts(efx, 1, 0);
@@ -1368,9 +1341,9 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1368{ 1341{
1369 efx_dword_t reg; 1342 efx_dword_t reg;
1370 1343
1371 EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e); 1344 EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
1372 falcon_writel(efx, &reg, INT_ACK_REG_KER_A1); 1345 efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
1373 falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1); 1346 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
1374} 1347}
1375 1348
1376/* Process a fatal interrupt 1349/* Process a fatal interrupt
@@ -1383,8 +1356,8 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1383 efx_oword_t fatal_intr; 1356 efx_oword_t fatal_intr;
1384 int error, mem_perr; 1357 int error, mem_perr;
1385 1358
1386 falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER); 1359 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1387 error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR); 1360 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1388 1361
1389 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " 1362 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1390 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1363 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
@@ -1394,10 +1367,10 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1394 goto out; 1367 goto out;
1395 1368
1396 /* If this is a memory parity error dump which blocks are offending */ 1369 /* If this is a memory parity error dump which blocks are offending */
1397 mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER); 1370 mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
1398 if (mem_perr) { 1371 if (mem_perr) {
1399 efx_oword_t reg; 1372 efx_oword_t reg;
1400 falcon_read(efx, &reg, MEM_STAT_REG_KER); 1373 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1401 EFX_ERR(efx, "SYSTEM ERROR: memory parity error " 1374 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1402 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1375 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1403 } 1376 }
@@ -1409,13 +1382,13 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1409 falcon_disable_interrupts(efx); 1382 falcon_disable_interrupts(efx);
1410 1383
1411 /* Count errors and reset or disable the NIC accordingly */ 1384 /* Count errors and reset or disable the NIC accordingly */
1412 if (nic_data->int_error_count == 0 || 1385 if (efx->int_error_count == 0 ||
1413 time_after(jiffies, nic_data->int_error_expire)) { 1386 time_after(jiffies, efx->int_error_expire)) {
1414 nic_data->int_error_count = 0; 1387 efx->int_error_count = 0;
1415 nic_data->int_error_expire = 1388 efx->int_error_expire =
1416 jiffies + FALCON_INT_ERROR_EXPIRE * HZ; 1389 jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
1417 } 1390 }
1418 if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) { 1391 if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
1419 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); 1392 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1420 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1393 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1421 } else { 1394 } else {
@@ -1441,11 +1414,11 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1441 int syserr; 1414 int syserr;
1442 1415
1443 /* Read the ISR which also ACKs the interrupts */ 1416 /* Read the ISR which also ACKs the interrupts */
1444 falcon_readl(efx, &reg, INT_ISR0_B0); 1417 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1445 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1418 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1446 1419
1447 /* Check to see if we have a serious error condition */ 1420 /* Check to see if we have a serious error condition */
1448 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); 1421 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1449 if (unlikely(syserr)) 1422 if (unlikely(syserr))
1450 return falcon_fatal_interrupt(efx); 1423 return falcon_fatal_interrupt(efx);
1451 1424
@@ -1491,7 +1464,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1491 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1464 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1492 1465
1493 /* Check to see if we have a serious error condition */ 1466 /* Check to see if we have a serious error condition */
1494 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); 1467 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1495 if (unlikely(syserr)) 1468 if (unlikely(syserr))
1496 return falcon_fatal_interrupt(efx); 1469 return falcon_fatal_interrupt(efx);
1497 1470
@@ -1555,15 +1528,15 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1555 unsigned long offset; 1528 unsigned long offset;
1556 efx_dword_t dword; 1529 efx_dword_t dword;
1557 1530
1558 if (falcon_rev(efx) < FALCON_REV_B0) 1531 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1559 return; 1532 return;
1560 1533
1561 for (offset = RX_RSS_INDIR_TBL_B0; 1534 for (offset = FR_BZ_RX_INDIRECTION_TBL;
1562 offset < RX_RSS_INDIR_TBL_B0 + 0x800; 1535 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
1563 offset += 0x10) { 1536 offset += 0x10) {
1564 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, 1537 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1565 i % efx->n_rx_queues); 1538 i % efx->n_rx_queues);
1566 falcon_writel(efx, &dword, offset); 1539 efx_writed(efx, &dword, offset);
1567 i++; 1540 i++;
1568 } 1541 }
1569} 1542}
@@ -1578,7 +1551,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
1578 1551
1579 if (!EFX_INT_MODE_USE_MSI(efx)) { 1552 if (!EFX_INT_MODE_USE_MSI(efx)) {
1580 irq_handler_t handler; 1553 irq_handler_t handler;
1581 if (falcon_rev(efx) >= FALCON_REV_B0) 1554 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1582 handler = falcon_legacy_interrupt_b0; 1555 handler = falcon_legacy_interrupt_b0;
1583 else 1556 else
1584 handler = falcon_legacy_interrupt_a1; 1557 handler = falcon_legacy_interrupt_a1;
@@ -1625,8 +1598,8 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1625 } 1598 }
1626 1599
1627 /* ACK legacy interrupt */ 1600 /* ACK legacy interrupt */
1628 if (falcon_rev(efx) >= FALCON_REV_B0) 1601 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1629 falcon_read(efx, &reg, INT_ISR0_B0); 1602 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1630 else 1603 else
1631 falcon_irq_ack_a1(efx); 1604 falcon_irq_ack_a1(efx);
1632 1605
@@ -1647,8 +1620,8 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1647static int falcon_spi_poll(struct efx_nic *efx) 1620static int falcon_spi_poll(struct efx_nic *efx)
1648{ 1621{
1649 efx_oword_t reg; 1622 efx_oword_t reg;
1650 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER); 1623 efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
1651 return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; 1624 return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
1652} 1625}
1653 1626
1654/* Wait for SPI command completion */ 1627/* Wait for SPI command completion */
@@ -1700,27 +1673,27 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1700 1673
1701 /* Program address register, if we have an address */ 1674 /* Program address register, if we have an address */
1702 if (addressed) { 1675 if (addressed) {
1703 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); 1676 EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
1704 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER); 1677 efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
1705 } 1678 }
1706 1679
1707 /* Program data register, if we have data */ 1680 /* Program data register, if we have data */
1708 if (in != NULL) { 1681 if (in != NULL) {
1709 memcpy(&reg, in, len); 1682 memcpy(&reg, in, len);
1710 falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER); 1683 efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
1711 } 1684 }
1712 1685
1713 /* Issue read/write command */ 1686 /* Issue read/write command */
1714 EFX_POPULATE_OWORD_7(reg, 1687 EFX_POPULATE_OWORD_7(reg,
1715 EE_SPI_HCMD_CMD_EN, 1, 1688 FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
1716 EE_SPI_HCMD_SF_SEL, spi->device_id, 1689 FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
1717 EE_SPI_HCMD_DABCNT, len, 1690 FRF_AB_EE_SPI_HCMD_DABCNT, len,
1718 EE_SPI_HCMD_READ, reading, 1691 FRF_AB_EE_SPI_HCMD_READ, reading,
1719 EE_SPI_HCMD_DUBCNT, 0, 1692 FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
1720 EE_SPI_HCMD_ADBCNT, 1693 FRF_AB_EE_SPI_HCMD_ADBCNT,
1721 (addressed ? spi->addr_len : 0), 1694 (addressed ? spi->addr_len : 0),
1722 EE_SPI_HCMD_ENC, command); 1695 FRF_AB_EE_SPI_HCMD_ENC, command);
1723 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER); 1696 efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
1724 1697
1725 /* Wait for read/write to complete */ 1698 /* Wait for read/write to complete */
1726 rc = falcon_spi_wait(efx); 1699 rc = falcon_spi_wait(efx);
@@ -1729,7 +1702,7 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1729 1702
1730 /* Read data */ 1703 /* Read data */
1731 if (out != NULL) { 1704 if (out != NULL) {
1732 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER); 1705 efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
1733 memcpy(out, &reg, len); 1706 memcpy(out, &reg, len);
1734 } 1707 }
1735 1708
@@ -1865,26 +1838,27 @@ static int falcon_reset_macs(struct efx_nic *efx)
1865 efx_oword_t reg; 1838 efx_oword_t reg;
1866 int count; 1839 int count;
1867 1840
1868 if (falcon_rev(efx) < FALCON_REV_B0) { 1841 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
1869 /* It's not safe to use GLB_CTL_REG to reset the 1842 /* It's not safe to use GLB_CTL_REG to reset the
1870 * macs, so instead use the internal MAC resets 1843 * macs, so instead use the internal MAC resets
1871 */ 1844 */
1872 if (!EFX_IS10G(efx)) { 1845 if (!EFX_IS10G(efx)) {
1873 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1); 1846 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
1874 falcon_write(efx, &reg, GM_CFG1_REG); 1847 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1875 udelay(1000); 1848 udelay(1000);
1876 1849
1877 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0); 1850 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
1878 falcon_write(efx, &reg, GM_CFG1_REG); 1851 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1879 udelay(1000); 1852 udelay(1000);
1880 return 0; 1853 return 0;
1881 } else { 1854 } else {
1882 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1); 1855 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
1883 falcon_write(efx, &reg, XM_GLB_CFG_REG); 1856 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
1884 1857
1885 for (count = 0; count < 10000; count++) { 1858 for (count = 0; count < 10000; count++) {
1886 falcon_read(efx, &reg, XM_GLB_CFG_REG); 1859 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
1887 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0) 1860 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
1861 0)
1888 return 0; 1862 return 0;
1889 udelay(10); 1863 udelay(10);
1890 } 1864 }
@@ -1896,24 +1870,24 @@ static int falcon_reset_macs(struct efx_nic *efx)
1896 1870
1897 /* MAC stats will fail whilst the TX fifo is draining. Serialise 1871 /* MAC stats will fail whilst the TX fifo is draining. Serialise
1898 * the drain sequence with the statistics fetch */ 1872 * the drain sequence with the statistics fetch */
1899 efx_stats_disable(efx); 1873 falcon_stop_nic_stats(efx);
1900 1874
1901 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 1875 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1902 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); 1876 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
1903 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 1877 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
1904 1878
1905 falcon_read(efx, &reg, GLB_CTL_REG_KER); 1879 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1906 EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1); 1880 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
1907 EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1); 1881 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
1908 EFX_SET_OWORD_FIELD(reg, RST_EM, 1); 1882 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
1909 falcon_write(efx, &reg, GLB_CTL_REG_KER); 1883 efx_writeo(efx, &reg, FR_AB_GLB_CTL);
1910 1884
1911 count = 0; 1885 count = 0;
1912 while (1) { 1886 while (1) {
1913 falcon_read(efx, &reg, GLB_CTL_REG_KER); 1887 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1914 if (!EFX_OWORD_FIELD(reg, RST_XGTX) && 1888 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
1915 !EFX_OWORD_FIELD(reg, RST_XGRX) && 1889 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
1916 !EFX_OWORD_FIELD(reg, RST_EM)) { 1890 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
1917 EFX_LOG(efx, "Completed MAC reset after %d loops\n", 1891 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1918 count); 1892 count);
1919 break; 1893 break;
@@ -1926,13 +1900,13 @@ static int falcon_reset_macs(struct efx_nic *efx)
1926 udelay(10); 1900 udelay(10);
1927 } 1901 }
1928 1902
1929 efx_stats_enable(efx);
1930
1931 /* If we've reset the EM block and the link is up, then 1903 /* If we've reset the EM block and the link is up, then
1932 * we'll have to kick the XAUI link so the PHY can recover */ 1904 * we'll have to kick the XAUI link so the PHY can recover */
1933 if (efx->link_up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx)) 1905 if (efx->link_state.up && EFX_IS10G(efx) && EFX_WORKAROUND_5147(efx))
1934 falcon_reset_xaui(efx); 1906 falcon_reset_xaui(efx);
1935 1907
1908 falcon_start_nic_stats(efx);
1909
1936 return 0; 1910 return 0;
1937} 1911}
1938 1912
@@ -1940,13 +1914,13 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
1940{ 1914{
1941 efx_oword_t reg; 1915 efx_oword_t reg;
1942 1916
1943 if ((falcon_rev(efx) < FALCON_REV_B0) || 1917 if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
1944 (efx->loopback_mode != LOOPBACK_NONE)) 1918 (efx->loopback_mode != LOOPBACK_NONE))
1945 return; 1919 return;
1946 1920
1947 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 1921 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1948 /* There is no point in draining more than once */ 1922 /* There is no point in draining more than once */
1949 if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0)) 1923 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1950 return; 1924 return;
1951 1925
1952 falcon_reset_macs(efx); 1926 falcon_reset_macs(efx);
@@ -1956,25 +1930,26 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1956{ 1930{
1957 efx_oword_t reg; 1931 efx_oword_t reg;
1958 1932
1959 if (falcon_rev(efx) < FALCON_REV_B0) 1933 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1960 return; 1934 return;
1961 1935
1962 /* Isolate the MAC -> RX */ 1936 /* Isolate the MAC -> RX */
1963 falcon_read(efx, &reg, RX_CFG_REG_KER); 1937 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1964 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0); 1938 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
1965 falcon_write(efx, &reg, RX_CFG_REG_KER); 1939 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1966 1940
1967 if (!efx->link_up) 1941 if (!efx->link_state.up)
1968 falcon_drain_tx_fifo(efx); 1942 falcon_drain_tx_fifo(efx);
1969} 1943}
1970 1944
1971void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) 1945void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1972{ 1946{
1947 struct efx_link_state *link_state = &efx->link_state;
1973 efx_oword_t reg; 1948 efx_oword_t reg;
1974 int link_speed; 1949 int link_speed;
1975 bool tx_fc; 1950 bool tx_fc;
1976 1951
1977 switch (efx->link_speed) { 1952 switch (link_state->speed) {
1978 case 10000: link_speed = 3; break; 1953 case 10000: link_speed = 3; break;
1979 case 1000: link_speed = 2; break; 1954 case 1000: link_speed = 2; break;
1980 case 100: link_speed = 1; break; 1955 case 100: link_speed = 1; break;
@@ -1985,75 +1960,108 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1985 * indefinitely held and TX queue can be flushed at any point 1960 * indefinitely held and TX queue can be flushed at any point
1986 * while the link is down. */ 1961 * while the link is down. */
1987 EFX_POPULATE_OWORD_5(reg, 1962 EFX_POPULATE_OWORD_5(reg,
1988 MAC_XOFF_VAL, 0xffff /* max pause time */, 1963 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
1989 MAC_BCAD_ACPT, 1, 1964 FRF_AB_MAC_BCAD_ACPT, 1,
1990 MAC_UC_PROM, efx->promiscuous, 1965 FRF_AB_MAC_UC_PROM, efx->promiscuous,
1991 MAC_LINK_STATUS, 1, /* always set */ 1966 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
1992 MAC_SPEED, link_speed); 1967 FRF_AB_MAC_SPEED, link_speed);
1993 /* On B0, MAC backpressure can be disabled and packets get 1968 /* On B0, MAC backpressure can be disabled and packets get
1994 * discarded. */ 1969 * discarded. */
1995 if (falcon_rev(efx) >= FALCON_REV_B0) { 1970 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1996 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1971 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
1997 !efx->link_up); 1972 !link_state->up);
1998 } 1973 }
1999 1974
2000 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 1975 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
2001 1976
2002 /* Restore the multicast hash registers. */ 1977 /* Restore the multicast hash registers. */
2003 falcon_set_multicast_hash(efx); 1978 falcon_push_multicast_hash(efx);
2004 1979
2005 /* Transmission of pause frames when RX crosses the threshold is 1980 /* Transmission of pause frames when RX crosses the threshold is
2006 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. 1981 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
2007 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ 1982 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
2008 tx_fc = !!(efx->link_fc & EFX_FC_TX); 1983 tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
2009 falcon_read(efx, &reg, RX_CFG_REG_KER); 1984 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2010 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1985 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);
2011 1986
2012 /* Unisolate the MAC -> RX */ 1987 /* Unisolate the MAC -> RX */
2013 if (falcon_rev(efx) >= FALCON_REV_B0) 1988 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
2014 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 1989 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2015 falcon_write(efx, &reg, RX_CFG_REG_KER); 1990 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2016} 1991}
2017 1992
2018int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) 1993static void falcon_stats_request(struct efx_nic *efx)
2019{ 1994{
1995 struct falcon_nic_data *nic_data = efx->nic_data;
2020 efx_oword_t reg; 1996 efx_oword_t reg;
2021 u32 *dma_done;
2022 int i;
2023 1997
2024 if (disable_dma_stats) 1998 WARN_ON(nic_data->stats_pending);
2025 return 0; 1999 WARN_ON(nic_data->stats_disable_count);
2026 2000
2027 /* Statistics fetch will fail if the MAC is in TX drain */ 2001 if (nic_data->stats_dma_done == NULL)
2028 if (falcon_rev(efx) >= FALCON_REV_B0) { 2002 return; /* no mac selected */
2029 efx_oword_t temp;
2030 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
2031 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
2032 return 0;
2033 }
2034 2003
2035 dma_done = (efx->stats_buffer.addr + done_offset); 2004 *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
2036 *dma_done = FALCON_STATS_NOT_DONE; 2005 nic_data->stats_pending = true;
2037 wmb(); /* ensure done flag is clear */ 2006 wmb(); /* ensure done flag is clear */
2038 2007
2039 /* Initiate DMA transfer of stats */ 2008 /* Initiate DMA transfer of stats */
2040 EFX_POPULATE_OWORD_2(reg, 2009 EFX_POPULATE_OWORD_2(reg,
2041 MAC_STAT_DMA_CMD, 1, 2010 FRF_AB_MAC_STAT_DMA_CMD, 1,
2042 MAC_STAT_DMA_ADR, 2011 FRF_AB_MAC_STAT_DMA_ADR,
2043 efx->stats_buffer.dma_addr); 2012 efx->stats_buffer.dma_addr);
2044 falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER); 2013 efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
2045 2014
2046 /* Wait for transfer to complete */ 2015 mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
2047 for (i = 0; i < 400; i++) { 2016}
2048 if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) { 2017
2049 rmb(); /* Ensure the stats are valid. */ 2018static void falcon_stats_complete(struct efx_nic *efx)
2050 return 0; 2019{
2051 } 2020 struct falcon_nic_data *nic_data = efx->nic_data;
2052 udelay(10); 2021
2022 if (!nic_data->stats_pending)
2023 return;
2024
2025 nic_data->stats_pending = 0;
2026 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
2027 rmb(); /* read the done flag before the stats */
2028 efx->mac_op->update_stats(efx);
2029 } else {
2030 EFX_ERR(efx, "timed out waiting for statistics\n");
2053 } 2031 }
2032}
2054 2033
2055 EFX_ERR(efx, "timed out waiting for statistics\n"); 2034static void falcon_stats_timer_func(unsigned long context)
2056 return -ETIMEDOUT; 2035{
2036 struct efx_nic *efx = (struct efx_nic *)context;
2037 struct falcon_nic_data *nic_data = efx->nic_data;
2038
2039 spin_lock(&efx->stats_lock);
2040
2041 falcon_stats_complete(efx);
2042 if (nic_data->stats_disable_count == 0)
2043 falcon_stats_request(efx);
2044
2045 spin_unlock(&efx->stats_lock);
2046}
2047
2048static bool falcon_loopback_link_poll(struct efx_nic *efx)
2049{
2050 struct efx_link_state old_state = efx->link_state;
2051
2052 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2053 WARN_ON(!LOOPBACK_INTERNAL(efx));
2054
2055 efx->link_state.fd = true;
2056 efx->link_state.fc = efx->wanted_fc;
2057 efx->link_state.up = true;
2058
2059 if (efx->loopback_mode == LOOPBACK_GMAC)
2060 efx->link_state.speed = 1000;
2061 else
2062 efx->link_state.speed = 10000;
2063
2064 return !efx_link_state_equal(&efx->link_state, &old_state);
2057} 2065}
2058 2066
2059/************************************************************************** 2067/**************************************************************************
@@ -2066,18 +2074,18 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
2066/* Wait for GMII access to complete */ 2074/* Wait for GMII access to complete */
2067static int falcon_gmii_wait(struct efx_nic *efx) 2075static int falcon_gmii_wait(struct efx_nic *efx)
2068{ 2076{
2069 efx_dword_t md_stat; 2077 efx_oword_t md_stat;
2070 int count; 2078 int count;
2071 2079
2072 /* wait upto 50ms - taken max from datasheet */ 2080 /* wait upto 50ms - taken max from datasheet */
2073 for (count = 0; count < 5000; count++) { 2081 for (count = 0; count < 5000; count++) {
2074 falcon_readl(efx, &md_stat, MD_STAT_REG_KER); 2082 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
2075 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { 2083 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
2076 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || 2084 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
2077 EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) { 2085 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
2078 EFX_ERR(efx, "error from GMII access " 2086 EFX_ERR(efx, "error from GMII access "
2079 EFX_DWORD_FMT"\n", 2087 EFX_OWORD_FMT"\n",
2080 EFX_DWORD_VAL(md_stat)); 2088 EFX_OWORD_VAL(md_stat));
2081 return -EIO; 2089 return -EIO;
2082 } 2090 }
2083 return 0; 2091 return 0;
@@ -2099,7 +2107,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
2099 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n", 2107 EFX_REGDUMP(efx, "writing MDIO %d register %d.%d with 0x%04x\n",
2100 prtad, devad, addr, value); 2108 prtad, devad, addr, value);
2101 2109
2102 spin_lock_bh(&efx->phy_lock); 2110 mutex_lock(&efx->mdio_lock);
2103 2111
2104 /* Check MDIO not currently being accessed */ 2112 /* Check MDIO not currently being accessed */
2105 rc = falcon_gmii_wait(efx); 2113 rc = falcon_gmii_wait(efx);
@@ -2107,34 +2115,35 @@ static int falcon_mdio_write(struct net_device *net_dev,
2107 goto out; 2115 goto out;
2108 2116
2109 /* Write the address/ID register */ 2117 /* Write the address/ID register */
2110 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 2118 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2111 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 2119 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2112 2120
2113 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 2121 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2114 falcon_write(efx, &reg, MD_ID_REG_KER); 2122 FRF_AB_MD_DEV_ADR, devad);
2123 efx_writeo(efx, &reg, FR_AB_MD_ID);
2115 2124
2116 /* Write data */ 2125 /* Write data */
2117 EFX_POPULATE_OWORD_1(reg, MD_TXD, value); 2126 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2118 falcon_write(efx, &reg, MD_TXD_REG_KER); 2127 efx_writeo(efx, &reg, FR_AB_MD_TXD);
2119 2128
2120 EFX_POPULATE_OWORD_2(reg, 2129 EFX_POPULATE_OWORD_2(reg,
2121 MD_WRC, 1, 2130 FRF_AB_MD_WRC, 1,
2122 MD_GC, 0); 2131 FRF_AB_MD_GC, 0);
2123 falcon_write(efx, &reg, MD_CS_REG_KER); 2132 efx_writeo(efx, &reg, FR_AB_MD_CS);
2124 2133
2125 /* Wait for data to be written */ 2134 /* Wait for data to be written */
2126 rc = falcon_gmii_wait(efx); 2135 rc = falcon_gmii_wait(efx);
2127 if (rc) { 2136 if (rc) {
2128 /* Abort the write operation */ 2137 /* Abort the write operation */
2129 EFX_POPULATE_OWORD_2(reg, 2138 EFX_POPULATE_OWORD_2(reg,
2130 MD_WRC, 0, 2139 FRF_AB_MD_WRC, 0,
2131 MD_GC, 1); 2140 FRF_AB_MD_GC, 1);
2132 falcon_write(efx, &reg, MD_CS_REG_KER); 2141 efx_writeo(efx, &reg, FR_AB_MD_CS);
2133 udelay(10); 2142 udelay(10);
2134 } 2143 }
2135 2144
2136 out: 2145out:
2137 spin_unlock_bh(&efx->phy_lock); 2146 mutex_unlock(&efx->mdio_lock);
2138 return rc; 2147 return rc;
2139} 2148}
2140 2149
@@ -2146,124 +2155,99 @@ static int falcon_mdio_read(struct net_device *net_dev,
2146 efx_oword_t reg; 2155 efx_oword_t reg;
2147 int rc; 2156 int rc;
2148 2157
2149 spin_lock_bh(&efx->phy_lock); 2158 mutex_lock(&efx->mdio_lock);
2150 2159
2151 /* Check MDIO not currently being accessed */ 2160 /* Check MDIO not currently being accessed */
2152 rc = falcon_gmii_wait(efx); 2161 rc = falcon_gmii_wait(efx);
2153 if (rc) 2162 if (rc)
2154 goto out; 2163 goto out;
2155 2164
2156 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 2165 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2157 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 2166 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2158 2167
2159 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 2168 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2160 falcon_write(efx, &reg, MD_ID_REG_KER); 2169 FRF_AB_MD_DEV_ADR, devad);
2170 efx_writeo(efx, &reg, FR_AB_MD_ID);
2161 2171
2162 /* Request data to be read */ 2172 /* Request data to be read */
2163 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0); 2173 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2164 falcon_write(efx, &reg, MD_CS_REG_KER); 2174 efx_writeo(efx, &reg, FR_AB_MD_CS);
2165 2175
2166 /* Wait for data to become available */ 2176 /* Wait for data to become available */
2167 rc = falcon_gmii_wait(efx); 2177 rc = falcon_gmii_wait(efx);
2168 if (rc == 0) { 2178 if (rc == 0) {
2169 falcon_read(efx, &reg, MD_RXD_REG_KER); 2179 efx_reado(efx, &reg, FR_AB_MD_RXD);
2170 rc = EFX_OWORD_FIELD(reg, MD_RXD); 2180 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2171 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", 2181 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2172 prtad, devad, addr, rc); 2182 prtad, devad, addr, rc);
2173 } else { 2183 } else {
2174 /* Abort the read operation */ 2184 /* Abort the read operation */
2175 EFX_POPULATE_OWORD_2(reg, 2185 EFX_POPULATE_OWORD_2(reg,
2176 MD_RIC, 0, 2186 FRF_AB_MD_RIC, 0,
2177 MD_GC, 1); 2187 FRF_AB_MD_GC, 1);
2178 falcon_write(efx, &reg, MD_CS_REG_KER); 2188 efx_writeo(efx, &reg, FR_AB_MD_CS);
2179 2189
2180 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", 2190 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2181 prtad, devad, addr, rc); 2191 prtad, devad, addr, rc);
2182 } 2192 }
2183 2193
2184 out: 2194out:
2185 spin_unlock_bh(&efx->phy_lock); 2195 mutex_unlock(&efx->mdio_lock);
2186 return rc; 2196 return rc;
2187} 2197}
2188 2198
2189static int falcon_probe_phy(struct efx_nic *efx) 2199static void falcon_clock_mac(struct efx_nic *efx)
2190{ 2200{
2191 switch (efx->phy_type) { 2201 unsigned strap_val;
2192 case PHY_TYPE_SFX7101: 2202 efx_oword_t nic_stat;
2193 efx->phy_op = &falcon_sfx7101_phy_ops;
2194 break;
2195 case PHY_TYPE_SFT9001A:
2196 case PHY_TYPE_SFT9001B:
2197 efx->phy_op = &falcon_sft9001_phy_ops;
2198 break;
2199 case PHY_TYPE_QT2022C2:
2200 case PHY_TYPE_QT2025C:
2201 efx->phy_op = &falcon_xfp_phy_ops;
2202 break;
2203 default:
2204 EFX_ERR(efx, "Unknown PHY type %d\n",
2205 efx->phy_type);
2206 return -1;
2207 }
2208
2209 if (efx->phy_op->macs & EFX_XMAC)
2210 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2211 (1 << LOOPBACK_XGXS) |
2212 (1 << LOOPBACK_XAUI));
2213 if (efx->phy_op->macs & EFX_GMAC)
2214 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2215 efx->loopback_modes |= efx->phy_op->loopbacks;
2216 2203
2217 return 0; 2204 /* Configure the NIC generated MAC clock correctly */
2205 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2206 strap_val = EFX_IS10G(efx) ? 5 : 3;
2207 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2208 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
2209 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
2210 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
2211 } else {
2212 /* Falcon A1 does not support 1G/10G speed switching
2213 * and must not be used with a PHY that does. */
2214 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
2215 strap_val);
2216 }
2218} 2217}
2219 2218
2220int falcon_switch_mac(struct efx_nic *efx) 2219int falcon_switch_mac(struct efx_nic *efx)
2221{ 2220{
2222 struct efx_mac_operations *old_mac_op = efx->mac_op; 2221 struct efx_mac_operations *old_mac_op = efx->mac_op;
2223 efx_oword_t nic_stat; 2222 struct falcon_nic_data *nic_data = efx->nic_data;
2224 unsigned strap_val; 2223 unsigned int stats_done_offset;
2225 int rc = 0; 2224 int rc = 0;
2226 2225
2227 /* Don't try to fetch MAC stats while we're switching MACs */ 2226 /* Don't try to fetch MAC stats while we're switching MACs */
2228 efx_stats_disable(efx); 2227 falcon_stop_nic_stats(efx);
2229
2230 /* Internal loopbacks override the phy speed setting */
2231 if (efx->loopback_mode == LOOPBACK_GMAC) {
2232 efx->link_speed = 1000;
2233 efx->link_fd = true;
2234 } else if (LOOPBACK_INTERNAL(efx)) {
2235 efx->link_speed = 10000;
2236 efx->link_fd = true;
2237 }
2238 2228
2239 WARN_ON(!mutex_is_locked(&efx->mac_lock)); 2229 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2240 efx->mac_op = (EFX_IS10G(efx) ? 2230 efx->mac_op = (EFX_IS10G(efx) ?
2241 &falcon_xmac_operations : &falcon_gmac_operations); 2231 &falcon_xmac_operations : &falcon_gmac_operations);
2242 2232
2243 /* Always push the NIC_STAT_REG setting even if the mac hasn't 2233 if (EFX_IS10G(efx))
2244 * changed, because this function is run post online reset */ 2234 stats_done_offset = XgDmaDone_offset;
2245 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2235 else
2246 strap_val = EFX_IS10G(efx) ? 5 : 3; 2236 stats_done_offset = GDmaDone_offset;
2247 if (falcon_rev(efx) >= FALCON_REV_B0) { 2237 nic_data->stats_dma_done = efx->stats_buffer.addr + stats_done_offset;
2248 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1);
2249 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val);
2250 falcon_write(efx, &nic_stat, NIC_STAT_REG);
2251 } else {
2252 /* Falcon A1 does not support 1G/10G speed switching
2253 * and must not be used with a PHY that does. */
2254 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val);
2255 }
2256 2238
2257 if (old_mac_op == efx->mac_op) 2239 if (old_mac_op == efx->mac_op)
2258 goto out; 2240 goto out;
2259 2241
2242 falcon_clock_mac(efx);
2243
2260 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G'); 2244 EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 'X' : 'G');
2261 /* Not all macs support a mac-level link state */ 2245 /* Not all macs support a mac-level link state */
2262 efx->mac_up = true; 2246 efx->xmac_poll_required = false;
2263 2247
2264 rc = falcon_reset_macs(efx); 2248 rc = falcon_reset_macs(efx);
2265out: 2249out:
2266 efx_stats_enable(efx); 2250 falcon_start_nic_stats(efx);
2267 return rc; 2251 return rc;
2268} 2252}
2269 2253
@@ -2272,10 +2256,31 @@ int falcon_probe_port(struct efx_nic *efx)
2272{ 2256{
2273 int rc; 2257 int rc;
2274 2258
2275 /* Hook in PHY operations table */ 2259 switch (efx->phy_type) {
2276 rc = falcon_probe_phy(efx); 2260 case PHY_TYPE_SFX7101:
2277 if (rc) 2261 efx->phy_op = &falcon_sfx7101_phy_ops;
2278 return rc; 2262 break;
2263 case PHY_TYPE_SFT9001A:
2264 case PHY_TYPE_SFT9001B:
2265 efx->phy_op = &falcon_sft9001_phy_ops;
2266 break;
2267 case PHY_TYPE_QT2022C2:
2268 case PHY_TYPE_QT2025C:
2269 efx->phy_op = &falcon_qt202x_phy_ops;
2270 break;
2271 default:
2272 EFX_ERR(efx, "Unknown PHY type %d\n",
2273 efx->phy_type);
2274 return -ENODEV;
2275 }
2276
2277 if (efx->phy_op->macs & EFX_XMAC)
2278 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2279 (1 << LOOPBACK_XGXS) |
2280 (1 << LOOPBACK_XAUI));
2281 if (efx->phy_op->macs & EFX_GMAC)
2282 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2283 efx->loopback_modes |= efx->phy_op->loopbacks;
2279 2284
2280 /* Set up MDIO structure for PHY */ 2285 /* Set up MDIO structure for PHY */
2281 efx->mdio.mmds = efx->phy_op->mmds; 2286 efx->mdio.mmds = efx->phy_op->mmds;
@@ -2283,8 +2288,12 @@ int falcon_probe_port(struct efx_nic *efx)
2283 efx->mdio.mdio_read = falcon_mdio_read; 2288 efx->mdio.mdio_read = falcon_mdio_read;
2284 efx->mdio.mdio_write = falcon_mdio_write; 2289 efx->mdio.mdio_write = falcon_mdio_write;
2285 2290
2291 /* Initial assumption */
2292 efx->link_state.speed = 10000;
2293 efx->link_state.fd = true;
2294
2286 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2295 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
2287 if (falcon_rev(efx) >= FALCON_REV_B0) 2296 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
2288 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; 2297 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
2289 else 2298 else
2290 efx->wanted_fc = EFX_FC_RX; 2299 efx->wanted_fc = EFX_FC_RX;
@@ -2314,18 +2323,14 @@ void falcon_remove_port(struct efx_nic *efx)
2314 ************************************************************************** 2323 **************************************************************************
2315 */ 2324 */
2316 2325
2317void falcon_set_multicast_hash(struct efx_nic *efx) 2326void falcon_push_multicast_hash(struct efx_nic *efx)
2318{ 2327{
2319 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 2328 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
2320 2329
2321 /* Broadcast packets go through the multicast hash filter. 2330 WARN_ON(!mutex_is_locked(&efx->mac_lock));
2322 * ether_crc_le() of the broadcast address is 0xbe2612ff
2323 * so we always add bit 0xff to the mask.
2324 */
2325 set_bit_le(0xff, mc_hash->byte);
2326 2331
2327 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER); 2332 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
2328 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); 2333 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
2329} 2334}
2330 2335
2331 2336
@@ -2351,7 +2356,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2351 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 2356 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2352 if (!region) 2357 if (!region)
2353 return -ENOMEM; 2358 return -ENOMEM;
2354 nvconfig = region + NVCONFIG_OFFSET; 2359 nvconfig = region + FALCON_NVCONFIG_OFFSET;
2355 2360
2356 mutex_lock(&efx->spi_lock); 2361 mutex_lock(&efx->spi_lock);
2357 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region); 2362 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
@@ -2367,7 +2372,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2367 struct_ver = le16_to_cpu(nvconfig->board_struct_ver); 2372 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2368 2373
2369 rc = -EINVAL; 2374 rc = -EINVAL;
2370 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) { 2375 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2371 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); 2376 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2372 goto out; 2377 goto out;
2373 } 2378 }
@@ -2403,41 +2408,41 @@ static struct {
2403 unsigned address; 2408 unsigned address;
2404 efx_oword_t mask; 2409 efx_oword_t mask;
2405} efx_test_registers[] = { 2410} efx_test_registers[] = {
2406 { ADR_REGION_REG_KER, 2411 { FR_AZ_ADR_REGION,
2407 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 2412 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2408 { RX_CFG_REG_KER, 2413 { FR_AZ_RX_CFG,
2409 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, 2414 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2410 { TX_CFG_REG_KER, 2415 { FR_AZ_TX_CFG,
2411 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, 2416 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2412 { TX_CFG2_REG_KER, 2417 { FR_AZ_TX_RESERVED,
2413 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, 2418 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2414 { MAC0_CTRL_REG_KER, 2419 { FR_AB_MAC_CTRL,
2415 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, 2420 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2416 { SRM_TX_DC_CFG_REG_KER, 2421 { FR_AZ_SRM_TX_DC_CFG,
2417 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, 2422 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2418 { RX_DC_CFG_REG_KER, 2423 { FR_AZ_RX_DC_CFG,
2419 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, 2424 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2420 { RX_DC_PF_WM_REG_KER, 2425 { FR_AZ_RX_DC_PF_WM,
2421 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, 2426 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2422 { DP_CTRL_REG, 2427 { FR_BZ_DP_CTRL,
2423 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, 2428 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2424 { GM_CFG2_REG, 2429 { FR_AB_GM_CFG2,
2425 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, 2430 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2426 { GMF_CFG0_REG, 2431 { FR_AB_GMF_CFG0,
2427 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, 2432 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2428 { XM_GLB_CFG_REG, 2433 { FR_AB_XM_GLB_CFG,
2429 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, 2434 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2430 { XM_TX_CFG_REG, 2435 { FR_AB_XM_TX_CFG,
2431 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, 2436 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2432 { XM_RX_CFG_REG, 2437 { FR_AB_XM_RX_CFG,
2433 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, 2438 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2434 { XM_RX_PARAM_REG, 2439 { FR_AB_XM_RX_PARAM,
2435 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, 2440 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2436 { XM_FC_REG, 2441 { FR_AB_XM_FC,
2437 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, 2442 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2438 { XM_ADR_LO_REG, 2443 { FR_AB_XM_ADR_LO,
2439 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, 2444 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2440 { XX_SD_CTL_REG, 2445 { FR_AB_XX_SD_CTL,
2441 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, 2446 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2442}; 2447};
2443 2448
@@ -2461,7 +2466,7 @@ int falcon_test_registers(struct efx_nic *efx)
2461 mask = imask = efx_test_registers[i].mask; 2466 mask = imask = efx_test_registers[i].mask;
2462 EFX_INVERT_OWORD(imask); 2467 EFX_INVERT_OWORD(imask);
2463 2468
2464 falcon_read(efx, &original, address); 2469 efx_reado(efx, &original, address);
2465 2470
2466 /* bit sweep on and off */ 2471 /* bit sweep on and off */
2467 for (j = 0; j < 128; j++) { 2472 for (j = 0; j < 128; j++) {
@@ -2472,8 +2477,8 @@ int falcon_test_registers(struct efx_nic *efx)
2472 EFX_AND_OWORD(reg, original, mask); 2477 EFX_AND_OWORD(reg, original, mask);
2473 EFX_SET_OWORD32(reg, j, j, 1); 2478 EFX_SET_OWORD32(reg, j, j, 1);
2474 2479
2475 falcon_write(efx, &reg, address); 2480 efx_writeo(efx, &reg, address);
2476 falcon_read(efx, &buf, address); 2481 efx_reado(efx, &buf, address);
2477 2482
2478 if (efx_masked_compare_oword(&reg, &buf, &mask)) 2483 if (efx_masked_compare_oword(&reg, &buf, &mask))
2479 goto fail; 2484 goto fail;
@@ -2482,14 +2487,14 @@ int falcon_test_registers(struct efx_nic *efx)
2482 EFX_OR_OWORD(reg, original, mask); 2487 EFX_OR_OWORD(reg, original, mask);
2483 EFX_SET_OWORD32(reg, j, j, 0); 2488 EFX_SET_OWORD32(reg, j, j, 0);
2484 2489
2485 falcon_write(efx, &reg, address); 2490 efx_writeo(efx, &reg, address);
2486 falcon_read(efx, &buf, address); 2491 efx_reado(efx, &buf, address);
2487 2492
2488 if (efx_masked_compare_oword(&reg, &buf, &mask)) 2493 if (efx_masked_compare_oword(&reg, &buf, &mask))
2489 goto fail; 2494 goto fail;
2490 } 2495 }
2491 2496
2492 falcon_write(efx, &original, address); 2497 efx_writeo(efx, &original, address);
2493 } 2498 }
2494 2499
2495 return 0; 2500 return 0;
@@ -2516,7 +2521,7 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2516 efx_oword_t glb_ctl_reg_ker; 2521 efx_oword_t glb_ctl_reg_ker;
2517 int rc; 2522 int rc;
2518 2523
2519 EFX_LOG(efx, "performing hardware reset (%d)\n", method); 2524 EFX_LOG(efx, "performing %s hardware reset\n", RESET_TYPE(method));
2520 2525
2521 /* Initiate device reset */ 2526 /* Initiate device reset */
2522 if (method == RESET_TYPE_WORLD) { 2527 if (method == RESET_TYPE_WORLD) {
@@ -2537,22 +2542,24 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2537 } 2542 }
2538 2543
2539 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, 2544 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2540 EXT_PHY_RST_DUR, 0x7, 2545 FRF_AB_EXT_PHY_RST_DUR,
2541 SWRST, 1); 2546 FFE_AB_EXT_PHY_RST_DUR_10240US,
2547 FRF_AB_SWRST, 1);
2542 } else { 2548 } else {
2543 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2544 EXCLUDE_FROM_RESET : 0);
2545
2546 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, 2549 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2547 EXT_PHY_RST_CTL, reset_phy, 2550 /* exclude PHY from "invisible" reset */
2548 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET, 2551 FRF_AB_EXT_PHY_RST_CTL,
2549 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET, 2552 method == RESET_TYPE_INVISIBLE,
2550 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET, 2553 /* exclude EEPROM/flash and PCIe */
2551 EE_RST_CTL, EXCLUDE_FROM_RESET, 2554 FRF_AB_PCIE_CORE_RST_CTL, 1,
2552 EXT_PHY_RST_DUR, 0x7 /* 10ms */, 2555 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2553 SWRST, 1); 2556 FRF_AB_PCIE_SD_RST_CTL, 1,
2554 } 2557 FRF_AB_EE_RST_CTL, 1,
2555 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 2558 FRF_AB_EXT_PHY_RST_DUR,
2559 FFE_AB_EXT_PHY_RST_DUR_10240US,
2560 FRF_AB_SWRST, 1);
2561 }
2562 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2556 2563
2557 EFX_LOG(efx, "waiting for hardware reset\n"); 2564 EFX_LOG(efx, "waiting for hardware reset\n");
2558 schedule_timeout_uninterruptible(HZ / 20); 2565 schedule_timeout_uninterruptible(HZ / 20);
@@ -2577,8 +2584,8 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2577 } 2584 }
2578 2585
2579 /* Assert that reset complete */ 2586 /* Assert that reset complete */
2580 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 2587 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2581 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) { 2588 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2582 rc = -ETIMEDOUT; 2589 rc = -ETIMEDOUT;
2583 EFX_ERR(efx, "timed out waiting for hardware reset\n"); 2590 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2584 goto fail5; 2591 goto fail5;
@@ -2597,6 +2604,42 @@ fail5:
2597 return rc; 2604 return rc;
2598} 2605}
2599 2606
2607void falcon_monitor(struct efx_nic *efx)
2608{
2609 bool link_changed;
2610 int rc;
2611
2612 BUG_ON(!mutex_is_locked(&efx->mac_lock));
2613
2614 rc = falcon_board(efx)->type->monitor(efx);
2615 if (rc) {
2616 EFX_ERR(efx, "Board sensor %s; shutting down PHY\n",
2617 (rc == -ERANGE) ? "reported fault" : "failed");
2618 efx->phy_mode |= PHY_MODE_LOW_POWER;
2619 __efx_reconfigure_port(efx);
2620 }
2621
2622 if (LOOPBACK_INTERNAL(efx))
2623 link_changed = falcon_loopback_link_poll(efx);
2624 else
2625 link_changed = efx->phy_op->poll(efx);
2626
2627 if (link_changed) {
2628 falcon_stop_nic_stats(efx);
2629 falcon_deconfigure_mac_wrapper(efx);
2630
2631 falcon_switch_mac(efx);
2632 efx->mac_op->reconfigure(efx);
2633
2634 falcon_start_nic_stats(efx);
2635
2636 efx_link_status_changed(efx);
2637 }
2638
2639 if (EFX_IS10G(efx))
2640 falcon_poll_xmac(efx);
2641}
2642
2600/* Zeroes out the SRAM contents. This routine must be called in 2643/* Zeroes out the SRAM contents. This routine must be called in
2601 * process context and is allowed to sleep. 2644 * process context and is allowed to sleep.
2602 */ 2645 */
@@ -2606,16 +2649,16 @@ static int falcon_reset_sram(struct efx_nic *efx)
2606 int count; 2649 int count;
2607 2650
2608 /* Set the SRAM wake/sleep GPIO appropriately. */ 2651 /* Set the SRAM wake/sleep GPIO appropriately. */
2609 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 2652 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2610 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); 2653 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2611 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); 2654 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2612 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 2655 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2613 2656
2614 /* Initiate SRAM reset */ 2657 /* Initiate SRAM reset */
2615 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, 2658 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2616 SRAM_OOB_BT_INIT_EN, 1, 2659 FRF_AZ_SRM_INIT_EN, 1,
2617 SRM_NUM_BANKS_AND_BANK_SIZE, 0); 2660 FRF_AZ_SRM_NB_SZ, 0);
2618 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 2661 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2619 2662
2620 /* Wait for SRAM reset to complete */ 2663 /* Wait for SRAM reset to complete */
2621 count = 0; 2664 count = 0;
@@ -2626,8 +2669,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
2626 schedule_timeout_uninterruptible(HZ / 50); 2669 schedule_timeout_uninterruptible(HZ / 50);
2627 2670
2628 /* Check for reset complete */ 2671 /* Check for reset complete */
2629 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 2672 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2630 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) { 2673 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2631 EFX_LOG(efx, "SRAM reset complete\n"); 2674 EFX_LOG(efx, "SRAM reset complete\n");
2632 2675
2633 return 0; 2676 return 0;
@@ -2712,16 +2755,16 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2712 board_rev = le16_to_cpu(v2->board_revision); 2755 board_rev = le16_to_cpu(v2->board_revision);
2713 2756
2714 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { 2757 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2715 __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; 2758 rc = falcon_spi_device_init(
2716 __le32 ee = v3->spi_device_type[EE_SPI_EEPROM]; 2759 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2717 rc = falcon_spi_device_init(efx, &efx->spi_flash, 2760 le32_to_cpu(v3->spi_device_type
2718 EE_SPI_FLASH, 2761 [FFE_AB_SPI_DEVICE_FLASH]));
2719 le32_to_cpu(fl));
2720 if (rc) 2762 if (rc)
2721 goto fail2; 2763 goto fail2;
2722 rc = falcon_spi_device_init(efx, &efx->spi_eeprom, 2764 rc = falcon_spi_device_init(
2723 EE_SPI_EEPROM, 2765 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2724 le32_to_cpu(ee)); 2766 le32_to_cpu(v3->spi_device_type
2767 [FFE_AB_SPI_DEVICE_EEPROM]));
2725 if (rc) 2768 if (rc)
2726 goto fail2; 2769 goto fail2;
2727 } 2770 }
@@ -2732,7 +2775,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2732 2775
2733 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 2776 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2734 2777
2735 efx_set_board_info(efx, board_rev); 2778 falcon_probe_board(efx, board_rev);
2736 2779
2737 kfree(nvconfig); 2780 kfree(nvconfig);
2738 return 0; 2781 return 0;
@@ -2752,38 +2795,31 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2752 efx_oword_t altera_build; 2795 efx_oword_t altera_build;
2753 efx_oword_t nic_stat; 2796 efx_oword_t nic_stat;
2754 2797
2755 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER); 2798 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2756 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) { 2799 if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2757 EFX_ERR(efx, "Falcon FPGA not supported\n"); 2800 EFX_ERR(efx, "Falcon FPGA not supported\n");
2758 return -ENODEV; 2801 return -ENODEV;
2759 } 2802 }
2760 2803
2761 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2804 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2762 2805
2763 switch (falcon_rev(efx)) { 2806 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2764 case FALCON_REV_A0: 2807 u8 pci_rev = efx->pci_dev->revision;
2765 case 0xff:
2766 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2767 return -ENODEV;
2768 2808
2769 case FALCON_REV_A1: 2809 if ((pci_rev == 0xff) || (pci_rev == 0)) {
2770 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) { 2810 EFX_ERR(efx, "Falcon rev A0 not supported\n");
2811 return -ENODEV;
2812 }
2813 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
2814 EFX_ERR(efx, "Falcon rev A1 1G not supported\n");
2815 return -ENODEV;
2816 }
2817 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2771 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); 2818 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2772 return -ENODEV; 2819 return -ENODEV;
2773 } 2820 }
2774 break;
2775
2776 case FALCON_REV_B0:
2777 break;
2778
2779 default:
2780 EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
2781 return -ENODEV;
2782 } 2821 }
2783 2822
2784 /* Initial assumed speed */
2785 efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000;
2786
2787 return 0; 2823 return 0;
2788} 2824}
2789 2825
@@ -2793,40 +2829,43 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
2793 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 2829 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2794 int boot_dev; 2830 int boot_dev;
2795 2831
2796 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); 2832 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2797 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2833 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2798 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 2834 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2799 2835
2800 if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) { 2836 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2801 boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ? 2837 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2802 EE_SPI_FLASH : EE_SPI_EEPROM); 2838 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2803 EFX_LOG(efx, "Booted from %s\n", 2839 EFX_LOG(efx, "Booted from %s\n",
2804 boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM"); 2840 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2805 } else { 2841 } else {
2806 /* Disable VPD and set clock dividers to safe 2842 /* Disable VPD and set clock dividers to safe
2807 * values for initial programming. */ 2843 * values for initial programming. */
2808 boot_dev = -1; 2844 boot_dev = -1;
2809 EFX_LOG(efx, "Booted from internal ASIC settings;" 2845 EFX_LOG(efx, "Booted from internal ASIC settings;"
2810 " setting SPI config\n"); 2846 " setting SPI config\n");
2811 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0, 2847 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2812 /* 125 MHz / 7 ~= 20 MHz */ 2848 /* 125 MHz / 7 ~= 20 MHz */
2813 EE_SF_CLOCK_DIV, 7, 2849 FRF_AB_EE_SF_CLOCK_DIV, 7,
2814 /* 125 MHz / 63 ~= 2 MHz */ 2850 /* 125 MHz / 63 ~= 2 MHz */
2815 EE_EE_CLOCK_DIV, 63); 2851 FRF_AB_EE_EE_CLOCK_DIV, 63);
2816 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 2852 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2817 } 2853 }
2818 2854
2819 if (boot_dev == EE_SPI_FLASH) 2855 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2820 falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH, 2856 falcon_spi_device_init(efx, &efx->spi_flash,
2857 FFE_AB_SPI_DEVICE_FLASH,
2821 default_flash_type); 2858 default_flash_type);
2822 if (boot_dev == EE_SPI_EEPROM) 2859 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2823 falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM, 2860 falcon_spi_device_init(efx, &efx->spi_eeprom,
2861 FFE_AB_SPI_DEVICE_EEPROM,
2824 large_eeprom_type); 2862 large_eeprom_type);
2825} 2863}
2826 2864
2827int falcon_probe_nic(struct efx_nic *efx) 2865int falcon_probe_nic(struct efx_nic *efx)
2828{ 2866{
2829 struct falcon_nic_data *nic_data; 2867 struct falcon_nic_data *nic_data;
2868 struct falcon_board *board;
2830 int rc; 2869 int rc;
2831 2870
2832 /* Allocate storage for hardware specific data */ 2871 /* Allocate storage for hardware specific data */
@@ -2884,18 +2923,33 @@ int falcon_probe_nic(struct efx_nic *efx)
2884 goto fail5; 2923 goto fail5;
2885 2924
2886 /* Initialise I2C adapter */ 2925 /* Initialise I2C adapter */
2887 efx->i2c_adap.owner = THIS_MODULE; 2926 board = falcon_board(efx);
2888 nic_data->i2c_data = falcon_i2c_bit_operations; 2927 board->i2c_adap.owner = THIS_MODULE;
2889 nic_data->i2c_data.data = efx; 2928 board->i2c_data = falcon_i2c_bit_operations;
2890 efx->i2c_adap.algo_data = &nic_data->i2c_data; 2929 board->i2c_data.data = efx;
2891 efx->i2c_adap.dev.parent = &efx->pci_dev->dev; 2930 board->i2c_adap.algo_data = &board->i2c_data;
2892 strlcpy(efx->i2c_adap.name, "SFC4000 GPIO", sizeof(efx->i2c_adap.name)); 2931 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2893 rc = i2c_bit_add_bus(&efx->i2c_adap); 2932 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
2933 sizeof(board->i2c_adap.name));
2934 rc = i2c_bit_add_bus(&board->i2c_adap);
2894 if (rc) 2935 if (rc)
2895 goto fail5; 2936 goto fail5;
2896 2937
2938 rc = falcon_board(efx)->type->init(efx);
2939 if (rc) {
2940 EFX_ERR(efx, "failed to initialise board\n");
2941 goto fail6;
2942 }
2943
2944 nic_data->stats_disable_count = 1;
2945 setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
2946 (unsigned long)efx);
2947
2897 return 0; 2948 return 0;
2898 2949
2950 fail6:
2951 BUG_ON(i2c_del_adapter(&board->i2c_adap));
2952 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2899 fail5: 2953 fail5:
2900 falcon_remove_spi_devices(efx); 2954 falcon_remove_spi_devices(efx);
2901 falcon_free_buffer(efx, &efx->irq_status); 2955 falcon_free_buffer(efx, &efx->irq_status);
@@ -2911,6 +2965,52 @@ int falcon_probe_nic(struct efx_nic *efx)
2911 return rc; 2965 return rc;
2912} 2966}
2913 2967
2968static void falcon_init_rx_cfg(struct efx_nic *efx)
2969{
2970 /* Prior to Siena the RX DMA engine will split each frame at
2971 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
2972 * be so large that that never happens. */
2973 const unsigned huge_buf_size = (3 * 4096) >> 5;
2974 /* RX control FIFO thresholds (32 entries) */
2975 const unsigned ctrl_xon_thr = 20;
2976 const unsigned ctrl_xoff_thr = 25;
2977 /* RX data FIFO thresholds (256-byte units; size varies) */
2978 int data_xon_thr = rx_xon_thresh_bytes >> 8;
2979 int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
2980 efx_oword_t reg;
2981
2982 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2983 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2984 /* Data FIFO size is 5.5K */
2985 if (data_xon_thr < 0)
2986 data_xon_thr = 512 >> 8;
2987 if (data_xoff_thr < 0)
2988 data_xoff_thr = 2048 >> 8;
2989 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2990 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2991 huge_buf_size);
2992 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
2993 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
2994 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2995 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2996 } else {
2997 /* Data FIFO size is 80K; register fields moved */
2998 if (data_xon_thr < 0)
2999 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
3000 if (data_xoff_thr < 0)
3001 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
3002 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
3003 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
3004 huge_buf_size);
3005 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
3006 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
3007 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
3008 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
3009 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
3010 }
3011 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
3012}
3013
2914/* This call performs hardware-specific global initialisation, such as 3014/* This call performs hardware-specific global initialisation, such as
2915 * defining the descriptor cache sizes and number of RSS channels. 3015 * defining the descriptor cache sizes and number of RSS channels.
2916 * It does not set up any buffers, descriptor rings or event queues. 3016 * It does not set up any buffers, descriptor rings or event queues.
@@ -2918,56 +3018,56 @@ int falcon_probe_nic(struct efx_nic *efx)
2918int falcon_init_nic(struct efx_nic *efx) 3018int falcon_init_nic(struct efx_nic *efx)
2919{ 3019{
2920 efx_oword_t temp; 3020 efx_oword_t temp;
2921 unsigned thresh;
2922 int rc; 3021 int rc;
2923 3022
2924 /* Use on-chip SRAM */ 3023 /* Use on-chip SRAM */
2925 falcon_read(efx, &temp, NIC_STAT_REG); 3024 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2926 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); 3025 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2927 falcon_write(efx, &temp, NIC_STAT_REG); 3026 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2928 3027
2929 /* Set the source of the GMAC clock */ 3028 /* Set the source of the GMAC clock */
2930 if (falcon_rev(efx) == FALCON_REV_B0) { 3029 if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
2931 falcon_read(efx, &temp, GPIO_CTL_REG_KER); 3030 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
2932 EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true); 3031 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
2933 falcon_write(efx, &temp, GPIO_CTL_REG_KER); 3032 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
2934 } 3033 }
2935 3034
2936 /* Set buffer table mode */ 3035 /* Select the correct MAC */
2937 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL); 3036 falcon_clock_mac(efx);
2938 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
2939 3037
2940 rc = falcon_reset_sram(efx); 3038 rc = falcon_reset_sram(efx);
2941 if (rc) 3039 if (rc)
2942 return rc; 3040 return rc;
2943 3041
2944 /* Set positions of descriptor caches in SRAM. */ 3042 /* Set positions of descriptor caches in SRAM. */
2945 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); 3043 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
2946 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER); 3044 efx->type->tx_dc_base / 8);
2947 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); 3045 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
2948 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER); 3046 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
3047 efx->type->rx_dc_base / 8);
3048 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
2949 3049
2950 /* Set TX descriptor cache size. */ 3050 /* Set TX descriptor cache size. */
2951 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)); 3051 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
2952 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 3052 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2953 falcon_write(efx, &temp, TX_DC_CFG_REG_KER); 3053 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
2954 3054
2955 /* Set RX descriptor cache size. Set low watermark to size-8, as 3055 /* Set RX descriptor cache size. Set low watermark to size-8, as
2956 * this allows most efficient prefetching. 3056 * this allows most efficient prefetching.
2957 */ 3057 */
2958 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER)); 3058 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
2959 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 3059 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2960 falcon_write(efx, &temp, RX_DC_CFG_REG_KER); 3060 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
2961 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 3061 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2962 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER); 3062 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
2963 3063
2964 /* Clear the parity enables on the TX data fifos as 3064 /* Clear the parity enables on the TX data fifos as
2965 * they produce false parity errors because of timing issues 3065 * they produce false parity errors because of timing issues
2966 */ 3066 */
2967 if (EFX_WORKAROUND_5129(efx)) { 3067 if (EFX_WORKAROUND_5129(efx)) {
2968 falcon_read(efx, &temp, SPARE_REG_KER); 3068 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2969 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0); 3069 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2970 falcon_write(efx, &temp, SPARE_REG_KER); 3070 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2971 } 3071 }
2972 3072
2973 /* Enable all the genuinely fatal interrupts. (They are still 3073 /* Enable all the genuinely fatal interrupts. (They are still
@@ -2977,83 +3077,65 @@ int falcon_init_nic(struct efx_nic *efx)
2977 * Note: All other fatal interrupts are enabled 3077 * Note: All other fatal interrupts are enabled
2978 */ 3078 */
2979 EFX_POPULATE_OWORD_3(temp, 3079 EFX_POPULATE_OWORD_3(temp,
2980 ILL_ADR_INT_KER_EN, 1, 3080 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
2981 RBUF_OWN_INT_KER_EN, 1, 3081 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
2982 TBUF_OWN_INT_KER_EN, 1); 3082 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
2983 EFX_INVERT_OWORD(temp); 3083 EFX_INVERT_OWORD(temp);
2984 falcon_write(efx, &temp, FATAL_INTR_REG_KER); 3084 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
2985 3085
2986 if (EFX_WORKAROUND_7244(efx)) { 3086 if (EFX_WORKAROUND_7244(efx)) {
2987 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 3087 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
2988 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); 3088 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
2989 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); 3089 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
2990 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); 3090 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
2991 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); 3091 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
2992 falcon_write(efx, &temp, RX_FILTER_CTL_REG); 3092 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
2993 } 3093 }
2994 3094
2995 falcon_setup_rss_indir_table(efx); 3095 falcon_setup_rss_indir_table(efx);
2996 3096
3097 /* XXX This is documented only for Falcon A0/A1 */
2997 /* Setup RX. Wait for descriptor is broken and must 3098 /* Setup RX. Wait for descriptor is broken and must
2998 * be disabled. RXDP recovery shouldn't be needed, but is. 3099 * be disabled. RXDP recovery shouldn't be needed, but is.
2999 */ 3100 */
3000 falcon_read(efx, &temp, RX_SELF_RST_REG_KER); 3101 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3001 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1); 3102 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3002 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1); 3103 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3003 if (EFX_WORKAROUND_5583(efx)) 3104 if (EFX_WORKAROUND_5583(efx))
3004 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1); 3105 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3005 falcon_write(efx, &temp, RX_SELF_RST_REG_KER); 3106 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3006 3107
3007 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 3108 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3008 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 3109 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3009 */ 3110 */
3010 falcon_read(efx, &temp, TX_CFG2_REG_KER); 3111 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3011 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe); 3112 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3012 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1); 3113 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3013 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1); 3114 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3014 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0); 3115 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3015 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1); 3116 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3016 /* Enable SW_EV to inherit in char driver - assume harmless here */ 3117 /* Enable SW_EV to inherit in char driver - assume harmless here */
3017 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1); 3118 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3018 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 3119 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3019 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 3120 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3020 /* Squash TX of packets of 16 bytes or less */ 3121 /* Squash TX of packets of 16 bytes or less */
3021 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 3122 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
3022 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 3123 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3023 falcon_write(efx, &temp, TX_CFG2_REG_KER); 3124 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3024 3125
3025 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 3126 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3026 * descriptors (which is bad). 3127 * descriptors (which is bad).
3027 */ 3128 */
3028 falcon_read(efx, &temp, TX_CFG_REG_KER); 3129 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3029 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0); 3130 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3030 falcon_write(efx, &temp, TX_CFG_REG_KER); 3131 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3031 3132
3032 /* RX config */ 3133 falcon_init_rx_cfg(efx);
3033 falcon_read(efx, &temp, RX_CFG_REG_KER);
3034 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
3035 if (EFX_WORKAROUND_7575(efx))
3036 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
3037 (3 * 4096) / 32);
3038 if (falcon_rev(efx) >= FALCON_REV_B0)
3039 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
3040
3041 /* RX FIFO flow control thresholds */
3042 thresh = ((rx_xon_thresh_bytes >= 0) ?
3043 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
3044 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
3045 thresh = ((rx_xoff_thresh_bytes >= 0) ?
3046 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
3047 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
3048 /* RX control FIFO thresholds [32 entries] */
3049 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
3050 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
3051 falcon_write(efx, &temp, RX_CFG_REG_KER);
3052 3134
3053 /* Set destination of both TX and RX Flush events */ 3135 /* Set destination of both TX and RX Flush events */
3054 if (falcon_rev(efx) >= FALCON_REV_B0) { 3136 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
3055 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 3137 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3056 falcon_write(efx, &temp, DP_CTRL_REG); 3138 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3057 } 3139 }
3058 3140
3059 return 0; 3141 return 0;
@@ -3062,12 +3144,15 @@ int falcon_init_nic(struct efx_nic *efx)
3062void falcon_remove_nic(struct efx_nic *efx) 3144void falcon_remove_nic(struct efx_nic *efx)
3063{ 3145{
3064 struct falcon_nic_data *nic_data = efx->nic_data; 3146 struct falcon_nic_data *nic_data = efx->nic_data;
3147 struct falcon_board *board = falcon_board(efx);
3065 int rc; 3148 int rc;
3066 3149
3150 board->type->fini(efx);
3151
3067 /* Remove I2C adapter and clear it in preparation for a retry */ 3152 /* Remove I2C adapter and clear it in preparation for a retry */
3068 rc = i2c_del_adapter(&efx->i2c_adap); 3153 rc = i2c_del_adapter(&board->i2c_adap);
3069 BUG_ON(rc); 3154 BUG_ON(rc);
3070 memset(&efx->i2c_adap, 0, sizeof(efx->i2c_adap)); 3155 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
3071 3156
3072 falcon_remove_spi_devices(efx); 3157 falcon_remove_spi_devices(efx);
3073 falcon_free_buffer(efx, &efx->irq_status); 3158 falcon_free_buffer(efx, &efx->irq_status);
@@ -3087,10 +3172,58 @@ void falcon_remove_nic(struct efx_nic *efx)
3087 3172
3088void falcon_update_nic_stats(struct efx_nic *efx) 3173void falcon_update_nic_stats(struct efx_nic *efx)
3089{ 3174{
3175 struct falcon_nic_data *nic_data = efx->nic_data;
3090 efx_oword_t cnt; 3176 efx_oword_t cnt;
3091 3177
3092 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER); 3178 if (nic_data->stats_disable_count)
3093 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT); 3179 return;
3180
3181 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3182 efx->n_rx_nodesc_drop_cnt +=
3183 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
3184
3185 if (nic_data->stats_pending &&
3186 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
3187 nic_data->stats_pending = false;
3188 rmb(); /* read the done flag before the stats */
3189 efx->mac_op->update_stats(efx);
3190 }
3191}
3192
3193void falcon_start_nic_stats(struct efx_nic *efx)
3194{
3195 struct falcon_nic_data *nic_data = efx->nic_data;
3196
3197 spin_lock_bh(&efx->stats_lock);
3198 if (--nic_data->stats_disable_count == 0)
3199 falcon_stats_request(efx);
3200 spin_unlock_bh(&efx->stats_lock);
3201}
3202
3203void falcon_stop_nic_stats(struct efx_nic *efx)
3204{
3205 struct falcon_nic_data *nic_data = efx->nic_data;
3206 int i;
3207
3208 might_sleep();
3209
3210 spin_lock_bh(&efx->stats_lock);
3211 ++nic_data->stats_disable_count;
3212 spin_unlock_bh(&efx->stats_lock);
3213
3214 del_timer_sync(&nic_data->stats_timer);
3215
3216 /* Wait enough time for the most recent transfer to
3217 * complete. */
3218 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
3219 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
3220 break;
3221 msleep(1);
3222 }
3223
3224 spin_lock_bh(&efx->stats_lock);
3225 falcon_stats_complete(efx);
3226 spin_unlock_bh(&efx->stats_lock);
3094} 3227}
3095 3228
3096/************************************************************************** 3229/**************************************************************************
@@ -3100,50 +3233,46 @@ void falcon_update_nic_stats(struct efx_nic *efx)
3100 ************************************************************************** 3233 **************************************************************************
3101 */ 3234 */
3102 3235
3103struct efx_nic_type falcon_a_nic_type = { 3236struct efx_nic_type falcon_a1_nic_type = {
3104 .mem_bar = 2, 3237 .default_mac_ops = &falcon_xmac_operations,
3238
3239 .revision = EFX_REV_FALCON_A1,
3105 .mem_map_size = 0x20000, 3240 .mem_map_size = 0x20000,
3106 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1, 3241 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3107 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1, 3242 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3108 .buf_tbl_base = BUF_TBL_KER_A1, 3243 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3109 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1, 3244 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3110 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1, 3245 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3111 .txd_ring_mask = FALCON_TXD_RING_MASK, 3246 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3112 .rxd_ring_mask = FALCON_RXD_RING_MASK,
3113 .evq_size = FALCON_EVQ_SIZE,
3114 .max_dma_mask = FALCON_DMA_MASK,
3115 .tx_dma_mask = FALCON_TX_DMA_MASK,
3116 .bug5391_mask = 0xf,
3117 .rx_xoff_thresh = 2048,
3118 .rx_xon_thresh = 512,
3119 .rx_buffer_padding = 0x24, 3247 .rx_buffer_padding = 0x24,
3120 .max_interrupt_mode = EFX_INT_MODE_MSI, 3248 .max_interrupt_mode = EFX_INT_MODE_MSI,
3121 .phys_addr_channels = 4, 3249 .phys_addr_channels = 4,
3250 .tx_dc_base = 0x130000,
3251 .rx_dc_base = 0x100000,
3122}; 3252};
3123 3253
3124struct efx_nic_type falcon_b_nic_type = { 3254struct efx_nic_type falcon_b0_nic_type = {
3125 .mem_bar = 2, 3255 .default_mac_ops = &falcon_xmac_operations,
3256
3257 .revision = EFX_REV_FALCON_B0,
3126 /* Map everything up to and including the RSS indirection 3258 /* Map everything up to and including the RSS indirection
3127 * table. Don't map MSI-X table, MSI-X PBA since Linux 3259 * table. Don't map MSI-X table, MSI-X PBA since Linux
3128 * requires that they not be mapped. */ 3260 * requires that they not be mapped. */
3129 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800, 3261 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3130 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0, 3262 FR_BZ_RX_INDIRECTION_TBL_STEP *
3131 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0, 3263 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3132 .buf_tbl_base = BUF_TBL_KER_B0, 3264 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3133 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0, 3265 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3134 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0, 3266 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3135 .txd_ring_mask = FALCON_TXD_RING_MASK, 3267 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3136 .rxd_ring_mask = FALCON_RXD_RING_MASK, 3268 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3137 .evq_size = FALCON_EVQ_SIZE, 3269 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3138 .max_dma_mask = FALCON_DMA_MASK,
3139 .tx_dma_mask = FALCON_TX_DMA_MASK,
3140 .bug5391_mask = 0,
3141 .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
3142 .rx_xon_thresh = 27648, /* ~3*max MTU */
3143 .rx_buffer_padding = 0, 3270 .rx_buffer_padding = 0,
3144 .max_interrupt_mode = EFX_INT_MODE_MSIX, 3271 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3145 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 3272 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
3146 * interrupt handler only supports 32 3273 * interrupt handler only supports 32
3147 * channels */ 3274 * channels */
3275 .tx_dc_base = 0x130000,
3276 .rx_dc_base = 0x100000,
3148}; 3277};
3149 3278
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 77f2e0db7ca1..81196a0fb504 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -11,6 +11,7 @@
11#ifndef EFX_FALCON_H 11#ifndef EFX_FALCON_H
12#define EFX_FALCON_H 12#define EFX_FALCON_H
13 13
14#include <linux/i2c-algo-bit.h>
14#include "net_driver.h" 15#include "net_driver.h"
15#include "efx.h" 16#include "efx.h"
16 17
@@ -18,19 +19,84 @@
18 * Falcon hardware control 19 * Falcon hardware control
19 */ 20 */
20 21
21enum falcon_revision { 22enum {
22 FALCON_REV_A0 = 0, 23 EFX_REV_FALCON_A0 = 0,
23 FALCON_REV_A1 = 1, 24 EFX_REV_FALCON_A1 = 1,
24 FALCON_REV_B0 = 2, 25 EFX_REV_FALCON_B0 = 2,
25}; 26};
26 27
27static inline int falcon_rev(struct efx_nic *efx) 28static inline int efx_nic_rev(struct efx_nic *efx)
28{ 29{
29 return efx->pci_dev->revision; 30 return efx->type->revision;
30} 31}
31 32
32extern struct efx_nic_type falcon_a_nic_type; 33/**
33extern struct efx_nic_type falcon_b_nic_type; 34 * struct falcon_board_type - board operations and type information
35 * @id: Board type id, as found in NVRAM
36 * @ref_model: Model number of Solarflare reference design
37 * @gen_type: Generic board type description
38 * @init: Allocate resources and initialise peripheral hardware
39 * @init_phy: Do board-specific PHY initialisation
40 * @fini: Shut down hardware and free resources
41 * @set_id_led: Set state of identifying LED or revert to automatic function
42 * @monitor: Board-specific health check function
43 */
44struct falcon_board_type {
45 u8 id;
46 const char *ref_model;
47 const char *gen_type;
48 int (*init) (struct efx_nic *nic);
49 void (*init_phy) (struct efx_nic *efx);
50 void (*fini) (struct efx_nic *nic);
51 void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode);
52 int (*monitor) (struct efx_nic *nic);
53};
54
55/**
56 * struct falcon_board - board information
57 * @type: Type of board
58 * @major: Major rev. ('A', 'B' ...)
59 * @minor: Minor rev. (0, 1, ...)
60 * @i2c_adap: I2C adapter for on-board peripherals
61 * @i2c_data: Data for bit-banging algorithm
62 * @hwmon_client: I2C client for hardware monitor
63 * @ioexp_client: I2C client for power/port control
64 */
65struct falcon_board {
66 const struct falcon_board_type *type;
67 int major;
68 int minor;
69 struct i2c_adapter i2c_adap;
70 struct i2c_algo_bit_data i2c_data;
71 struct i2c_client *hwmon_client, *ioexp_client;
72};
73
74/**
75 * struct falcon_nic_data - Falcon NIC state
76 * @pci_dev2: Secondary function of Falcon A
77 * @board: Board state and functions
78 * @stats_disable_count: Nest count for disabling statistics fetches
79 * @stats_pending: Is there a pending DMA of MAC statistics.
80 * @stats_timer: A timer for regularly fetching MAC statistics.
81 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
82 */
83struct falcon_nic_data {
84 struct pci_dev *pci_dev2;
85 struct falcon_board board;
86 unsigned int stats_disable_count;
87 bool stats_pending;
88 struct timer_list stats_timer;
89 u32 *stats_dma_done;
90};
91
92static inline struct falcon_board *falcon_board(struct efx_nic *efx)
93{
94 struct falcon_nic_data *data = efx->nic_data;
95 return &data->board;
96}
97
98extern struct efx_nic_type falcon_a1_nic_type;
99extern struct efx_nic_type falcon_b0_nic_type;
34 100
35/************************************************************************** 101/**************************************************************************
36 * 102 *
@@ -39,6 +105,8 @@ extern struct efx_nic_type falcon_b_nic_type;
39 ************************************************************************** 105 **************************************************************************
40 */ 106 */
41 107
108extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
109
42/* TX data path */ 110/* TX data path */
43extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); 111extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
44extern void falcon_init_tx(struct efx_tx_queue *tx_queue); 112extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
@@ -68,8 +136,6 @@ extern void falcon_remove_port(struct efx_nic *efx);
68/* MAC/PHY */ 136/* MAC/PHY */
69extern int falcon_switch_mac(struct efx_nic *efx); 137extern int falcon_switch_mac(struct efx_nic *efx);
70extern bool falcon_xaui_link_ok(struct efx_nic *efx); 138extern bool falcon_xaui_link_ok(struct efx_nic *efx);
71extern int falcon_dma_stats(struct efx_nic *efx,
72 unsigned int done_offset);
73extern void falcon_drain_tx_fifo(struct efx_nic *efx); 139extern void falcon_drain_tx_fifo(struct efx_nic *efx);
74extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); 140extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
75extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); 141extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
@@ -79,7 +145,6 @@ extern int falcon_init_interrupt(struct efx_nic *efx);
79extern void falcon_enable_interrupts(struct efx_nic *efx); 145extern void falcon_enable_interrupts(struct efx_nic *efx);
80extern void falcon_generate_test_event(struct efx_channel *channel, 146extern void falcon_generate_test_event(struct efx_channel *channel,
81 unsigned int magic); 147 unsigned int magic);
82extern void falcon_sim_phy_event(struct efx_nic *efx);
83extern void falcon_generate_interrupt(struct efx_nic *efx); 148extern void falcon_generate_interrupt(struct efx_nic *efx);
84extern void falcon_set_int_moderation(struct efx_channel *channel); 149extern void falcon_set_int_moderation(struct efx_channel *channel);
85extern void falcon_disable_interrupts(struct efx_nic *efx); 150extern void falcon_disable_interrupts(struct efx_nic *efx);
@@ -89,14 +154,15 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
89 154
90/* Global Resources */ 155/* Global Resources */
91extern int falcon_probe_nic(struct efx_nic *efx); 156extern int falcon_probe_nic(struct efx_nic *efx);
92extern int falcon_probe_resources(struct efx_nic *efx);
93extern int falcon_init_nic(struct efx_nic *efx); 157extern int falcon_init_nic(struct efx_nic *efx);
94extern int falcon_flush_queues(struct efx_nic *efx); 158extern int falcon_flush_queues(struct efx_nic *efx);
95extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 159extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
96extern void falcon_remove_resources(struct efx_nic *efx); 160extern void falcon_monitor(struct efx_nic *efx);
97extern void falcon_remove_nic(struct efx_nic *efx); 161extern void falcon_remove_nic(struct efx_nic *efx);
98extern void falcon_update_nic_stats(struct efx_nic *efx); 162extern void falcon_update_nic_stats(struct efx_nic *efx);
99extern void falcon_set_multicast_hash(struct efx_nic *efx); 163extern void falcon_start_nic_stats(struct efx_nic *efx);
164extern void falcon_stop_nic_stats(struct efx_nic *efx);
165extern void falcon_push_multicast_hash(struct efx_nic *efx);
100extern int falcon_reset_xaui(struct efx_nic *efx); 166extern int falcon_reset_xaui(struct efx_nic *efx);
101 167
102/* Tests */ 168/* Tests */
@@ -142,4 +208,6 @@ extern int falcon_test_registers(struct efx_nic *efx);
142extern void falcon_generate_event(struct efx_channel *channel, 208extern void falcon_generate_event(struct efx_channel *channel,
143 efx_qword_t *event); 209 efx_qword_t *event);
144 210
211extern void falcon_poll_xmac(struct efx_nic *efx);
212
145#endif /* EFX_FALCON_H */ 213#endif /* EFX_FALCON_H */
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
new file mode 100644
index 000000000000..da750959c61a
--- /dev/null
+++ b/drivers/net/sfc/falcon_boards.c
@@ -0,0 +1,751 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include <linux/rtnetlink.h>
11
12#include "net_driver.h"
13#include "phy.h"
14#include "efx.h"
15#include "falcon.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h"
19
20/* Macros for unpacking the board revision */
21/* The revision info is in host byte order. */
22#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
23#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
24#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
25
26/* Board types */
27#define FALCON_BOARD_SFE4001 0x01
28#define FALCON_BOARD_SFE4002 0x02
29#define FALCON_BOARD_SFN4111T 0x51
30#define FALCON_BOARD_SFN4112F 0x52
31
32/*****************************************************************************
33 * Support for LM87 sensor chip used on several boards
34 */
35#define LM87_REG_ALARMS1 0x41
36#define LM87_REG_ALARMS2 0x42
37#define LM87_IN_LIMITS(nr, _min, _max) \
38 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
39#define LM87_AIN_LIMITS(nr, _min, _max) \
40 0x3B + (nr), _max, 0x1A + (nr), _min
41#define LM87_TEMP_INT_LIMITS(_min, _max) \
42 0x39, _max, 0x3A, _min
43#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
44 0x37, _max, 0x38, _min
45
46#define LM87_ALARM_TEMP_INT 0x10
47#define LM87_ALARM_TEMP_EXT1 0x20
48
49#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
50
51static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
52 const u8 *reg_values)
53{
54 struct falcon_board *board = falcon_board(efx);
55 struct i2c_client *client = i2c_new_device(&board->i2c_adap, info);
56 int rc;
57
58 if (!client)
59 return -EIO;
60
61 while (*reg_values) {
62 u8 reg = *reg_values++;
63 u8 value = *reg_values++;
64 rc = i2c_smbus_write_byte_data(client, reg, value);
65 if (rc)
66 goto err;
67 }
68
69 board->hwmon_client = client;
70 return 0;
71
72err:
73 i2c_unregister_device(client);
74 return rc;
75}
76
77static void efx_fini_lm87(struct efx_nic *efx)
78{
79 i2c_unregister_device(falcon_board(efx)->hwmon_client);
80}
81
82static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
83{
84 struct i2c_client *client = falcon_board(efx)->hwmon_client;
85 s32 alarms1, alarms2;
86
87 /* If link is up then do not monitor temperature */
88 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
89 return 0;
90
91 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
92 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
93 if (alarms1 < 0)
94 return alarms1;
95 if (alarms2 < 0)
96 return alarms2;
97 alarms1 &= mask;
98 alarms2 &= mask >> 8;
99 if (alarms1 || alarms2) {
100 EFX_ERR(efx,
101 "LM87 detected a hardware failure (status %02x:%02x)"
102 "%s%s\n",
103 alarms1, alarms2,
104 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
105 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
106 return -ERANGE;
107 }
108
109 return 0;
110}
111
112#else /* !CONFIG_SENSORS_LM87 */
113
114static inline int
115efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
116 const u8 *reg_values)
117{
118 return 0;
119}
120static inline void efx_fini_lm87(struct efx_nic *efx)
121{
122}
123static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
124{
125 return 0;
126}
127
128#endif /* CONFIG_SENSORS_LM87 */
129
130/*****************************************************************************
131 * Support for the SFE4001 and SFN4111T NICs.
132 *
133 * The SFE4001 does not power-up fully at reset due to its high power
134 * consumption. We control its power via a PCA9539 I/O expander.
135 * Both boards have a MAX6647 temperature monitor which we expose to
136 * the lm90 driver.
137 *
138 * This also provides minimal support for reflashing the PHY, which is
139 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
140 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
141 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
142 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
143 * exclusive with the network device being open.
144 */
145
146/**************************************************************************
147 * Support for I2C IO Expander device on SFE4001
148 */
149#define PCA9539 0x74
150
151#define P0_IN 0x00
152#define P0_OUT 0x02
153#define P0_INVERT 0x04
154#define P0_CONFIG 0x06
155
156#define P0_EN_1V0X_LBN 0
157#define P0_EN_1V0X_WIDTH 1
158#define P0_EN_1V2_LBN 1
159#define P0_EN_1V2_WIDTH 1
160#define P0_EN_2V5_LBN 2
161#define P0_EN_2V5_WIDTH 1
162#define P0_EN_3V3X_LBN 3
163#define P0_EN_3V3X_WIDTH 1
164#define P0_EN_5V_LBN 4
165#define P0_EN_5V_WIDTH 1
166#define P0_SHORTEN_JTAG_LBN 5
167#define P0_SHORTEN_JTAG_WIDTH 1
168#define P0_X_TRST_LBN 6
169#define P0_X_TRST_WIDTH 1
170#define P0_DSP_RESET_LBN 7
171#define P0_DSP_RESET_WIDTH 1
172
173#define P1_IN 0x01
174#define P1_OUT 0x03
175#define P1_INVERT 0x05
176#define P1_CONFIG 0x07
177
178#define P1_AFE_PWD_LBN 0
179#define P1_AFE_PWD_WIDTH 1
180#define P1_DSP_PWD25_LBN 1
181#define P1_DSP_PWD25_WIDTH 1
182#define P1_RESERVED_LBN 2
183#define P1_RESERVED_WIDTH 2
184#define P1_SPARE_LBN 4
185#define P1_SPARE_WIDTH 4
186
187/* Temperature Sensor */
188#define MAX664X_REG_RSL 0x02
189#define MAX664X_REG_WLHO 0x0B
190
191static void sfe4001_poweroff(struct efx_nic *efx)
192{
193 struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
194 struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
195
196 /* Turn off all power rails and disable outputs */
197 i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
198 i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
199 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
200
201 /* Clear any over-temperature alert */
202 i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
203}
204
/* Power up the SFE4001 PHY via the I/O expander, retrying the rail
 * sequencing up to 20 times while waiting for the DSP to boot.
 *
 * Returns 0 on success or a negative error code (from the I2C layer, or
 * -ETIMEDOUT if the DSP never asserts its AFE power line).  On any
 * failure after the first write, all rails are switched back off via
 * sfe4001_poweroff().
 */
static int sfe4001_poweron(struct efx_nic *efx)
{
	struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client;
	struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client;
	unsigned int i, j;
	int rc;
	u8 out;

	/* Clear any previous over-temperature alert */
	rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
	if (rc < 0)
		return rc;

	/* Enable port 0 and port 1 outputs on IO expander */
	rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
	if (rc)
		return rc;
	rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
				       0xff & ~(1 << P1_SPARE_LBN));
	if (rc)
		goto fail_on;

	/* If PHY power is on, turn it all off and wait 1 second to
	 * ensure a full reset.
	 */
	rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
	if (rc < 0)
		goto fail_on;
	/* Rail enables are active-low (poweroff writes 0xff), so all bits
	 * set means every rail off.  The "0 <<" terms evaluate to 0 and
	 * exist only to document which enable bits this value covers. */
	out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
		       (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
		       (0 << P0_EN_1V0X_LBN));
	if (rc != out) {
		EFX_INFO(efx, "power-cycling PHY\n");
		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;
		schedule_timeout_uninterruptible(HZ);
	}

	for (i = 0; i < 20; ++i) {
		/* Turn on 1.2V, 2.5V, 3.3V and 5V power rails
		 * (clearing an enable bit switches the rail on) */
		out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
			       (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
			       (1 << P0_X_TRST_LBN));
		if (efx->phy_mode & PHY_MODE_SPECIAL)
			out |= 1 << P0_EN_3V3X_LBN;

		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;
		msleep(10);

		/* Turn on 1V power rail */
		out &= ~(1 << P0_EN_1V0X_LBN);
		rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
		if (rc)
			goto fail_on;

		EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);

		/* In flash config mode, DSP does not turn on AFE, so
		 * just wait 1 second.
		 */
		if (efx->phy_mode & PHY_MODE_SPECIAL) {
			schedule_timeout_uninterruptible(HZ);
			return 0;
		}

		/* Poll for up to 1 second for the DSP to signal boot */
		for (j = 0; j < 10; ++j) {
			msleep(100);

			/* Check DSP has asserted AFE power line */
			rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
			if (rc < 0)
				goto fail_on;
			if (rc & (1 << P1_AFE_PWD_LBN))
				return 0;
		}
	}

	EFX_INFO(efx, "timed out waiting for DSP boot\n");
	rc = -ETIMEDOUT;
fail_on:
	sfe4001_poweroff(efx);
	return rc;
}
291
/* Reset the SFN4111T PHY by pulsing RST_N (GPIO 2) low for 1 second,
 * with the FLASH_CFG_1 strap (GPIO 3) driven according to whether
 * PHY_MODE_SPECIAL (flash configuration mode) is requested.
 * Always returns 0 (kept int for signature parity with sfe4001_poweron,
 * used interchangeably in set_phy_flash_cfg()).
 */
static int sfn4111t_reset(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);
	efx_oword_t reg;

	/* GPIO 3 and the GPIO register are shared with I2C, so block that */
	i2c_lock_adapter(&board->i2c_adap);

	/* Pull RST_N (GPIO 2) low then let it up again, setting the
	 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
	 * output enables; the output levels should always be 0 (low)
	 * and we rely on external pull-ups. */
	efx_reado(efx, &reg, FR_AB_GPIO_CTL);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
	msleep(1000);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
	EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
			    !!(efx->phy_mode & PHY_MODE_SPECIAL));
	efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
	msleep(1);

	i2c_unlock_adapter(&board->i2c_adap);

	/* Allow the PHY time to come out of reset before returning */
	ssleep(1);
	return 0;
}
319
320static ssize_t show_phy_flash_cfg(struct device *dev,
321 struct device_attribute *attr, char *buf)
322{
323 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
324 return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
325}
326
/* sysfs write handler for "phy_flash_cfg": writing a non-"0" value puts
 * the PHY into flash configuration mode (PHY_MODE_SPECIAL); writing "0"
 * (or nothing) takes it out again.  Either transition power-cycles or
 * resets the PHY, so it is refused with -EBUSY while the interface is
 * running.  Returns @count on success or a negative error code.
 */
static ssize_t set_phy_flash_cfg(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
	enum efx_phy_mode old_mode, new_mode;
	int err;

	/* rtnl lock serialises against ifup/ifdown and other reconfiguration */
	rtnl_lock();
	old_mode = efx->phy_mode;
	if (count == 0 || *buf == '0')
		new_mode = old_mode & ~PHY_MODE_SPECIAL;
	else
		new_mode = PHY_MODE_SPECIAL;
	if (old_mode == new_mode) {
		err = 0;
	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
		err = -EBUSY;
	} else {
		/* Reset the PHY, reconfigure the MAC and enable/disable
		 * MAC stats accordingly. */
		efx->phy_mode = new_mode;
		if (new_mode & PHY_MODE_SPECIAL)
			falcon_stop_nic_stats(efx);
		/* SFE4001 uses board power control; SFN4111T uses a GPIO reset */
		if (falcon_board(efx)->type->id == FALCON_BOARD_SFE4001)
			err = sfe4001_poweron(efx);
		else
			err = sfn4111t_reset(efx);
		/* Reconfigure even on error so the port state stays coherent */
		efx_reconfigure_port(efx);
		if (!(new_mode & PHY_MODE_SPECIAL))
			falcon_start_nic_stats(efx);
	}
	rtnl_unlock();

	return err ? err : count;
}
363
364static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
365
/* Tear down SFE4001 board support: remove the sysfs control file first
 * (so no new power transitions can be triggered), then power the PHY
 * off, then release both I2C clients created by sfe4001_init().
 */
static void sfe4001_fini(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);

	EFX_INFO(efx, "%s\n", __func__);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	sfe4001_poweroff(efx);
	i2c_unregister_device(board->ioexp_client);
	i2c_unregister_device(board->hwmon_client);
}
377
/* Periodic health check for the SFE4001: verify the PHY is still
 * powered.  Returns 0 if healthy, -EIO if the I/O expander cannot be
 * read, or -ERANGE if the PHY has lost power (over-temperature
 * shutdown); in the failure cases the board is powered off.
 */
static int sfe4001_check_hw(struct efx_nic *efx)
{
	s32 status;

	/* If XAUI link is up then do not monitor */
	if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
		return 0;

	/* Check the powered status of the PHY. Lack of power implies that
	 * the MAX6647 has shut down power to it, probably due to a temp.
	 * alarm. Reading the power status rather than the MAX6647 status
	 * directly because the latter is read-to-clear and would thus
	 * start to power up the PHY again when polled, causing us to blip
	 * the power undesirably.
	 * We know we can read from the IO expander because we did
	 * it during power-on. Assume failure now is bad news. */
	status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN);
	if (status >= 0 &&
	    (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
		return 0;

	/* Use board power control, not PHY power control */
	sfe4001_poweroff(efx);
	efx->phy_mode = PHY_MODE_OFF;

	return (status < 0) ? -EIO : -ERANGE;
}
405
/* MAX6647 temperature monitor at I2C address 0x4e (SFE4001) */
static struct i2c_board_info sfe4001_hwmon_info = {
	I2C_BOARD_INFO("max6647", 0x4e),
};
409
/* This board uses an I2C expander to provide power to the PHY, which needs to
 * be turned on before the PHY can be used.
 * Context: Process context, rtnl lock held
 *
 * Registers the hardware monitor and I/O expander I2C clients, raises the
 * over-temperature limit, powers the PHY on and creates the
 * "phy_flash_cfg" sysfs attribute.  Returns 0 or a negative error code,
 * undoing everything done so far on failure.
 */
static int sfe4001_init(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);
	int rc;

	/* If the lm90 hwmon driver is not available, bind a dummy client so
	 * we can still talk to the sensor over SMBus ourselves */
#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
	board->hwmon_client =
		i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info);
#else
	board->hwmon_client =
		i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr);
#endif
	if (!board->hwmon_client)
		return -EIO;

	/* Raise board/PHY high limit from 85 to 90 degrees Celsius */
	rc = i2c_smbus_write_byte_data(board->hwmon_client,
				       MAX664X_REG_WLHO, 90);
	if (rc)
		goto fail_hwmon;

	/* PCA9539 is presumably the I/O expander's I2C address constant —
	 * defined outside this file; confirm against the board schematic */
	board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539);
	if (!board->ioexp_client) {
		rc = -EIO;
		goto fail_hwmon;
	}

	if (efx->phy_mode & PHY_MODE_SPECIAL) {
		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
		 * will fail. */
		falcon_stop_nic_stats(efx);
	}
	rc = sfe4001_poweron(efx);
	if (rc)
		goto fail_ioexp;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	if (rc)
		goto fail_on;

	EFX_INFO(efx, "PHY is powered on\n");
	return 0;

fail_on:
	sfe4001_poweroff(efx);
fail_ioexp:
	i2c_unregister_device(board->ioexp_client);
fail_hwmon:
	i2c_unregister_device(board->hwmon_client);
	return rc;
}
465
466static int sfn4111t_check_hw(struct efx_nic *efx)
467{
468 s32 status;
469
470 /* If XAUI link is up then do not monitor */
471 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
472 return 0;
473
474 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
475 status = i2c_smbus_read_byte_data(falcon_board(efx)->hwmon_client,
476 MAX664X_REG_RSL);
477 if (status < 0)
478 return -EIO;
479 if (status & 0x57)
480 return -ERANGE;
481 return 0;
482}
483
/* Tear down SFN4111T board support: remove the sysfs control file and
 * release the hardware-monitor I2C client created by sfn4111t_init().
 */
static void sfn4111t_fini(struct efx_nic *efx)
{
	EFX_INFO(efx, "%s\n", __func__);

	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	i2c_unregister_device(falcon_board(efx)->hwmon_client);
}
491
/* Rev A0 (minor < 5) boards carry a MAX6647 at 0x4e... */
static struct i2c_board_info sfn4111t_a0_hwmon_info = {
	I2C_BOARD_INFO("max6647", 0x4e),
};

/* ...later revisions a MAX6646 at 0x4d; selected in sfn4111t_init() */
static struct i2c_board_info sfn4111t_r5_hwmon_info = {
	I2C_BOARD_INFO("max6646", 0x4d),
};
499
/* PHY initialisation for the SFN4111T.  If the PHY boots normally we are
 * done; if it fails to boot with -EINVAL (firmware apparently absent or
 * invalid), fall back to flash configuration mode, reset the PHY with
 * the FLASH_CFG_1 strap set, and wait for it again.
 */
static void sfn4111t_init_phy(struct efx_nic *efx)
{
	if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
		if (sft9001_wait_boot(efx) != -EINVAL)
			return;

		/* Stats must stop before entering special mode (no clock) */
		efx->phy_mode = PHY_MODE_SPECIAL;
		falcon_stop_nic_stats(efx);
	}

	sfn4111t_reset(efx);
	sft9001_wait_boot(efx);
}
513
/* Initialise SFN4111T board support: register the revision-appropriate
 * temperature monitor I2C client and create the "phy_flash_cfg" sysfs
 * attribute.  Returns 0 or a negative error code, releasing the I2C
 * client on failure.
 */
static int sfn4111t_init(struct efx_nic *efx)
{
	struct falcon_board *board = falcon_board(efx);
	int rc;

	/* Board minor revision selects which sensor chip is fitted */
	board->hwmon_client =
		i2c_new_device(&board->i2c_adap,
			       (board->minor < 5) ?
			       &sfn4111t_a0_hwmon_info :
			       &sfn4111t_r5_hwmon_info);
	if (!board->hwmon_client)
		return -EIO;

	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
	if (rc)
		goto fail_hwmon;

	if (efx->phy_mode & PHY_MODE_SPECIAL)
		/* PHY may not generate a 156.25 MHz clock and MAC
		 * stats fetch will fail. */
		falcon_stop_nic_stats(efx);

	return 0;

fail_hwmon:
	i2c_unregister_device(board->hwmon_client);
	return rc;
}
542
543/*****************************************************************************
544 * Support for the SFE4002
545 *
546 */
static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */

/* LM87 limit-register initialisation table for the SFE4002; each entry
 * sets the acceptable range for one monitored voltage or temperature.
 * Zero-terminated; consumed by efx_init_lm87(). */
static const u8 sfe4002_lm87_regs[] = {
	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
	LM87_IN_LIMITS(3, 0xb0, 0xc9),		/* 5V:    4.6-5.2V */
	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
	LM87_AIN_LIMITS(0, 0xa0, 0xb2),		/* AIN1:  1.66V +/- 5% */
	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
	0
};

/* LM87 hardware monitor at I2C address 0x2e (SFE4002) */
static struct i2c_board_info sfe4002_hwmon_info = {
	I2C_BOARD_INFO("lm87", 0x2e),
	.platform_data	= &sfe4002_lm87_channel,
};
567
568/****************************************************************************/
569/* LED allocations. Note that on rev A0 boards the schematic and the reality
570 * differ: red and green are swapped. Below is the fixed (A1) layout (there
571 * are only 3 A0 boards in existence, so no real reason to make this
572 * conditional).
573 */
574#define SFE4002_FAULT_LED (2) /* Red */
575#define SFE4002_RX_LED (0) /* Green */
576#define SFE4002_TX_LED (1) /* Amber */
577
/* Configure the SFE4002's three PHY-driven LEDs for normal operation. */
static void sfe4002_init_phy(struct efx_nic *efx)
{
	/* Set the TX and RX LEDs to reflect status and activity, and the
	 * fault LED off */
	falcon_qt202x_set_led(efx, SFE4002_TX_LED,
			      QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
	falcon_qt202x_set_led(efx, SFE4002_RX_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
	falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
}
588
589static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
590{
591 falcon_qt202x_set_led(
592 efx, SFE4002_FAULT_LED,
593 (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF);
594}
595
596static int sfe4002_check_hw(struct efx_nic *efx)
597{
598 struct falcon_board *board = falcon_board(efx);
599
600 /* A0 board rev. 4002s report a temperature fault the whole time
601 * (bad sensor) so we mask it out. */
602 unsigned alarm_mask =
603 (board->major == 0 && board->minor == 0) ?
604 ~LM87_ALARM_TEMP_EXT1 : ~0;
605
606 return efx_check_lm87(efx, alarm_mask);
607}
608
/* Initialise SFE4002 board support: program the LM87 monitor's limit
 * registers.  Returns 0 or a negative error code. */
static int sfe4002_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
}
613
614/*****************************************************************************
615 * Support for the SFN4112F
616 *
617 */
static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */

/* LM87 limit-register initialisation table for the SFN4112F (fewer
 * monitored inputs than the SFE4002).  Zero-terminated; consumed by
 * efx_init_lm87(). */
static const u8 sfn4112f_lm87_regs[] = {
	LM87_IN_LIMITS(0, 0x83, 0x91),		/* 2.5V:  1.8V +/- 5% */
	LM87_IN_LIMITS(1, 0x51, 0x5a),		/* Vccp1: 1.2V +/- 5% */
	LM87_IN_LIMITS(2, 0xb6, 0xca),		/* 3.3V:  3.3V +/- 5% */
	LM87_IN_LIMITS(4, 0xb0, 0xe0),		/* 12V:   11-14V */
	LM87_IN_LIMITS(5, 0x44, 0x4b),		/* Vccp2: 1.0V +/- 5% */
	LM87_AIN_LIMITS(1, 0x91, 0xa1),		/* AIN2:  1.5V +/- 5% */
	LM87_TEMP_INT_LIMITS(10, 60),		/* board */
	LM87_TEMP_EXT1_LIMITS(10, 70),		/* Falcon */
	0
};

/* LM87 hardware monitor at I2C address 0x2e (SFN4112F) */
static struct i2c_board_info sfn4112f_hwmon_info = {
	I2C_BOARD_INFO("lm87", 0x2e),
	.platform_data	= &sfn4112f_lm87_channel,
};
636
637#define SFN4112F_ACT_LED 0
638#define SFN4112F_LINK_LED 1
639
/* Configure the SFN4112F's two PHY-driven LEDs for normal operation:
 * one for RX activity, one for link status. */
static void sfn4112f_init_phy(struct efx_nic *efx)
{
	falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
	falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
			      QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
}
647
648static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
649{
650 int reg;
651
652 switch (mode) {
653 case EFX_LED_OFF:
654 reg = QUAKE_LED_OFF;
655 break;
656 case EFX_LED_ON:
657 reg = QUAKE_LED_ON;
658 break;
659 default:
660 reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT;
661 break;
662 }
663
664 falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg);
665}
666
/* Periodic health check for the SFN4112F via the shared LM87 helper.
 * Returns 0 if healthy or a negative error code. */
static int sfn4112f_check_hw(struct efx_nic *efx)
{
	/* Mask out unused sensors (alarm bits 0x48 are not wired up on
	 * this board — see the reduced sfn4112f_lm87_regs table) */
	return efx_check_lm87(efx, ~0x48);
}
672
/* Initialise SFN4112F board support: program the LM87 monitor's limit
 * registers.  Returns 0 or a negative error code. */
static int sfn4112f_init(struct efx_nic *efx)
{
	return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
}
677
/* Table of supported Falcon boards and their operations, keyed by the
 * board type ID decoded from the hardware revision info; matched in
 * falcon_probe_board(). */
static const struct falcon_board_type board_types[] = {
	{
		.id		= FALCON_BOARD_SFE4001,
		.ref_model	= "SFE4001",
		.gen_type	= "10GBASE-T adapter",
		.init		= sfe4001_init,
		.init_phy	= efx_port_dummy_op_void,
		.fini		= sfe4001_fini,
		.set_id_led	= tenxpress_set_id_led,
		.monitor	= sfe4001_check_hw,
	},
	{
		.id		= FALCON_BOARD_SFE4002,
		.ref_model	= "SFE4002",
		.gen_type	= "XFP adapter",
		.init		= sfe4002_init,
		.init_phy	= sfe4002_init_phy,
		.fini		= efx_fini_lm87,
		.set_id_led	= sfe4002_set_id_led,
		.monitor	= sfe4002_check_hw,
	},
	{
		.id		= FALCON_BOARD_SFN4111T,
		.ref_model	= "SFN4111T",
		.gen_type	= "100/1000/10GBASE-T adapter",
		.init		= sfn4111t_init,
		.init_phy	= sfn4111t_init_phy,
		.fini		= sfn4111t_fini,
		.set_id_led	= tenxpress_set_id_led,
		.monitor	= sfn4111t_check_hw,
	},
	{
		.id		= FALCON_BOARD_SFN4112F,
		.ref_model	= "SFN4112F",
		.gen_type	= "SFP+ adapter",
		.init		= sfn4112f_init,
		.init_phy	= sfn4112f_init_phy,
		.fini		= efx_fini_lm87,
		.set_id_led	= sfn4112f_set_id_led,
		.monitor	= sfn4112f_check_hw,
	},
};
720
/* No-op operations used when the board type is unrecognised, so callers
 * can always invoke board->type methods unconditionally. */
static const struct falcon_board_type falcon_dummy_board = {
	.init		= efx_port_dummy_op_int,
	.init_phy	= efx_port_dummy_op_void,
	.fini		= efx_port_dummy_op_void,
	.set_id_led	= efx_port_dummy_op_set_id_led,
	.monitor	= efx_port_dummy_op_int,
};
728
729void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
730{
731 struct falcon_board *board = falcon_board(efx);
732 u8 type_id = FALCON_BOARD_TYPE(revision_info);
733 int i;
734
735 board->major = FALCON_BOARD_MAJOR(revision_info);
736 board->minor = FALCON_BOARD_MINOR(revision_info);
737
738 for (i = 0; i < ARRAY_SIZE(board_types); i++)
739 if (board_types[i].id == type_id)
740 board->type = &board_types[i];
741
742 if (board->type) {
743 EFX_INFO(efx, "board is %s rev %c%d\n",
744 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
745 ? board->type->ref_model : board->type->gen_type,
746 'A' + board->major, board->minor);
747 } else {
748 EFX_ERR(efx, "unknown board type %d\n", type_id);
749 board->type = &falcon_dummy_board;
750 }
751}
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
index 8865eae20ac5..66d499cc23f2 100644
--- a/drivers/net/sfc/falcon_gmac.c
+++ b/drivers/net/sfc/falcon_gmac.c
@@ -13,9 +13,8 @@
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "falcon.h"
15#include "mac.h" 15#include "mac.h"
16#include "falcon_hwdefs.h" 16#include "regs.h"
17#include "falcon_io.h" 17#include "io.h"
18#include "gmii.h"
19 18
20/************************************************************************** 19/**************************************************************************
21 * 20 *
@@ -25,101 +24,102 @@
25 24
26static void falcon_reconfigure_gmac(struct efx_nic *efx) 25static void falcon_reconfigure_gmac(struct efx_nic *efx)
27{ 26{
27 struct efx_link_state *link_state = &efx->link_state;
28 bool loopback, tx_fc, rx_fc, bytemode; 28 bool loopback, tx_fc, rx_fc, bytemode;
29 int if_mode; 29 int if_mode;
30 unsigned int max_frame_len; 30 unsigned int max_frame_len;
31 efx_oword_t reg; 31 efx_oword_t reg;
32 32
33 /* Configuration register 1 */ 33 /* Configuration register 1 */
34 tx_fc = (efx->link_fc & EFX_FC_TX) || !efx->link_fd; 34 tx_fc = (link_state->fc & EFX_FC_TX) || !link_state->fd;
35 rx_fc = !!(efx->link_fc & EFX_FC_RX); 35 rx_fc = !!(link_state->fc & EFX_FC_RX);
36 loopback = (efx->loopback_mode == LOOPBACK_GMAC); 36 loopback = (efx->loopback_mode == LOOPBACK_GMAC);
37 bytemode = (efx->link_speed == 1000); 37 bytemode = (link_state->speed == 1000);
38 38
39 EFX_POPULATE_OWORD_5(reg, 39 EFX_POPULATE_OWORD_5(reg,
40 GM_LOOP, loopback, 40 FRF_AB_GM_LOOP, loopback,
41 GM_TX_EN, 1, 41 FRF_AB_GM_TX_EN, 1,
42 GM_TX_FC_EN, tx_fc, 42 FRF_AB_GM_TX_FC_EN, tx_fc,
43 GM_RX_EN, 1, 43 FRF_AB_GM_RX_EN, 1,
44 GM_RX_FC_EN, rx_fc); 44 FRF_AB_GM_RX_FC_EN, rx_fc);
45 falcon_write(efx, &reg, GM_CFG1_REG); 45 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
46 udelay(10); 46 udelay(10);
47 47
48 /* Configuration register 2 */ 48 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1; 49 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg, 50 EFX_POPULATE_OWORD_5(reg,
51 GM_IF_MODE, if_mode, 51 FRF_AB_GM_IF_MODE, if_mode,
52 GM_PAD_CRC_EN, 1, 52 FRF_AB_GM_PAD_CRC_EN, 1,
53 GM_LEN_CHK, 1, 53 FRF_AB_GM_LEN_CHK, 1,
54 GM_FD, efx->link_fd, 54 FRF_AB_GM_FD, link_state->fd,
55 GM_PAMBL_LEN, 0x7/*datasheet recommended */); 55 FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56 56
57 falcon_write(efx, &reg, GM_CFG2_REG); 57 efx_writeo(efx, &reg, FR_AB_GM_CFG2);
58 udelay(10); 58 udelay(10);
59 59
60 /* Max frame len register */ 60 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len); 62 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
63 falcon_write(efx, &reg, GM_MAX_FLEN_REG); 63 efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
64 udelay(10); 64 udelay(10);
65 65
66 /* FIFO configuration register 0 */ 66 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg, 67 EFX_POPULATE_OWORD_5(reg,
68 GMF_FTFENREQ, 1, 68 FRF_AB_GMF_FTFENREQ, 1,
69 GMF_STFENREQ, 1, 69 FRF_AB_GMF_STFENREQ, 1,
70 GMF_FRFENREQ, 1, 70 FRF_AB_GMF_FRFENREQ, 1,
71 GMF_SRFENREQ, 1, 71 FRF_AB_GMF_SRFENREQ, 1,
72 GMF_WTMENREQ, 1); 72 FRF_AB_GMF_WTMENREQ, 1);
73 falcon_write(efx, &reg, GMF_CFG0_REG); 73 efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
74 udelay(10); 74 udelay(10);
75 75
76 /* FIFO configuration register 1 */ 76 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg, 77 EFX_POPULATE_OWORD_2(reg,
78 GMF_CFGFRTH, 0x12, 78 FRF_AB_GMF_CFGFRTH, 0x12,
79 GMF_CFGXOFFRTX, 0xffff); 79 FRF_AB_GMF_CFGXOFFRTX, 0xffff);
80 falcon_write(efx, &reg, GMF_CFG1_REG); 80 efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
81 udelay(10); 81 udelay(10);
82 82
83 /* FIFO configuration register 2 */ 83 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg, 84 EFX_POPULATE_OWORD_2(reg,
85 GMF_CFGHWM, 0x3f, 85 FRF_AB_GMF_CFGHWM, 0x3f,
86 GMF_CFGLWM, 0xa); 86 FRF_AB_GMF_CFGLWM, 0xa);
87 falcon_write(efx, &reg, GMF_CFG2_REG); 87 efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
88 udelay(10); 88 udelay(10);
89 89
90 /* FIFO configuration register 3 */ 90 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg, 91 EFX_POPULATE_OWORD_2(reg,
92 GMF_CFGHWMFT, 0x1c, 92 FRF_AB_GMF_CFGHWMFT, 0x1c,
93 GMF_CFGFTTH, 0x08); 93 FRF_AB_GMF_CFGFTTH, 0x08);
94 falcon_write(efx, &reg, GMF_CFG3_REG); 94 efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
95 udelay(10); 95 udelay(10);
96 96
97 /* FIFO configuration register 4 */ 97 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1); 98 EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
99 falcon_write(efx, &reg, GMF_CFG4_REG); 99 efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
100 udelay(10); 100 udelay(10);
101 101
102 /* FIFO configuration register 5 */ 102 /* FIFO configuration register 5 */
103 falcon_read(efx, &reg, GMF_CFG5_REG); 103 efx_reado(efx, &reg, FR_AB_GMF_CFG5);
104 EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode); 104 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd); 105 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !link_state->fd);
106 EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd); 106 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !link_state->fd);
107 EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0); 107 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
108 falcon_write(efx, &reg, GMF_CFG5_REG); 108 efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
109 udelay(10); 109 udelay(10);
110 110
111 /* MAC address */ 111 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg, 112 EFX_POPULATE_OWORD_4(reg,
113 GM_HWADDR_5, efx->net_dev->dev_addr[5], 113 FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
114 GM_HWADDR_4, efx->net_dev->dev_addr[4], 114 FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
115 GM_HWADDR_3, efx->net_dev->dev_addr[3], 115 FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
116 GM_HWADDR_2, efx->net_dev->dev_addr[2]); 116 FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
117 falcon_write(efx, &reg, GM_ADR1_REG); 117 efx_writeo(efx, &reg, FR_AB_GM_ADR1);
118 udelay(10); 118 udelay(10);
119 EFX_POPULATE_OWORD_2(reg, 119 EFX_POPULATE_OWORD_2(reg,
120 GM_HWADDR_1, efx->net_dev->dev_addr[1], 120 FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
121 GM_HWADDR_0, efx->net_dev->dev_addr[0]); 121 FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
122 falcon_write(efx, &reg, GM_ADR2_REG); 122 efx_writeo(efx, &reg, FR_AB_GM_ADR2);
123 udelay(10); 123 udelay(10);
124 124
125 falcon_reconfigure_mac_wrapper(efx); 125 falcon_reconfigure_mac_wrapper(efx);
@@ -130,11 +130,6 @@ static void falcon_update_stats_gmac(struct efx_nic *efx)
130 struct efx_mac_stats *mac_stats = &efx->mac_stats; 130 struct efx_mac_stats *mac_stats = &efx->mac_stats;
131 unsigned long old_rx_pause, old_tx_pause; 131 unsigned long old_rx_pause, old_tx_pause;
132 unsigned long new_rx_pause, new_tx_pause; 132 unsigned long new_rx_pause, new_tx_pause;
133 int rc;
134
135 rc = falcon_dma_stats(efx, GDmaDone_offset);
136 if (rc)
137 return;
138 133
139 /* Pause frames are erroneously counted as errors (SFC bug 3269) */ 134 /* Pause frames are erroneously counted as errors (SFC bug 3269) */
140 old_rx_pause = mac_stats->rx_pause; 135 old_rx_pause = mac_stats->rx_pause;
@@ -221,9 +216,13 @@ static void falcon_update_stats_gmac(struct efx_nic *efx)
221 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64; 216 mac_stats->rx_lt64 = mac_stats->rx_good_lt64 + mac_stats->rx_bad_lt64;
222} 217}
223 218
219static bool falcon_gmac_check_fault(struct efx_nic *efx)
220{
221 return false;
222}
223
224struct efx_mac_operations falcon_gmac_operations = { 224struct efx_mac_operations falcon_gmac_operations = {
225 .reconfigure = falcon_reconfigure_gmac, 225 .reconfigure = falcon_reconfigure_gmac,
226 .update_stats = falcon_update_stats_gmac, 226 .update_stats = falcon_update_stats_gmac,
227 .irq = efx_port_dummy_op_void, 227 .check_fault = falcon_gmac_check_fault,
228 .poll = efx_port_dummy_op_void,
229}; 228};
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
deleted file mode 100644
index 2d2261117ace..000000000000
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ /dev/null
@@ -1,1333 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_HWDEFS_H
12#define EFX_FALCON_HWDEFS_H
13
14/*
15 * Falcon hardware value definitions.
16 * Falcon is the internal codename for the SFC4000 controller that is
17 * present in SFE400X evaluation boards
18 */
19
20/**************************************************************************
21 *
22 * Falcon registers
23 *
24 **************************************************************************
25 */
26
27/* Address region register */
28#define ADR_REGION_REG_KER 0x00
29#define ADR_REGION0_LBN 0
30#define ADR_REGION0_WIDTH 18
31#define ADR_REGION1_LBN 32
32#define ADR_REGION1_WIDTH 18
33#define ADR_REGION2_LBN 64
34#define ADR_REGION2_WIDTH 18
35#define ADR_REGION3_LBN 96
36#define ADR_REGION3_WIDTH 18
37
38/* Interrupt enable register */
39#define INT_EN_REG_KER 0x0010
40#define KER_INT_KER_LBN 3
41#define KER_INT_KER_WIDTH 1
42#define DRV_INT_EN_KER_LBN 0
43#define DRV_INT_EN_KER_WIDTH 1
44
45/* Interrupt status address register */
46#define INT_ADR_REG_KER 0x0030
47#define NORM_INT_VEC_DIS_KER_LBN 64
48#define NORM_INT_VEC_DIS_KER_WIDTH 1
49#define INT_ADR_KER_LBN 0
50#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
51
52/* Interrupt status register (B0 only) */
53#define INT_ISR0_B0 0x90
54#define INT_ISR1_B0 0xA0
55
56/* Interrupt acknowledge register (A0/A1 only) */
57#define INT_ACK_REG_KER_A1 0x0050
58#define INT_ACK_DUMMY_DATA_LBN 0
59#define INT_ACK_DUMMY_DATA_WIDTH 32
60
61/* Interrupt acknowledge work-around register (A0/A1 only )*/
62#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
63
64/* SPI host command register */
65#define EE_SPI_HCMD_REG_KER 0x0100
66#define EE_SPI_HCMD_CMD_EN_LBN 31
67#define EE_SPI_HCMD_CMD_EN_WIDTH 1
68#define EE_WR_TIMER_ACTIVE_LBN 28
69#define EE_WR_TIMER_ACTIVE_WIDTH 1
70#define EE_SPI_HCMD_SF_SEL_LBN 24
71#define EE_SPI_HCMD_SF_SEL_WIDTH 1
72#define EE_SPI_EEPROM 0
73#define EE_SPI_FLASH 1
74#define EE_SPI_HCMD_DABCNT_LBN 16
75#define EE_SPI_HCMD_DABCNT_WIDTH 5
76#define EE_SPI_HCMD_READ_LBN 15
77#define EE_SPI_HCMD_READ_WIDTH 1
78#define EE_SPI_READ 1
79#define EE_SPI_WRITE 0
80#define EE_SPI_HCMD_DUBCNT_LBN 12
81#define EE_SPI_HCMD_DUBCNT_WIDTH 2
82#define EE_SPI_HCMD_ADBCNT_LBN 8
83#define EE_SPI_HCMD_ADBCNT_WIDTH 2
84#define EE_SPI_HCMD_ENC_LBN 0
85#define EE_SPI_HCMD_ENC_WIDTH 8
86
87/* SPI host address register */
88#define EE_SPI_HADR_REG_KER 0x0110
89#define EE_SPI_HADR_ADR_LBN 0
90#define EE_SPI_HADR_ADR_WIDTH 24
91
92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120
94
95/* SPI/VPD config register */
96#define EE_VPD_CFG_REG_KER 0x0140
97#define EE_VPD_EN_LBN 0
98#define EE_VPD_EN_WIDTH 1
99#define EE_VPD_EN_AD9_MODE_LBN 1
100#define EE_VPD_EN_AD9_MODE_WIDTH 1
101#define EE_EE_CLOCK_DIV_LBN 112
102#define EE_EE_CLOCK_DIV_WIDTH 7
103#define EE_SF_CLOCK_DIV_LBN 120
104#define EE_SF_CLOCK_DIV_WIDTH 7
105
106/* PCIE CORE ACCESS REG */
107#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
108#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
109#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
110#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
111
112/* NIC status register */
113#define NIC_STAT_REG 0x0200
114#define EE_STRAP_EN_LBN 31
115#define EE_STRAP_EN_WIDTH 1
116#define EE_STRAP_OVR_LBN 24
117#define EE_STRAP_OVR_WIDTH 4
118#define ONCHIP_SRAM_LBN 16
119#define ONCHIP_SRAM_WIDTH 1
120#define SF_PRST_LBN 9
121#define SF_PRST_WIDTH 1
122#define EE_PRST_LBN 8
123#define EE_PRST_WIDTH 1
124#define STRAP_PINS_LBN 0
125#define STRAP_PINS_WIDTH 3
126/* These bit definitions are extrapolated from the list of numerical
127 * values for STRAP_PINS.
128 */
129#define STRAP_10G_LBN 2
130#define STRAP_10G_WIDTH 1
131#define STRAP_PCIE_LBN 0
132#define STRAP_PCIE_WIDTH 1
133
134#define BOOTED_USING_NVDEVICE_LBN 3
135#define BOOTED_USING_NVDEVICE_WIDTH 1
136
137/* GPIO control register */
138#define GPIO_CTL_REG_KER 0x0210
139#define GPIO_USE_NIC_CLK_LBN (30)
140#define GPIO_USE_NIC_CLK_WIDTH (1)
141#define GPIO_OUTPUTS_LBN (16)
142#define GPIO_OUTPUTS_WIDTH (4)
143#define GPIO_INPUTS_LBN (8)
144#define GPIO_DIRECTION_LBN (24)
145#define GPIO_DIRECTION_WIDTH (4)
146#define GPIO_DIRECTION_OUT (1)
147#define GPIO_SRAM_SLEEP (1 << 1)
148
149#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
150#define GPIO3_OEN_WIDTH 1
151#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
152#define GPIO2_OEN_WIDTH 1
153#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
154#define GPIO1_OEN_WIDTH 1
155#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
156#define GPIO0_OEN_WIDTH 1
157
158#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
159#define GPIO3_OUT_WIDTH 1
160#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
161#define GPIO2_OUT_WIDTH 1
162#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
163#define GPIO1_OUT_WIDTH 1
164#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
165#define GPIO0_OUT_WIDTH 1
166
167#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
168#define GPIO3_IN_WIDTH 1
169#define GPIO2_IN_WIDTH 1
170#define GPIO1_IN_WIDTH 1
171#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
172#define GPIO0_IN_WIDTH 1
173
174/* Global control register */
175#define GLB_CTL_REG_KER 0x0220
176#define EXT_PHY_RST_CTL_LBN 63
177#define EXT_PHY_RST_CTL_WIDTH 1
178#define PCIE_SD_RST_CTL_LBN 61
179#define PCIE_SD_RST_CTL_WIDTH 1
180
181#define PCIE_NSTCK_RST_CTL_LBN 58
182#define PCIE_NSTCK_RST_CTL_WIDTH 1
183#define PCIE_CORE_RST_CTL_LBN 57
184#define PCIE_CORE_RST_CTL_WIDTH 1
185#define EE_RST_CTL_LBN 49
186#define EE_RST_CTL_WIDTH 1
187#define RST_XGRX_LBN 24
188#define RST_XGRX_WIDTH 1
189#define RST_XGTX_LBN 23
190#define RST_XGTX_WIDTH 1
191#define RST_EM_LBN 22
192#define RST_EM_WIDTH 1
193#define EXT_PHY_RST_DUR_LBN 1
194#define EXT_PHY_RST_DUR_WIDTH 3
195#define SWRST_LBN 0
196#define SWRST_WIDTH 1
197#define INCLUDE_IN_RESET 0
198#define EXCLUDE_FROM_RESET 1
199
200/* Fatal interrupt register */
201#define FATAL_INTR_REG_KER 0x0230
202#define RBUF_OWN_INT_KER_EN_LBN 39
203#define RBUF_OWN_INT_KER_EN_WIDTH 1
204#define TBUF_OWN_INT_KER_EN_LBN 38
205#define TBUF_OWN_INT_KER_EN_WIDTH 1
206#define ILL_ADR_INT_KER_EN_LBN 33
207#define ILL_ADR_INT_KER_EN_WIDTH 1
208#define MEM_PERR_INT_KER_LBN 8
209#define MEM_PERR_INT_KER_WIDTH 1
210#define INT_KER_ERROR_LBN 0
211#define INT_KER_ERROR_WIDTH 12
212
213#define DP_CTRL_REG 0x250
214#define FLS_EVQ_ID_LBN 0
215#define FLS_EVQ_ID_WIDTH 11
216
217#define MEM_STAT_REG_KER 0x260
218
219/* Debug probe register */
220#define DEBUG_BLK_SEL_MISC 7
221#define DEBUG_BLK_SEL_SERDES 6
222#define DEBUG_BLK_SEL_EM 5
223#define DEBUG_BLK_SEL_SR 4
224#define DEBUG_BLK_SEL_EV 3
225#define DEBUG_BLK_SEL_RX 2
226#define DEBUG_BLK_SEL_TX 1
227#define DEBUG_BLK_SEL_BIU 0
228
229/* FPGA build version */
230#define ALTERA_BUILD_REG_KER 0x0300
231#define VER_ALL_LBN 0
232#define VER_ALL_WIDTH 32
233
234/* Spare EEPROM bits register (flash 0x390) */
235#define SPARE_REG_KER 0x310
236#define MEM_PERR_EN_TX_DATA_LBN 72
237#define MEM_PERR_EN_TX_DATA_WIDTH 2
238
239/* Timer table for kernel access */
240#define TIMER_CMD_REG_KER 0x420
241#define TIMER_MODE_LBN 12
242#define TIMER_MODE_WIDTH 2
243#define TIMER_MODE_DIS 0
244#define TIMER_MODE_INT_HLDOFF 2
245#define TIMER_VAL_LBN 0
246#define TIMER_VAL_WIDTH 12
247
248/* Driver generated event register */
249#define DRV_EV_REG_KER 0x440
250#define DRV_EV_QID_LBN 64
251#define DRV_EV_QID_WIDTH 12
252#define DRV_EV_DATA_LBN 0
253#define DRV_EV_DATA_WIDTH 64
254
255/* Buffer table configuration register */
256#define BUF_TBL_CFG_REG_KER 0x600
257#define BUF_TBL_MODE_LBN 3
258#define BUF_TBL_MODE_WIDTH 1
259#define BUF_TBL_MODE_HALF 0
260#define BUF_TBL_MODE_FULL 1
261
262/* SRAM receive descriptor cache configuration register */
263#define SRM_RX_DC_CFG_REG_KER 0x610
264#define SRM_RX_DC_BASE_ADR_LBN 0
265#define SRM_RX_DC_BASE_ADR_WIDTH 21
266
267/* SRAM transmit descriptor cache configuration register */
268#define SRM_TX_DC_CFG_REG_KER 0x620
269#define SRM_TX_DC_BASE_ADR_LBN 0
270#define SRM_TX_DC_BASE_ADR_WIDTH 21
271
272/* SRAM configuration register */
273#define SRM_CFG_REG_KER 0x630
274#define SRAM_OOB_BT_INIT_EN_LBN 3
275#define SRAM_OOB_BT_INIT_EN_WIDTH 1
276#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
277#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
278#define SRM_NB_BSZ_1BANKS_2M 0
279#define SRM_NB_BSZ_1BANKS_4M 1
280#define SRM_NB_BSZ_1BANKS_8M 2
281#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
282#define SRM_NB_BSZ_2BANKS_4M 4
283#define SRM_NB_BSZ_2BANKS_8M 5
284#define SRM_NB_BSZ_2BANKS_16M 6
285#define SRM_NB_BSZ_RESERVED 7
286
287/* Special buffer table update register */
288#define BUF_TBL_UPD_REG_KER 0x0650
289#define BUF_UPD_CMD_LBN 63
290#define BUF_UPD_CMD_WIDTH 1
291#define BUF_CLR_CMD_LBN 62
292#define BUF_CLR_CMD_WIDTH 1
293#define BUF_CLR_END_ID_LBN 32
294#define BUF_CLR_END_ID_WIDTH 20
295#define BUF_CLR_START_ID_LBN 0
296#define BUF_CLR_START_ID_WIDTH 20
297
298/* Receive configuration register */
299#define RX_CFG_REG_KER 0x800
300
301/* B0 */
302#define RX_INGR_EN_B0_LBN 47
303#define RX_INGR_EN_B0_WIDTH 1
304#define RX_DESC_PUSH_EN_B0_LBN 43
305#define RX_DESC_PUSH_EN_B0_WIDTH 1
306#define RX_XON_TX_TH_B0_LBN 33
307#define RX_XON_TX_TH_B0_WIDTH 5
308#define RX_XOFF_TX_TH_B0_LBN 28
309#define RX_XOFF_TX_TH_B0_WIDTH 5
310#define RX_USR_BUF_SIZE_B0_LBN 19
311#define RX_USR_BUF_SIZE_B0_WIDTH 9
312#define RX_XON_MAC_TH_B0_LBN 10
313#define RX_XON_MAC_TH_B0_WIDTH 9
314#define RX_XOFF_MAC_TH_B0_LBN 1
315#define RX_XOFF_MAC_TH_B0_WIDTH 9
316#define RX_XOFF_MAC_EN_B0_LBN 0
317#define RX_XOFF_MAC_EN_B0_WIDTH 1
318
319/* A1 */
320#define RX_DESC_PUSH_EN_A1_LBN 35
321#define RX_DESC_PUSH_EN_A1_WIDTH 1
322#define RX_XON_TX_TH_A1_LBN 25
323#define RX_XON_TX_TH_A1_WIDTH 5
324#define RX_XOFF_TX_TH_A1_LBN 20
325#define RX_XOFF_TX_TH_A1_WIDTH 5
326#define RX_USR_BUF_SIZE_A1_LBN 11
327#define RX_USR_BUF_SIZE_A1_WIDTH 9
328#define RX_XON_MAC_TH_A1_LBN 6
329#define RX_XON_MAC_TH_A1_WIDTH 5
330#define RX_XOFF_MAC_TH_A1_LBN 1
331#define RX_XOFF_MAC_TH_A1_WIDTH 5
332#define RX_XOFF_MAC_EN_A1_LBN 0
333#define RX_XOFF_MAC_EN_A1_WIDTH 1
334
335/* Receive filter control register */
336#define RX_FILTER_CTL_REG 0x810
337#define UDP_FULL_SRCH_LIMIT_LBN 32
338#define UDP_FULL_SRCH_LIMIT_WIDTH 8
339#define NUM_KER_LBN 24
340#define NUM_KER_WIDTH 2
341#define UDP_WILD_SRCH_LIMIT_LBN 16
342#define UDP_WILD_SRCH_LIMIT_WIDTH 8
343#define TCP_WILD_SRCH_LIMIT_LBN 8
344#define TCP_WILD_SRCH_LIMIT_WIDTH 8
345#define TCP_FULL_SRCH_LIMIT_LBN 0
346#define TCP_FULL_SRCH_LIMIT_WIDTH 8
347
348/* RX queue flush register */
349#define RX_FLUSH_DESCQ_REG_KER 0x0820
350#define RX_FLUSH_DESCQ_CMD_LBN 24
351#define RX_FLUSH_DESCQ_CMD_WIDTH 1
352#define RX_FLUSH_DESCQ_LBN 0
353#define RX_FLUSH_DESCQ_WIDTH 12
354
355/* Receive descriptor update register */
356#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
357#define RX_DESC_WPTR_DWORD_LBN 0
358#define RX_DESC_WPTR_DWORD_WIDTH 12
359
360/* Receive descriptor cache configuration register */
361#define RX_DC_CFG_REG_KER 0x840
362#define RX_DC_SIZE_LBN 0
363#define RX_DC_SIZE_WIDTH 2
364
365#define RX_DC_PF_WM_REG_KER 0x850
366#define RX_DC_PF_LWM_LBN 0
367#define RX_DC_PF_LWM_WIDTH 6
368
369/* RX no descriptor drop counter */
370#define RX_NODESC_DROP_REG_KER 0x880
371#define RX_NODESC_DROP_CNT_LBN 0
372#define RX_NODESC_DROP_CNT_WIDTH 16
373
374/* RX black magic register */
375#define RX_SELF_RST_REG_KER 0x890
376#define RX_ISCSI_DIS_LBN 17
377#define RX_ISCSI_DIS_WIDTH 1
378#define RX_NODESC_WAIT_DIS_LBN 9
379#define RX_NODESC_WAIT_DIS_WIDTH 1
380#define RX_RECOVERY_EN_LBN 8
381#define RX_RECOVERY_EN_WIDTH 1
382
383/* TX queue flush register */
384#define TX_FLUSH_DESCQ_REG_KER 0x0a00
385#define TX_FLUSH_DESCQ_CMD_LBN 12
386#define TX_FLUSH_DESCQ_CMD_WIDTH 1
387#define TX_FLUSH_DESCQ_LBN 0
388#define TX_FLUSH_DESCQ_WIDTH 12
389
390/* Transmit descriptor update register */
391#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
392#define TX_DESC_WPTR_DWORD_LBN 0
393#define TX_DESC_WPTR_DWORD_WIDTH 12
394
395/* Transmit descriptor cache configuration register */
396#define TX_DC_CFG_REG_KER 0xa20
397#define TX_DC_SIZE_LBN 0
398#define TX_DC_SIZE_WIDTH 2
399
400/* Transmit checksum configuration register (A0/A1 only) */
401#define TX_CHKSM_CFG_REG_KER_A1 0xa30
402
403/* Transmit configuration register */
404#define TX_CFG_REG_KER 0xa50
405#define TX_NO_EOP_DISC_EN_LBN 5
406#define TX_NO_EOP_DISC_EN_WIDTH 1
407
408/* Transmit configuration register 2 */
409#define TX_CFG2_REG_KER 0xa80
410#define TX_CSR_PUSH_EN_LBN 89
411#define TX_CSR_PUSH_EN_WIDTH 1
412#define TX_RX_SPACER_LBN 64
413#define TX_RX_SPACER_WIDTH 8
414#define TX_SW_EV_EN_LBN 59
415#define TX_SW_EV_EN_WIDTH 1
416#define TX_RX_SPACER_EN_LBN 57
417#define TX_RX_SPACER_EN_WIDTH 1
418#define TX_PREF_THRESHOLD_LBN 19
419#define TX_PREF_THRESHOLD_WIDTH 2
420#define TX_ONE_PKT_PER_Q_LBN 18
421#define TX_ONE_PKT_PER_Q_WIDTH 1
422#define TX_DIS_NON_IP_EV_LBN 17
423#define TX_DIS_NON_IP_EV_WIDTH 1
424#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
425#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
426
427/* PHY management transmit data register */
428#define MD_TXD_REG_KER 0xc00
429#define MD_TXD_LBN 0
430#define MD_TXD_WIDTH 16
431
432/* PHY management receive data register */
433#define MD_RXD_REG_KER 0xc10
434#define MD_RXD_LBN 0
435#define MD_RXD_WIDTH 16
436
437/* PHY management configuration & status register */
438#define MD_CS_REG_KER 0xc20
439#define MD_GC_LBN 4
440#define MD_GC_WIDTH 1
441#define MD_RIC_LBN 2
442#define MD_RIC_WIDTH 1
443#define MD_RDC_LBN 1
444#define MD_RDC_WIDTH 1
445#define MD_WRC_LBN 0
446#define MD_WRC_WIDTH 1
447
448/* PHY management PHY address register */
449#define MD_PHY_ADR_REG_KER 0xc30
450#define MD_PHY_ADR_LBN 0
451#define MD_PHY_ADR_WIDTH 16
452
453/* PHY management ID register */
454#define MD_ID_REG_KER 0xc40
455#define MD_PRT_ADR_LBN 11
456#define MD_PRT_ADR_WIDTH 5
457#define MD_DEV_ADR_LBN 6
458#define MD_DEV_ADR_WIDTH 5
459
460/* PHY management status & mask register (DWORD read only) */
461#define MD_STAT_REG_KER 0xc50
462#define MD_BSERR_LBN 2
463#define MD_BSERR_WIDTH 1
464#define MD_LNFL_LBN 1
465#define MD_LNFL_WIDTH 1
466#define MD_BSY_LBN 0
467#define MD_BSY_WIDTH 1
468
469/* Port 0 and 1 MAC stats registers */
470#define MAC0_STAT_DMA_REG_KER 0xc60
471#define MAC_STAT_DMA_CMD_LBN 48
472#define MAC_STAT_DMA_CMD_WIDTH 1
473#define MAC_STAT_DMA_ADR_LBN 0
474#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
475
476/* Port 0 and 1 MAC control registers */
477#define MAC0_CTRL_REG_KER 0xc80
478#define MAC_XOFF_VAL_LBN 16
479#define MAC_XOFF_VAL_WIDTH 16
480#define TXFIFO_DRAIN_EN_B0_LBN 7
481#define TXFIFO_DRAIN_EN_B0_WIDTH 1
482#define MAC_BCAD_ACPT_LBN 4
483#define MAC_BCAD_ACPT_WIDTH 1
484#define MAC_UC_PROM_LBN 3
485#define MAC_UC_PROM_WIDTH 1
486#define MAC_LINK_STATUS_LBN 2
487#define MAC_LINK_STATUS_WIDTH 1
488#define MAC_SPEED_LBN 0
489#define MAC_SPEED_WIDTH 2
490
491/* 10G XAUI XGXS default values */
492#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
493#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
494#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
495
496/* Multicast address hash table */
497#define MAC_MCAST_HASH_REG0_KER 0xca0
498#define MAC_MCAST_HASH_REG1_KER 0xcb0
499
500/* GMAC configuration register 1 */
501#define GM_CFG1_REG 0xe00
502#define GM_SW_RST_LBN 31
503#define GM_SW_RST_WIDTH 1
504#define GM_LOOP_LBN 8
505#define GM_LOOP_WIDTH 1
506#define GM_RX_FC_EN_LBN 5
507#define GM_RX_FC_EN_WIDTH 1
508#define GM_TX_FC_EN_LBN 4
509#define GM_TX_FC_EN_WIDTH 1
510#define GM_RX_EN_LBN 2
511#define GM_RX_EN_WIDTH 1
512#define GM_TX_EN_LBN 0
513#define GM_TX_EN_WIDTH 1
514
515/* GMAC configuration register 2 */
516#define GM_CFG2_REG 0xe10
517#define GM_PAMBL_LEN_LBN 12
518#define GM_PAMBL_LEN_WIDTH 4
519#define GM_IF_MODE_LBN 8
520#define GM_IF_MODE_WIDTH 2
521#define GM_LEN_CHK_LBN 4
522#define GM_LEN_CHK_WIDTH 1
523#define GM_PAD_CRC_EN_LBN 2
524#define GM_PAD_CRC_EN_WIDTH 1
525#define GM_FD_LBN 0
526#define GM_FD_WIDTH 1
527
528/* GMAC maximum frame length register */
529#define GM_MAX_FLEN_REG 0xe40
530#define GM_MAX_FLEN_LBN 0
531#define GM_MAX_FLEN_WIDTH 16
532
533/* GMAC station address register 1 */
534#define GM_ADR1_REG 0xf00
535#define GM_HWADDR_5_LBN 24
536#define GM_HWADDR_5_WIDTH 8
537#define GM_HWADDR_4_LBN 16
538#define GM_HWADDR_4_WIDTH 8
539#define GM_HWADDR_3_LBN 8
540#define GM_HWADDR_3_WIDTH 8
541#define GM_HWADDR_2_LBN 0
542#define GM_HWADDR_2_WIDTH 8
543
544/* GMAC station address register 2 */
545#define GM_ADR2_REG 0xf10
546#define GM_HWADDR_1_LBN 24
547#define GM_HWADDR_1_WIDTH 8
548#define GM_HWADDR_0_LBN 16
549#define GM_HWADDR_0_WIDTH 8
550
551/* GMAC FIFO configuration register 0 */
552#define GMF_CFG0_REG 0xf20
553#define GMF_FTFENREQ_LBN 12
554#define GMF_FTFENREQ_WIDTH 1
555#define GMF_STFENREQ_LBN 11
556#define GMF_STFENREQ_WIDTH 1
557#define GMF_FRFENREQ_LBN 10
558#define GMF_FRFENREQ_WIDTH 1
559#define GMF_SRFENREQ_LBN 9
560#define GMF_SRFENREQ_WIDTH 1
561#define GMF_WTMENREQ_LBN 8
562#define GMF_WTMENREQ_WIDTH 1
563
564/* GMAC FIFO configuration register 1 */
565#define GMF_CFG1_REG 0xf30
566#define GMF_CFGFRTH_LBN 16
567#define GMF_CFGFRTH_WIDTH 5
568#define GMF_CFGXOFFRTX_LBN 0
569#define GMF_CFGXOFFRTX_WIDTH 16
570
571/* GMAC FIFO configuration register 2 */
572#define GMF_CFG2_REG 0xf40
573#define GMF_CFGHWM_LBN 16
574#define GMF_CFGHWM_WIDTH 6
575#define GMF_CFGLWM_LBN 0
576#define GMF_CFGLWM_WIDTH 6
577
578/* GMAC FIFO configuration register 3 */
579#define GMF_CFG3_REG 0xf50
580#define GMF_CFGHWMFT_LBN 16
581#define GMF_CFGHWMFT_WIDTH 6
582#define GMF_CFGFTTH_LBN 0
583#define GMF_CFGFTTH_WIDTH 6
584
585/* GMAC FIFO configuration register 4 */
586#define GMF_CFG4_REG 0xf60
587#define GMF_HSTFLTRFRM_PAUSE_LBN 12
588#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
589
590/* GMAC FIFO configuration register 5 */
591#define GMF_CFG5_REG 0xf70
592#define GMF_CFGHDPLX_LBN 22
593#define GMF_CFGHDPLX_WIDTH 1
594#define GMF_CFGBYTMODE_LBN 19
595#define GMF_CFGBYTMODE_WIDTH 1
596#define GMF_HSTDRPLT64_LBN 18
597#define GMF_HSTDRPLT64_WIDTH 1
598#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
599#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
600
601/* XGMAC address register low */
602#define XM_ADR_LO_REG 0x1200
603#define XM_ADR_3_LBN 24
604#define XM_ADR_3_WIDTH 8
605#define XM_ADR_2_LBN 16
606#define XM_ADR_2_WIDTH 8
607#define XM_ADR_1_LBN 8
608#define XM_ADR_1_WIDTH 8
609#define XM_ADR_0_LBN 0
610#define XM_ADR_0_WIDTH 8
611
612/* XGMAC address register high */
613#define XM_ADR_HI_REG 0x1210
614#define XM_ADR_5_LBN 8
615#define XM_ADR_5_WIDTH 8
616#define XM_ADR_4_LBN 0
617#define XM_ADR_4_WIDTH 8
618
619/* XGMAC global configuration */
620#define XM_GLB_CFG_REG 0x1220
621#define XM_RX_STAT_EN_LBN 11
622#define XM_RX_STAT_EN_WIDTH 1
623#define XM_TX_STAT_EN_LBN 10
624#define XM_TX_STAT_EN_WIDTH 1
625#define XM_RX_JUMBO_MODE_LBN 6
626#define XM_RX_JUMBO_MODE_WIDTH 1
627#define XM_INTCLR_MODE_LBN 3
628#define XM_INTCLR_MODE_WIDTH 1
629#define XM_CORE_RST_LBN 0
630#define XM_CORE_RST_WIDTH 1
631
632/* XGMAC transmit configuration */
633#define XM_TX_CFG_REG 0x1230
634#define XM_IPG_LBN 16
635#define XM_IPG_WIDTH 4
636#define XM_FCNTL_LBN 10
637#define XM_FCNTL_WIDTH 1
638#define XM_TXCRC_LBN 8
639#define XM_TXCRC_WIDTH 1
640#define XM_AUTO_PAD_LBN 5
641#define XM_AUTO_PAD_WIDTH 1
642#define XM_TX_PRMBL_LBN 2
643#define XM_TX_PRMBL_WIDTH 1
644#define XM_TXEN_LBN 1
645#define XM_TXEN_WIDTH 1
646
647/* XGMAC receive configuration */
648#define XM_RX_CFG_REG 0x1240
649#define XM_PASS_CRC_ERR_LBN 25
650#define XM_PASS_CRC_ERR_WIDTH 1
651#define XM_ACPT_ALL_MCAST_LBN 11
652#define XM_ACPT_ALL_MCAST_WIDTH 1
653#define XM_ACPT_ALL_UCAST_LBN 9
654#define XM_ACPT_ALL_UCAST_WIDTH 1
655#define XM_AUTO_DEPAD_LBN 8
656#define XM_AUTO_DEPAD_WIDTH 1
657#define XM_RXEN_LBN 1
658#define XM_RXEN_WIDTH 1
659
660/* XGMAC management interrupt mask register */
661#define XM_MGT_INT_MSK_REG_B0 0x1250
662#define XM_MSK_PRMBLE_ERR_LBN 2
663#define XM_MSK_PRMBLE_ERR_WIDTH 1
664#define XM_MSK_RMTFLT_LBN 1
665#define XM_MSK_RMTFLT_WIDTH 1
666#define XM_MSK_LCLFLT_LBN 0
667#define XM_MSK_LCLFLT_WIDTH 1
668
669/* XGMAC flow control register */
670#define XM_FC_REG 0x1270
671#define XM_PAUSE_TIME_LBN 16
672#define XM_PAUSE_TIME_WIDTH 16
673#define XM_DIS_FCNTL_LBN 0
674#define XM_DIS_FCNTL_WIDTH 1
675
676/* XGMAC pause time count register */
677#define XM_PAUSE_TIME_REG 0x1290
678
679/* XGMAC transmit parameter register */
680#define XM_TX_PARAM_REG 0x012d0
681#define XM_TX_JUMBO_MODE_LBN 31
682#define XM_TX_JUMBO_MODE_WIDTH 1
683#define XM_MAX_TX_FRM_SIZE_LBN 16
684#define XM_MAX_TX_FRM_SIZE_WIDTH 14
685
686/* XGMAC receive parameter register */
687#define XM_RX_PARAM_REG 0x12e0
688#define XM_MAX_RX_FRM_SIZE_LBN 0
689#define XM_MAX_RX_FRM_SIZE_WIDTH 14
690
691/* XGMAC management interrupt status register */
692#define XM_MGT_INT_REG_B0 0x12f0
/* NOTE(review): naming here is inconsistent with the rest of this file --
 * every other bitfield is declared as a <name>_LBN / <name>_WIDTH pair
 * (cf. XM_MSK_PRMBLE_ERR_LBN/_WIDTH in the mask register above).
 * XM_PRMBLE_ERR appears to be the LBN (bit 2) and XM_PRMBLE_WIDTH its
 * width; left unrenamed because the macro names are part of the
 * interface used by callers. */
693#define XM_PRMBLE_ERR 2
694#define XM_PRMBLE_WIDTH 1
695#define XM_RMTFLT_LBN 1
696#define XM_RMTFLT_WIDTH 1
697#define XM_LCLFLT_LBN 0
698#define XM_LCLFLT_WIDTH 1
699
700/* XGXS/XAUI powerdown/reset register */
701#define XX_PWR_RST_REG 0x1300
702
703#define XX_SD_RST_ACT_LBN 16
704#define XX_SD_RST_ACT_WIDTH 1
705#define XX_PWRDND_EN_LBN 15
706#define XX_PWRDND_EN_WIDTH 1
707#define XX_PWRDNC_EN_LBN 14
708#define XX_PWRDNC_EN_WIDTH 1
709#define XX_PWRDNB_EN_LBN 13
710#define XX_PWRDNB_EN_WIDTH 1
711#define XX_PWRDNA_EN_LBN 12
712#define XX_PWRDNA_EN_WIDTH 1
713#define XX_RSTPLLCD_EN_LBN 9
714#define XX_RSTPLLCD_EN_WIDTH 1
715#define XX_RSTPLLAB_EN_LBN 8
716#define XX_RSTPLLAB_EN_WIDTH 1
717#define XX_RESETD_EN_LBN 7
718#define XX_RESETD_EN_WIDTH 1
719#define XX_RESETC_EN_LBN 6
720#define XX_RESETC_EN_WIDTH 1
721#define XX_RESETB_EN_LBN 5
722#define XX_RESETB_EN_WIDTH 1
723#define XX_RESETA_EN_LBN 4
724#define XX_RESETA_EN_WIDTH 1
725#define XX_RSTXGXSRX_EN_LBN 2
726#define XX_RSTXGXSRX_EN_WIDTH 1
727#define XX_RSTXGXSTX_EN_LBN 1
728#define XX_RSTXGXSTX_EN_WIDTH 1
729#define XX_RST_XX_EN_LBN 0
730#define XX_RST_XX_EN_WIDTH 1
731
732/* XGXS/XAUI powerdown/reset control register */
733#define XX_SD_CTL_REG 0x1310
734#define XX_HIDRVD_LBN 15
735#define XX_HIDRVD_WIDTH 1
736#define XX_LODRVD_LBN 14
737#define XX_LODRVD_WIDTH 1
738#define XX_HIDRVC_LBN 13
739#define XX_HIDRVC_WIDTH 1
740#define XX_LODRVC_LBN 12
741#define XX_LODRVC_WIDTH 1
742#define XX_HIDRVB_LBN 11
743#define XX_HIDRVB_WIDTH 1
744#define XX_LODRVB_LBN 10
745#define XX_LODRVB_WIDTH 1
746#define XX_HIDRVA_LBN 9
747#define XX_HIDRVA_WIDTH 1
748#define XX_LODRVA_LBN 8
749#define XX_LODRVA_WIDTH 1
750#define XX_LPBKD_LBN 3
751#define XX_LPBKD_WIDTH 1
752#define XX_LPBKC_LBN 2
753#define XX_LPBKC_WIDTH 1
754#define XX_LPBKB_LBN 1
755#define XX_LPBKB_WIDTH 1
756#define XX_LPBKA_LBN 0
757#define XX_LPBKA_WIDTH 1
758
759#define XX_TXDRV_CTL_REG 0x1320
760#define XX_DEQD_LBN 28
761#define XX_DEQD_WIDTH 4
762#define XX_DEQC_LBN 24
763#define XX_DEQC_WIDTH 4
764#define XX_DEQB_LBN 20
765#define XX_DEQB_WIDTH 4
766#define XX_DEQA_LBN 16
767#define XX_DEQA_WIDTH 4
768#define XX_DTXD_LBN 12
769#define XX_DTXD_WIDTH 4
770#define XX_DTXC_LBN 8
771#define XX_DTXC_WIDTH 4
772#define XX_DTXB_LBN 4
773#define XX_DTXB_WIDTH 4
774#define XX_DTXA_LBN 0
775#define XX_DTXA_WIDTH 4
776
777/* XAUI XGXS core status register */
778#define XX_CORE_STAT_REG 0x1360
779#define XX_FORCE_SIG_LBN 24
780#define XX_FORCE_SIG_WIDTH 8
781#define XX_FORCE_SIG_DECODE_FORCED 0xff
782#define XX_XGXS_LB_EN_LBN 23
783#define XX_XGXS_LB_EN_WIDTH 1
784#define XX_XGMII_LB_EN_LBN 22
785#define XX_XGMII_LB_EN_WIDTH 1
786#define XX_ALIGN_DONE_LBN 20
787#define XX_ALIGN_DONE_WIDTH 1
788#define XX_SYNC_STAT_LBN 16
789#define XX_SYNC_STAT_WIDTH 4
790#define XX_SYNC_STAT_DECODE_SYNCED 0xf
791#define XX_COMMA_DET_LBN 12
792#define XX_COMMA_DET_WIDTH 4
793#define XX_COMMA_DET_DECODE_DETECTED 0xf
794#define XX_COMMA_DET_RESET 0xf
795#define XX_CHARERR_LBN 4
796#define XX_CHARERR_WIDTH 4
797#define XX_CHARERR_RESET 0xf
798#define XX_DISPERR_LBN 0
799#define XX_DISPERR_WIDTH 4
800#define XX_DISPERR_RESET 0xf
801
802/* Receive filter table */
803#define RX_FILTER_TBL0 0xF00000
804
805/* Receive descriptor pointer table */
806#define RX_DESC_PTR_TBL_KER_A1 0x11800
807#define RX_DESC_PTR_TBL_KER_B0 0xF40000
808#define RX_DESC_PTR_TBL_KER_P0 0x900
809#define RX_ISCSI_DDIG_EN_LBN 88
810#define RX_ISCSI_DDIG_EN_WIDTH 1
811#define RX_ISCSI_HDIG_EN_LBN 87
812#define RX_ISCSI_HDIG_EN_WIDTH 1
813#define RX_DESCQ_BUF_BASE_ID_LBN 36
814#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
815#define RX_DESCQ_EVQ_ID_LBN 24
816#define RX_DESCQ_EVQ_ID_WIDTH 12
817#define RX_DESCQ_OWNER_ID_LBN 10
818#define RX_DESCQ_OWNER_ID_WIDTH 14
819#define RX_DESCQ_LABEL_LBN 5
820#define RX_DESCQ_LABEL_WIDTH 5
821#define RX_DESCQ_SIZE_LBN 3
822#define RX_DESCQ_SIZE_WIDTH 2
823#define RX_DESCQ_SIZE_4K 3
824#define RX_DESCQ_SIZE_2K 2
825#define RX_DESCQ_SIZE_1K 1
826#define RX_DESCQ_SIZE_512 0
827#define RX_DESCQ_TYPE_LBN 2
828#define RX_DESCQ_TYPE_WIDTH 1
829#define RX_DESCQ_JUMBO_LBN 1
830#define RX_DESCQ_JUMBO_WIDTH 1
831#define RX_DESCQ_EN_LBN 0
832#define RX_DESCQ_EN_WIDTH 1
833
834/* Transmit descriptor pointer table */
835#define TX_DESC_PTR_TBL_KER_A1 0x11900
836#define TX_DESC_PTR_TBL_KER_B0 0xF50000
837#define TX_DESC_PTR_TBL_KER_P0 0xa40
838#define TX_NON_IP_DROP_DIS_B0_LBN 91
839#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
840#define TX_IP_CHKSM_DIS_B0_LBN 90
841#define TX_IP_CHKSM_DIS_B0_WIDTH 1
842#define TX_TCP_CHKSM_DIS_B0_LBN 89
843#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
844#define TX_DESCQ_EN_LBN 88
845#define TX_DESCQ_EN_WIDTH 1
846#define TX_ISCSI_DDIG_EN_LBN 87
847#define TX_ISCSI_DDIG_EN_WIDTH 1
848#define TX_ISCSI_HDIG_EN_LBN 86
849#define TX_ISCSI_HDIG_EN_WIDTH 1
850#define TX_DESCQ_BUF_BASE_ID_LBN 36
851#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
852#define TX_DESCQ_EVQ_ID_LBN 24
853#define TX_DESCQ_EVQ_ID_WIDTH 12
854#define TX_DESCQ_OWNER_ID_LBN 10
855#define TX_DESCQ_OWNER_ID_WIDTH 14
856#define TX_DESCQ_LABEL_LBN 5
857#define TX_DESCQ_LABEL_WIDTH 5
858#define TX_DESCQ_SIZE_LBN 3
859#define TX_DESCQ_SIZE_WIDTH 2
860#define TX_DESCQ_SIZE_4K 3
861#define TX_DESCQ_SIZE_2K 2
862#define TX_DESCQ_SIZE_1K 1
863#define TX_DESCQ_SIZE_512 0
864#define TX_DESCQ_TYPE_LBN 1
865#define TX_DESCQ_TYPE_WIDTH 2
866
867/* Event queue pointer */
868#define EVQ_PTR_TBL_KER_A1 0x11a00
869#define EVQ_PTR_TBL_KER_B0 0xf60000
870#define EVQ_PTR_TBL_KER_P0 0x500
871#define EVQ_EN_LBN 23
872#define EVQ_EN_WIDTH 1
873#define EVQ_SIZE_LBN 20
874#define EVQ_SIZE_WIDTH 3
875#define EVQ_SIZE_32K 6
876#define EVQ_SIZE_16K 5
877#define EVQ_SIZE_8K 4
878#define EVQ_SIZE_4K 3
879#define EVQ_SIZE_2K 2
880#define EVQ_SIZE_1K 1
881#define EVQ_SIZE_512 0
882#define EVQ_BUF_BASE_ID_LBN 0
883#define EVQ_BUF_BASE_ID_WIDTH 20
884
885/* Event queue read pointer */
886#define EVQ_RPTR_REG_KER_A1 0x11b00
887#define EVQ_RPTR_REG_KER_B0 0xfa0000
/* NOTE(review): EVQ_RPTR_REG_KER (without a revision suffix) is not
 * defined anywhere in this file -- presumably it is mapped to the _A1
 * or _B0 variant elsewhere before this header is used; confirm against
 * the including code. */
888#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
889#define EVQ_RPTR_DWORD_LBN 0
890#define EVQ_RPTR_DWORD_WIDTH 14
891
892/* RSS indirection table */
893#define RX_RSS_INDIR_TBL_B0 0xFB0000
894#define RX_RSS_INDIR_ENT_B0_LBN 0
895#define RX_RSS_INDIR_ENT_B0_WIDTH 6
896
897/* Special buffer descriptors (full-mode) */
898#define BUF_FULL_TBL_KER_A1 0x8000
899#define BUF_FULL_TBL_KER_B0 0x800000
900#define IP_DAT_BUF_SIZE_LBN 50
901#define IP_DAT_BUF_SIZE_WIDTH 1
902#define IP_DAT_BUF_SIZE_8K 1
903#define IP_DAT_BUF_SIZE_4K 0
904#define BUF_ADR_REGION_LBN 48
905#define BUF_ADR_REGION_WIDTH 2
906#define BUF_ADR_FBUF_LBN 14
907#define BUF_ADR_FBUF_WIDTH 34
908#define BUF_OWNER_ID_FBUF_LBN 0
909#define BUF_OWNER_ID_FBUF_WIDTH 14
910
911/* Transmit descriptor */
912#define TX_KER_PORT_LBN 63
913#define TX_KER_PORT_WIDTH 1
914#define TX_KER_CONT_LBN 62
915#define TX_KER_CONT_WIDTH 1
916#define TX_KER_BYTE_CNT_LBN 48
917#define TX_KER_BYTE_CNT_WIDTH 14
918#define TX_KER_BUF_REGION_LBN 46
919#define TX_KER_BUF_REGION_WIDTH 2
920#define TX_KER_BUF_REGION0_DECODE 0
921#define TX_KER_BUF_REGION1_DECODE 1
922#define TX_KER_BUF_REGION2_DECODE 2
923#define TX_KER_BUF_REGION3_DECODE 3
924#define TX_KER_BUF_ADR_LBN 0
925#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
926
927/* Receive descriptor */
928#define RX_KER_BUF_SIZE_LBN 48
929#define RX_KER_BUF_SIZE_WIDTH 14
930#define RX_KER_BUF_REGION_LBN 46
931#define RX_KER_BUF_REGION_WIDTH 2
932#define RX_KER_BUF_REGION0_DECODE 0
933#define RX_KER_BUF_REGION1_DECODE 1
934#define RX_KER_BUF_REGION2_DECODE 2
935#define RX_KER_BUF_REGION3_DECODE 3
936#define RX_KER_BUF_ADR_LBN 0
937#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
938
939/**************************************************************************
940 *
941 * Falcon events
942 *
943 **************************************************************************
944 */
945
946/* Event queue entries */
947#define EV_CODE_LBN 60
948#define EV_CODE_WIDTH 4
949#define RX_IP_EV_DECODE 0
950#define TX_IP_EV_DECODE 2
951#define DRIVER_EV_DECODE 5
952#define GLOBAL_EV_DECODE 6
953#define DRV_GEN_EV_DECODE 7
954#define WHOLE_EVENT_LBN 0
955#define WHOLE_EVENT_WIDTH 64
956
957/* Receive events */
958#define RX_EV_PKT_OK_LBN 56
959#define RX_EV_PKT_OK_WIDTH 1
960#define RX_EV_PAUSE_FRM_ERR_LBN 55
961#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
962#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
963#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
964#define RX_EV_IF_FRAG_ERR_LBN 53
965#define RX_EV_IF_FRAG_ERR_WIDTH 1
966#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
967#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
968#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
969#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
970#define RX_EV_ETH_CRC_ERR_LBN 50
971#define RX_EV_ETH_CRC_ERR_WIDTH 1
972#define RX_EV_FRM_TRUNC_LBN 49
973#define RX_EV_FRM_TRUNC_WIDTH 1
974#define RX_EV_DRIB_NIB_LBN 48
975#define RX_EV_DRIB_NIB_WIDTH 1
976#define RX_EV_TOBE_DISC_LBN 47
977#define RX_EV_TOBE_DISC_WIDTH 1
978#define RX_EV_PKT_TYPE_LBN 44
979#define RX_EV_PKT_TYPE_WIDTH 3
980#define RX_EV_PKT_TYPE_ETH_DECODE 0
981#define RX_EV_PKT_TYPE_LLC_DECODE 1
982#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
983#define RX_EV_PKT_TYPE_VLAN_DECODE 3
984#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
985#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
986#define RX_EV_HDR_TYPE_LBN 42
987#define RX_EV_HDR_TYPE_WIDTH 2
988#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
989#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
990#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
991#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
992#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
993 ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
994#define RX_EV_MCAST_HASH_MATCH_LBN 40
995#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
996#define RX_EV_MCAST_PKT_LBN 39
997#define RX_EV_MCAST_PKT_WIDTH 1
998#define RX_EV_Q_LABEL_LBN 32
999#define RX_EV_Q_LABEL_WIDTH 5
1000#define RX_EV_JUMBO_CONT_LBN 31
1001#define RX_EV_JUMBO_CONT_WIDTH 1
1002#define RX_EV_BYTE_CNT_LBN 16
1003#define RX_EV_BYTE_CNT_WIDTH 14
1004#define RX_EV_SOP_LBN 15
1005#define RX_EV_SOP_WIDTH 1
1006#define RX_EV_DESC_PTR_LBN 0
1007#define RX_EV_DESC_PTR_WIDTH 12
1008
1009/* Transmit events */
1010#define TX_EV_PKT_ERR_LBN 38
1011#define TX_EV_PKT_ERR_WIDTH 1
1012#define TX_EV_Q_LABEL_LBN 32
1013#define TX_EV_Q_LABEL_WIDTH 5
1014#define TX_EV_WQ_FF_FULL_LBN 15
1015#define TX_EV_WQ_FF_FULL_WIDTH 1
1016#define TX_EV_COMP_LBN 12
1017#define TX_EV_COMP_WIDTH 1
1018#define TX_EV_DESC_PTR_LBN 0
1019#define TX_EV_DESC_PTR_WIDTH 12
1020
1021/* Driver events */
1022#define DRIVER_EV_SUB_CODE_LBN 56
1023#define DRIVER_EV_SUB_CODE_WIDTH 4
1024#define DRIVER_EV_SUB_DATA_LBN 0
1025#define DRIVER_EV_SUB_DATA_WIDTH 14
1026#define TX_DESCQ_FLS_DONE_EV_DECODE 0
1027#define RX_DESCQ_FLS_DONE_EV_DECODE 1
1028#define EVQ_INIT_DONE_EV_DECODE 2
1029#define EVQ_NOT_EN_EV_DECODE 3
1030#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
1031#define SRM_UPD_DONE_EV_DECODE 5
1032#define WAKE_UP_EV_DECODE 6
1033#define TX_PKT_NON_TCP_UDP_DECODE 9
1034#define TIMER_EV_DECODE 10
1035#define RX_RECOVERY_EV_DECODE 11
1036#define RX_DSC_ERROR_EV_DECODE 14
1037#define TX_DSC_ERROR_EV_DECODE 15
1038#define DRIVER_EV_TX_DESCQ_ID_LBN 0
1039#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
1040#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
1041#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
1042#define DRIVER_EV_RX_DESCQ_ID_LBN 0
1043#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
1044#define SRM_CLR_EV_DECODE 0
1045#define SRM_UPD_EV_DECODE 1
1046#define SRM_ILLCLR_EV_DECODE 2
1047
1048/* Global events */
1049#define RX_RECOVERY_B0_LBN 12
1050#define RX_RECOVERY_B0_WIDTH 1
1051#define XG_MNT_INTR_B0_LBN 11
1052#define XG_MNT_INTR_B0_WIDTH 1
1053#define RX_RECOVERY_A1_LBN 11
1054#define RX_RECOVERY_A1_WIDTH 1
1055#define XFP_PHY_INTR_LBN 10
1056#define XFP_PHY_INTR_WIDTH 1
1057#define XG_PHY_INTR_LBN 9
1058#define XG_PHY_INTR_WIDTH 1
1059#define G_PHY1_INTR_LBN 8
1060#define G_PHY1_INTR_WIDTH 1
1061#define G_PHY0_INTR_LBN 7
1062#define G_PHY0_INTR_WIDTH 1
1063
1064/* Driver-generated test events */
1065#define EVQ_MAGIC_LBN 0
1066#define EVQ_MAGIC_WIDTH 32
1067
1068/**************************************************************************
1069 *
1070 * Falcon MAC stats
1071 *
1072 **************************************************************************
1073 *
1074 */
1075
1076#define GRxGoodOct_offset 0x0
1077#define GRxGoodOct_WIDTH 48
1078#define GRxBadOct_offset 0x8
1079#define GRxBadOct_WIDTH 48
1080#define GRxMissPkt_offset 0x10
1081#define GRxMissPkt_WIDTH 32
1082#define GRxFalseCRS_offset 0x14
1083#define GRxFalseCRS_WIDTH 32
1084#define GRxPausePkt_offset 0x18
1085#define GRxPausePkt_WIDTH 32
1086#define GRxBadPkt_offset 0x1C
1087#define GRxBadPkt_WIDTH 32
1088#define GRxUcastPkt_offset 0x20
1089#define GRxUcastPkt_WIDTH 32
1090#define GRxMcastPkt_offset 0x24
1091#define GRxMcastPkt_WIDTH 32
1092#define GRxBcastPkt_offset 0x28
1093#define GRxBcastPkt_WIDTH 32
1094#define GRxGoodLt64Pkt_offset 0x2C
1095#define GRxGoodLt64Pkt_WIDTH 32
1096#define GRxBadLt64Pkt_offset 0x30
1097#define GRxBadLt64Pkt_WIDTH 32
1098#define GRx64Pkt_offset 0x34
1099#define GRx64Pkt_WIDTH 32
1100#define GRx65to127Pkt_offset 0x38
1101#define GRx65to127Pkt_WIDTH 32
1102#define GRx128to255Pkt_offset 0x3C
1103#define GRx128to255Pkt_WIDTH 32
1104#define GRx256to511Pkt_offset 0x40
1105#define GRx256to511Pkt_WIDTH 32
1106#define GRx512to1023Pkt_offset 0x44
1107#define GRx512to1023Pkt_WIDTH 32
1108#define GRx1024to15xxPkt_offset 0x48
1109#define GRx1024to15xxPkt_WIDTH 32
1110#define GRx15xxtoJumboPkt_offset 0x4C
1111#define GRx15xxtoJumboPkt_WIDTH 32
1112#define GRxGtJumboPkt_offset 0x50
1113#define GRxGtJumboPkt_WIDTH 32
1114#define GRxFcsErr64to15xxPkt_offset 0x54
1115#define GRxFcsErr64to15xxPkt_WIDTH 32
1116#define GRxFcsErr15xxtoJumboPkt_offset 0x58
1117#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
1118#define GRxFcsErrGtJumboPkt_offset 0x5C
1119#define GRxFcsErrGtJumboPkt_WIDTH 32
1120#define GTxGoodBadOct_offset 0x80
1121#define GTxGoodBadOct_WIDTH 48
1122#define GTxGoodOct_offset 0x88
1123#define GTxGoodOct_WIDTH 48
1124#define GTxSglColPkt_offset 0x90
1125#define GTxSglColPkt_WIDTH 32
1126#define GTxMultColPkt_offset 0x94
1127#define GTxMultColPkt_WIDTH 32
1128#define GTxExColPkt_offset 0x98
1129#define GTxExColPkt_WIDTH 32
1130#define GTxDefPkt_offset 0x9C
1131#define GTxDefPkt_WIDTH 32
1132#define GTxLateCol_offset 0xA0
1133#define GTxLateCol_WIDTH 32
1134#define GTxExDefPkt_offset 0xA4
1135#define GTxExDefPkt_WIDTH 32
1136#define GTxPausePkt_offset 0xA8
1137#define GTxPausePkt_WIDTH 32
1138#define GTxBadPkt_offset 0xAC
1139#define GTxBadPkt_WIDTH 32
1140#define GTxUcastPkt_offset 0xB0
1141#define GTxUcastPkt_WIDTH 32
1142#define GTxMcastPkt_offset 0xB4
1143#define GTxMcastPkt_WIDTH 32
1144#define GTxBcastPkt_offset 0xB8
1145#define GTxBcastPkt_WIDTH 32
1146#define GTxLt64Pkt_offset 0xBC
1147#define GTxLt64Pkt_WIDTH 32
1148#define GTx64Pkt_offset 0xC0
1149#define GTx64Pkt_WIDTH 32
1150#define GTx65to127Pkt_offset 0xC4
1151#define GTx65to127Pkt_WIDTH 32
1152#define GTx128to255Pkt_offset 0xC8
1153#define GTx128to255Pkt_WIDTH 32
1154#define GTx256to511Pkt_offset 0xCC
1155#define GTx256to511Pkt_WIDTH 32
1156#define GTx512to1023Pkt_offset 0xD0
1157#define GTx512to1023Pkt_WIDTH 32
1158#define GTx1024to15xxPkt_offset 0xD4
1159#define GTx1024to15xxPkt_WIDTH 32
1160#define GTx15xxtoJumboPkt_offset 0xD8
1161#define GTx15xxtoJumboPkt_WIDTH 32
1162#define GTxGtJumboPkt_offset 0xDC
1163#define GTxGtJumboPkt_WIDTH 32
1164#define GTxNonTcpUdpPkt_offset 0xE0
1165#define GTxNonTcpUdpPkt_WIDTH 16
1166#define GTxMacSrcErrPkt_offset 0xE4
1167#define GTxMacSrcErrPkt_WIDTH 16
1168#define GTxIpSrcErrPkt_offset 0xE8
1169#define GTxIpSrcErrPkt_WIDTH 16
1170#define GDmaDone_offset 0xEC
1171#define GDmaDone_WIDTH 32
1172
1173#define XgRxOctets_offset 0x0
1174#define XgRxOctets_WIDTH 48
1175#define XgRxOctetsOK_offset 0x8
1176#define XgRxOctetsOK_WIDTH 48
1177#define XgRxPkts_offset 0x10
1178#define XgRxPkts_WIDTH 32
1179#define XgRxPktsOK_offset 0x14
1180#define XgRxPktsOK_WIDTH 32
1181#define XgRxBroadcastPkts_offset 0x18
1182#define XgRxBroadcastPkts_WIDTH 32
1183#define XgRxMulticastPkts_offset 0x1C
1184#define XgRxMulticastPkts_WIDTH 32
1185#define XgRxUnicastPkts_offset 0x20
1186#define XgRxUnicastPkts_WIDTH 32
1187#define XgRxUndersizePkts_offset 0x24
1188#define XgRxUndersizePkts_WIDTH 32
1189#define XgRxOversizePkts_offset 0x28
1190#define XgRxOversizePkts_WIDTH 32
1191#define XgRxJabberPkts_offset 0x2C
1192#define XgRxJabberPkts_WIDTH 32
1193#define XgRxUndersizeFCSerrorPkts_offset 0x30
1194#define XgRxUndersizeFCSerrorPkts_WIDTH 32
1195#define XgRxDropEvents_offset 0x34
1196#define XgRxDropEvents_WIDTH 32
1197#define XgRxFCSerrorPkts_offset 0x38
1198#define XgRxFCSerrorPkts_WIDTH 32
1199#define XgRxAlignError_offset 0x3C
1200#define XgRxAlignError_WIDTH 32
1201#define XgRxSymbolError_offset 0x40
1202#define XgRxSymbolError_WIDTH 32
1203#define XgRxInternalMACError_offset 0x44
1204#define XgRxInternalMACError_WIDTH 32
1205#define XgRxControlPkts_offset 0x48
1206#define XgRxControlPkts_WIDTH 32
1207#define XgRxPausePkts_offset 0x4C
1208#define XgRxPausePkts_WIDTH 32
1209#define XgRxPkts64Octets_offset 0x50
1210#define XgRxPkts64Octets_WIDTH 32
1211#define XgRxPkts65to127Octets_offset 0x54
1212#define XgRxPkts65to127Octets_WIDTH 32
1213#define XgRxPkts128to255Octets_offset 0x58
1214#define XgRxPkts128to255Octets_WIDTH 32
1215#define XgRxPkts256to511Octets_offset 0x5C
1216#define XgRxPkts256to511Octets_WIDTH 32
1217#define XgRxPkts512to1023Octets_offset 0x60
1218#define XgRxPkts512to1023Octets_WIDTH 32
1219#define XgRxPkts1024to15xxOctets_offset 0x64
1220#define XgRxPkts1024to15xxOctets_WIDTH 32
1221#define XgRxPkts15xxtoMaxOctets_offset 0x68
1222#define XgRxPkts15xxtoMaxOctets_WIDTH 32
1223#define XgRxLengthError_offset 0x6C
1224#define XgRxLengthError_WIDTH 32
1225#define XgTxPkts_offset 0x80
1226#define XgTxPkts_WIDTH 32
1227#define XgTxOctets_offset 0x88
1228#define XgTxOctets_WIDTH 48
1229#define XgTxMulticastPkts_offset 0x90
1230#define XgTxMulticastPkts_WIDTH 32
1231#define XgTxBroadcastPkts_offset 0x94
1232#define XgTxBroadcastPkts_WIDTH 32
1233#define XgTxUnicastPkts_offset 0x98
1234#define XgTxUnicastPkts_WIDTH 32
1235#define XgTxControlPkts_offset 0x9C
1236#define XgTxControlPkts_WIDTH 32
1237#define XgTxPausePkts_offset 0xA0
1238#define XgTxPausePkts_WIDTH 32
1239#define XgTxPkts64Octets_offset 0xA4
1240#define XgTxPkts64Octets_WIDTH 32
1241#define XgTxPkts65to127Octets_offset 0xA8
1242#define XgTxPkts65to127Octets_WIDTH 32
1243#define XgTxPkts128to255Octets_offset 0xAC
1244#define XgTxPkts128to255Octets_WIDTH 32
1245#define XgTxPkts256to511Octets_offset 0xB0
1246#define XgTxPkts256to511Octets_WIDTH 32
1247#define XgTxPkts512to1023Octets_offset 0xB4
1248#define XgTxPkts512to1023Octets_WIDTH 32
1249#define XgTxPkts1024to15xxOctets_offset 0xB8
1250#define XgTxPkts1024to15xxOctets_WIDTH 32
1251#define XgTxPkts1519toMaxOctets_offset 0xBC
1252#define XgTxPkts1519toMaxOctets_WIDTH 32
1253#define XgTxUndersizePkts_offset 0xC0
1254#define XgTxUndersizePkts_WIDTH 32
1255#define XgTxOversizePkts_offset 0xC4
1256#define XgTxOversizePkts_WIDTH 32
1257#define XgTxNonTcpUdpPkt_offset 0xC8
1258#define XgTxNonTcpUdpPkt_WIDTH 16
1259#define XgTxMacSrcErrPkt_offset 0xCC
1260#define XgTxMacSrcErrPkt_WIDTH 16
1261#define XgTxIpSrcErrPkt_offset 0xD0
1262#define XgTxIpSrcErrPkt_WIDTH 16
1263#define XgDmaDone_offset 0xD4
1264
1265#define FALCON_STATS_NOT_DONE 0x00000000
1266#define FALCON_STATS_DONE 0xffffffff
1267
1268/* Interrupt status register bits */
1269#define FATAL_INT_LBN 64
1270#define FATAL_INT_WIDTH 1
1271#define INT_EVQS_LBN 40
1272#define INT_EVQS_WIDTH 4
1273
1274/**************************************************************************
1275 *
1276 * Falcon non-volatile configuration
1277 *
1278 **************************************************************************
1279 */
1280
1281/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
1282struct falcon_nvconfig_board_v2 {
1283 __le16 nports;
1284 u8 port0_phy_addr;
1285 u8 port0_phy_type;
1286 u8 port1_phy_addr;
1287 u8 port1_phy_type;
1288 __le16 asic_sub_revision;
1289 __le16 board_revision;
1290} __packed;
1291
1292/* Board configuration v3 extra information */
1293struct falcon_nvconfig_board_v3 {
1294 __le32 spi_device_type[2];
1295} __packed;
1296
1297/* Bit numbers for spi_device_type */
1298#define SPI_DEV_TYPE_SIZE_LBN 0
1299#define SPI_DEV_TYPE_SIZE_WIDTH 5
1300#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
1301#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
1302#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
1303#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
1304#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
1305#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
1306#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
1307#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
1308#define SPI_DEV_TYPE_FIELD(type, field) \
1309 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
1310
1311#define NVCONFIG_OFFSET 0x300
1312
1313#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1314struct falcon_nvconfig {
1315 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
1316 u8 mac_address[2][8]; /* 0x310 */
1317 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
1318 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
1319 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
1320 efx_oword_t hw_init_reg; /* 0x350 */
1321 efx_oword_t nic_stat_reg; /* 0x360 */
1322 efx_oword_t glb_ctl_reg; /* 0x370 */
1323 efx_oword_t srm_cfg_reg; /* 0x380 */
1324 efx_oword_t spare_reg; /* 0x390 */
1325 __le16 board_magic_num; /* 0x3A0 */
1326 __le16 board_struct_ver;
1327 __le16 board_checksum;
1328 struct falcon_nvconfig_board_v2 board_v2;
1329 efx_oword_t ee_base_page_reg; /* 0x3B0 */
1330 struct falcon_nvconfig_board_v3 board_v3;
1331} __packed;
1332
1333#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
deleted file mode 100644
index 8883092dae97..000000000000
--- a/drivers/net/sfc/falcon_io.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_IO_H
12#define EFX_FALCON_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * Falcon hardware access
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most Falcon registers require 16-byte (or 8-byte, for SRAM
26 * registers) atomic writes which necessitates locking.
27 * Under normal operation few writes to the Falcon BAR are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16byte registers are "collected" in the Falcon BIU
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49/* Special buffer descriptors (Falcon SRAM) */
50#define BUF_TBL_KER_A1 0x18000
51#define BUF_TBL_KER_B0 0x800000
52
53
54#if BITS_PER_LONG == 64
55#define FALCON_USE_QWORD_IO 1
56#endif
57
58#ifdef FALCON_USE_QWORD_IO
59static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
60 unsigned int reg)
61{
62 __raw_writeq((__force u64)value, efx->membase + reg);
63}
64static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
65{
66 return (__force __le64)__raw_readq(efx->membase + reg);
67}
68#endif
69
70static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
71 unsigned int reg)
72{
73 __raw_writel((__force u32)value, efx->membase + reg);
74}
75static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
76{
77 return (__force __le32)__raw_readl(efx->membase + reg);
78}
79
80/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
81static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
82 unsigned int reg)
83{
84 unsigned long flags;
85
86 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
87 EFX_OWORD_VAL(*value));
88
89 spin_lock_irqsave(&efx->biu_lock, flags);
90#ifdef FALCON_USE_QWORD_IO
91 _falcon_writeq(efx, value->u64[0], reg + 0);
92 wmb();
93 _falcon_writeq(efx, value->u64[1], reg + 8);
94#else
95 _falcon_writel(efx, value->u32[0], reg + 0);
96 _falcon_writel(efx, value->u32[1], reg + 4);
97 _falcon_writel(efx, value->u32[2], reg + 8);
98 wmb();
99 _falcon_writel(efx, value->u32[3], reg + 12);
100#endif
101 mmiowb();
102 spin_unlock_irqrestore(&efx->biu_lock, flags);
103}
104
105/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
106static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
107 unsigned int index)
108{
109 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
110 unsigned long flags;
111
112 EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
113 reg, EFX_QWORD_VAL(*value));
114
115 spin_lock_irqsave(&efx->biu_lock, flags);
116#ifdef FALCON_USE_QWORD_IO
117 _falcon_writeq(efx, value->u64[0], reg + 0);
118#else
119 _falcon_writel(efx, value->u32[0], reg + 0);
120 wmb();
121 _falcon_writel(efx, value->u32[1], reg + 4);
122#endif
123 mmiowb();
124 spin_unlock_irqrestore(&efx->biu_lock, flags);
125}
126
127/* Write dword to Falcon register that allows partial writes
128 *
129 * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
130 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
131 * for lockless writes.
132 */
133static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
134 unsigned int reg)
135{
136 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
137 reg, EFX_DWORD_VAL(*value));
138
139 /* No lock required */
140 _falcon_writel(efx, value->u32[0], reg);
141}
142
143/* Read from a Falcon register
144 *
145 * This reads an entire 16-byte Falcon register in one go, locking as
146 * appropriate. It is essential to read the first dword first, as this
147 * prompts Falcon to load the current value into the shadow register.
148 */
149static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
150 unsigned int reg)
151{
152 unsigned long flags;
153
154 spin_lock_irqsave(&efx->biu_lock, flags);
155 value->u32[0] = _falcon_readl(efx, reg + 0);
156 rmb();
157 value->u32[1] = _falcon_readl(efx, reg + 4);
158 value->u32[2] = _falcon_readl(efx, reg + 8);
159 value->u32[3] = _falcon_readl(efx, reg + 12);
160 spin_unlock_irqrestore(&efx->biu_lock, flags);
161
162 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
163 EFX_OWORD_VAL(*value));
164}
165
166/* This reads an 8-byte Falcon SRAM entry in one go. */
167static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
168 unsigned int index)
169{
170 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
171 unsigned long flags;
172
173 spin_lock_irqsave(&efx->biu_lock, flags);
174#ifdef FALCON_USE_QWORD_IO
175 value->u64[0] = _falcon_readq(efx, reg + 0);
176#else
177 value->u32[0] = _falcon_readl(efx, reg + 0);
178 rmb();
179 value->u32[1] = _falcon_readl(efx, reg + 4);
180#endif
181 spin_unlock_irqrestore(&efx->biu_lock, flags);
182
183 EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
184 reg, EFX_QWORD_VAL(*value));
185}
186
187/* Read dword from Falcon register that allows partial writes (sic) */
188static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
189 unsigned int reg)
190{
191 value->u32[0] = _falcon_readl(efx, reg);
192 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
193 reg, EFX_DWORD_VAL(*value));
194}
195
196/* Write to a register forming part of a table */
197static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
198 unsigned int reg, unsigned int index)
199{
200 falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
201}
202
203/* Read to a register forming part of a table */
204static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
205 unsigned int reg, unsigned int index)
206{
207 falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
208}
209
210/* Write to a dword register forming part of a table */
211static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
212 unsigned int reg, unsigned int index)
213{
214 falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
215}
216
217/* Page-mapped register block size */
218#define FALCON_PAGE_BLOCK_SIZE 0x2000
219
220/* Calculate offset to page-mapped register block */
221#define FALCON_PAGED_REG(page, reg) \
222 ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
223
224/* As for falcon_write(), but for a page-mapped register. */
225static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
226 unsigned int reg, unsigned int page)
227{
228 falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
229}
230
231/* As for falcon_writel(), but for a page-mapped register. */
232static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
233 unsigned int reg, unsigned int page)
234{
235 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
236}
237
238/* Write dword to Falcon page-mapped register with an extra lock.
239 *
240 * As for falcon_writel_page(), but for a register that suffers from
241 * SFC bug 3181. If writing to page 0, take out a lock so the BIU
242 * collector cannot be confused.
243 */
244static inline void falcon_writel_page_locked(struct efx_nic *efx,
245 efx_dword_t *value,
246 unsigned int reg,
247 unsigned int page)
248{
249 unsigned long flags = 0;
250
251 if (page == 0)
252 spin_lock_irqsave(&efx->biu_lock, flags);
253 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
254 if (page == 0)
255 spin_unlock_irqrestore(&efx->biu_lock, flags);
256}
257
258#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bec52ca37eee..1523efdcefe6 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -12,12 +12,11 @@
12#include "net_driver.h" 12#include "net_driver.h"
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "falcon.h"
15#include "falcon_hwdefs.h" 15#include "regs.h"
16#include "falcon_io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h" 19#include "phy.h"
20#include "boards.h"
21#include "workarounds.h" 20#include "workarounds.h"
22 21
23/************************************************************************** 22/**************************************************************************
@@ -36,43 +35,47 @@ static void falcon_setup_xaui(struct efx_nic *efx)
36 if (efx->phy_type == PHY_TYPE_NONE) 35 if (efx->phy_type == PHY_TYPE_NONE)
37 return; 36 return;
38 37
39 falcon_read(efx, &sdctl, XX_SD_CTL_REG); 38 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
40 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); 39 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
41 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); 40 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
42 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); 41 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
43 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); 42 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
44 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); 43 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
45 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); 44 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
46 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); 45 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
47 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); 46 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
48 falcon_write(efx, &sdctl, XX_SD_CTL_REG); 47 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
49 48
50 EFX_POPULATE_OWORD_8(txdrv, 49 EFX_POPULATE_OWORD_8(txdrv,
51 XX_DEQD, XX_TXDRV_DEQ_DEFAULT, 50 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
52 XX_DEQC, XX_TXDRV_DEQ_DEFAULT, 51 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
53 XX_DEQB, XX_TXDRV_DEQ_DEFAULT, 52 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
54 XX_DEQA, XX_TXDRV_DEQ_DEFAULT, 53 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
55 XX_DTXD, XX_TXDRV_DTX_DEFAULT, 54 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
56 XX_DTXC, XX_TXDRV_DTX_DEFAULT, 55 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
57 XX_DTXB, XX_TXDRV_DTX_DEFAULT, 56 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
58 XX_DTXA, XX_TXDRV_DTX_DEFAULT); 57 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
59 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG); 58 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
60} 59}
61 60
62int falcon_reset_xaui(struct efx_nic *efx) 61int falcon_reset_xaui(struct efx_nic *efx)
63{ 62{
63 struct falcon_nic_data *nic_data = efx->nic_data;
64 efx_oword_t reg; 64 efx_oword_t reg;
65 int count; 65 int count;
66 66
67 /* Don't fetch MAC statistics over an XMAC reset */
68 WARN_ON(nic_data->stats_disable_count == 0);
69
67 /* Start reset sequence */ 70 /* Start reset sequence */
68 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); 71 EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
69 falcon_write(efx, &reg, XX_PWR_RST_REG); 72 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
70 73
71 /* Wait up to 10 ms for completion, then reinitialise */ 74 /* Wait up to 10 ms for completion, then reinitialise */
72 for (count = 0; count < 1000; count++) { 75 for (count = 0; count < 1000; count++) {
73 falcon_read(efx, &reg, XX_PWR_RST_REG); 76 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
74 if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 && 77 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
75 EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) { 78 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
76 falcon_setup_xaui(efx); 79 falcon_setup_xaui(efx);
77 return 0; 80 return 0;
78 } 81 }
@@ -86,26 +89,26 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
86{ 89{
87 efx_oword_t reg; 90 efx_oword_t reg;
88 91
89 if ((falcon_rev(efx) != FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
90 return; 93 return;
91 94
92 /* We expect xgmii faults if the wireside link is up */ 95 /* We expect xgmii faults if the wireside link is up */
93 if (!EFX_WORKAROUND_5147(efx) || !efx->link_up) 96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
94 return; 97 return;
95 98
96 /* We can only use this interrupt to signal the negative edge of 99 /* We can only use this interrupt to signal the negative edge of
97 * xaui_align [we have to poll the positive edge]. */ 100 * xaui_align [we have to poll the positive edge]. */
98 if (!efx->mac_up) 101 if (efx->xmac_poll_required)
99 return; 102 return;
100 103
101 /* Flush the ISR */ 104 /* Flush the ISR */
102 if (enable) 105 if (enable)
103 falcon_read(efx, &reg, XM_MGT_INT_REG_B0); 106 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
104 107
105 EFX_POPULATE_OWORD_2(reg, 108 EFX_POPULATE_OWORD_2(reg,
106 XM_MSK_RMTFLT, !enable, 109 FRF_AB_XM_MSK_RMTFLT, !enable,
107 XM_MSK_LCLFLT, !enable); 110 FRF_AB_XM_MSK_LCLFLT, !enable);
108 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0); 111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
109} 112}
110 113
111/* Get status of XAUI link */ 114/* Get status of XAUI link */
@@ -119,21 +122,21 @@ bool falcon_xaui_link_ok(struct efx_nic *efx)
119 return true; 122 return true;
120 123
121 /* Read link status */ 124 /* Read link status */
122 falcon_read(efx, &reg, XX_CORE_STAT_REG); 125 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
123 126
124 align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE); 127 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
125 sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT); 128 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
126 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) 129 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
127 link_ok = true; 130 link_ok = true;
128 131
129 /* Clear link status ready for next read */ 132 /* Clear link status ready for next read */
130 EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); 133 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
131 EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); 134 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
132 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); 135 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
133 falcon_write(efx, &reg, XX_CORE_STAT_REG); 136 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
134 137
135 /* If the link is up, then check the phy side of the xaui link */ 138 /* If the link is up, then check the phy side of the xaui link */
136 if (efx->link_up && link_ok) 139 if (efx->link_state.up && link_ok)
137 if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS)) 140 if (efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS))
138 link_ok = efx_mdio_phyxgxs_lane_sync(efx); 141 link_ok = efx_mdio_phyxgxs_lane_sync(efx);
139 142
@@ -144,59 +147,53 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
144{ 147{
145 unsigned int max_frame_len; 148 unsigned int max_frame_len;
146 efx_oword_t reg; 149 efx_oword_t reg;
147 bool rx_fc = !!(efx->link_fc & EFX_FC_RX); 150 bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
148 151
149 /* Configure MAC - cut-thru mode is hard wired on */ 152 /* Configure MAC - cut-thru mode is hard wired on */
150 EFX_POPULATE_DWORD_3(reg, 153 EFX_POPULATE_OWORD_3(reg,
151 XM_RX_JUMBO_MODE, 1, 154 FRF_AB_XM_RX_JUMBO_MODE, 1,
152 XM_TX_STAT_EN, 1, 155 FRF_AB_XM_TX_STAT_EN, 1,
153 XM_RX_STAT_EN, 1); 156 FRF_AB_XM_RX_STAT_EN, 1);
154 falcon_write(efx, &reg, XM_GLB_CFG_REG); 157 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
155 158
156 /* Configure TX */ 159 /* Configure TX */
157 EFX_POPULATE_DWORD_6(reg, 160 EFX_POPULATE_OWORD_6(reg,
158 XM_TXEN, 1, 161 FRF_AB_XM_TXEN, 1,
159 XM_TX_PRMBL, 1, 162 FRF_AB_XM_TX_PRMBL, 1,
160 XM_AUTO_PAD, 1, 163 FRF_AB_XM_AUTO_PAD, 1,
161 XM_TXCRC, 1, 164 FRF_AB_XM_TXCRC, 1,
162 XM_FCNTL, 1, 165 FRF_AB_XM_FCNTL, 1,
163 XM_IPG, 0x3); 166 FRF_AB_XM_IPG, 0x3);
164 falcon_write(efx, &reg, XM_TX_CFG_REG); 167 efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
165 168
166 /* Configure RX */ 169 /* Configure RX */
167 EFX_POPULATE_DWORD_5(reg, 170 EFX_POPULATE_OWORD_5(reg,
168 XM_RXEN, 1, 171 FRF_AB_XM_RXEN, 1,
169 XM_AUTO_DEPAD, 0, 172 FRF_AB_XM_AUTO_DEPAD, 0,
170 XM_ACPT_ALL_MCAST, 1, 173 FRF_AB_XM_ACPT_ALL_MCAST, 1,
171 XM_ACPT_ALL_UCAST, efx->promiscuous, 174 FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
172 XM_PASS_CRC_ERR, 1); 175 FRF_AB_XM_PASS_CRC_ERR, 1);
173 falcon_write(efx, &reg, XM_RX_CFG_REG); 176 efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
174 177
175 /* Set frame length */ 178 /* Set frame length */
176 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 179 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
177 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); 180 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
178 falcon_write(efx, &reg, XM_RX_PARAM_REG); 181 efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
179 EFX_POPULATE_DWORD_2(reg, 182 EFX_POPULATE_OWORD_2(reg,
180 XM_MAX_TX_FRM_SIZE, max_frame_len, 183 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
181 XM_TX_JUMBO_MODE, 1); 184 FRF_AB_XM_TX_JUMBO_MODE, 1);
182 falcon_write(efx, &reg, XM_TX_PARAM_REG); 185 efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
183 186
184 EFX_POPULATE_DWORD_2(reg, 187 EFX_POPULATE_OWORD_2(reg,
185 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ 188 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
186 XM_DIS_FCNTL, !rx_fc); 189 FRF_AB_XM_DIS_FCNTL, !rx_fc);
187 falcon_write(efx, &reg, XM_FC_REG); 190 efx_writeo(efx, &reg, FR_AB_XM_FC);
188 191
189 /* Set MAC address */ 192 /* Set MAC address */
190 EFX_POPULATE_DWORD_4(reg, 193 memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
191 XM_ADR_0, efx->net_dev->dev_addr[0], 194 efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
192 XM_ADR_1, efx->net_dev->dev_addr[1], 195 memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
193 XM_ADR_2, efx->net_dev->dev_addr[2], 196 efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
194 XM_ADR_3, efx->net_dev->dev_addr[3]);
195 falcon_write(efx, &reg, XM_ADR_LO_REG);
196 EFX_POPULATE_DWORD_2(reg,
197 XM_ADR_4, efx->net_dev->dev_addr[4],
198 XM_ADR_5, efx->net_dev->dev_addr[5]);
199 falcon_write(efx, &reg, XM_ADR_HI_REG);
200} 197}
201 198
202static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 199static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
@@ -212,12 +209,13 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
212 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 209 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
213 bool reset_xgxs; 210 bool reset_xgxs;
214 211
215 falcon_read(efx, &reg, XX_CORE_STAT_REG); 212 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
216 old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN); 213 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
217 old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN); 214 old_xgmii_loopback =
215 EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
218 216
219 falcon_read(efx, &reg, XX_SD_CTL_REG); 217 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
220 old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA); 218 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
221 219
222 /* The PHY driver may have turned XAUI off */ 220 /* The PHY driver may have turned XAUI off */
223 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 221 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
@@ -228,42 +226,52 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
228 falcon_reset_xaui(efx); 226 falcon_reset_xaui(efx);
229 } 227 }
230 228
231 falcon_read(efx, &reg, XX_CORE_STAT_REG); 229 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
232 EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG, 230 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
233 (xgxs_loopback || xaui_loopback) ? 231 (xgxs_loopback || xaui_loopback) ?
234 XX_FORCE_SIG_DECODE_FORCED : 0); 232 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
235 EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 233 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
236 EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 234 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
237 falcon_write(efx, &reg, XX_CORE_STAT_REG); 235 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
238 236
239 falcon_read(efx, &reg, XX_SD_CTL_REG); 237 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
240 EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 238 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
241 EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 239 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
242 EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 240 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
243 EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 241 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
244 falcon_write(efx, &reg, XX_SD_CTL_REG); 242 efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
245} 243}
246 244
247 245
248/* Try and bring the Falcon side of the Falcon-Phy XAUI link fails 246/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
249 * to come back up. Bash it until it comes back up */ 247static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
250static void falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
251{ 248{
252 efx->mac_up = falcon_xaui_link_ok(efx); 249 bool mac_up = falcon_xaui_link_ok(efx);
253 250
254 if ((efx->loopback_mode == LOOPBACK_NETWORK) || 251 if ((efx->loopback_mode == LOOPBACK_NETWORK) ||
255 efx_phy_mode_disabled(efx->phy_mode)) 252 efx_phy_mode_disabled(efx->phy_mode))
256 /* XAUI link is expected to be down */ 253 /* XAUI link is expected to be down */
257 return; 254 return mac_up;
255
256 falcon_stop_nic_stats(efx);
258 257
259 while (!efx->mac_up && tries) { 258 while (!mac_up && tries) {
260 EFX_LOG(efx, "bashing xaui\n"); 259 EFX_LOG(efx, "bashing xaui\n");
261 falcon_reset_xaui(efx); 260 falcon_reset_xaui(efx);
262 udelay(200); 261 udelay(200);
263 262
264 efx->mac_up = falcon_xaui_link_ok(efx); 263 mac_up = falcon_xaui_link_ok(efx);
265 --tries; 264 --tries;
266 } 265 }
266
267 falcon_start_nic_stats(efx);
268
269 return mac_up;
270}
271
272static bool falcon_xmac_check_fault(struct efx_nic *efx)
273{
274 return !falcon_check_xaui_link_up(efx, 5);
267} 275}
268 276
269static void falcon_reconfigure_xmac(struct efx_nic *efx) 277static void falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -275,18 +283,13 @@ static void falcon_reconfigure_xmac(struct efx_nic *efx)
275 283
276 falcon_reconfigure_mac_wrapper(efx); 284 falcon_reconfigure_mac_wrapper(efx);
277 285
278 falcon_check_xaui_link_up(efx, 5); 286 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
279 falcon_mask_status_intr(efx, true); 287 falcon_mask_status_intr(efx, true);
280} 288}
281 289
282static void falcon_update_stats_xmac(struct efx_nic *efx) 290static void falcon_update_stats_xmac(struct efx_nic *efx)
283{ 291{
284 struct efx_mac_stats *mac_stats = &efx->mac_stats; 292 struct efx_mac_stats *mac_stats = &efx->mac_stats;
285 int rc;
286
287 rc = falcon_dma_stats(efx, XgDmaDone_offset);
288 if (rc)
289 return;
290 293
291 /* Update MAC stats from DMAed values */ 294 /* Update MAC stats from DMAed values */
292 FALCON_STAT(efx, XgRxOctets, rx_bytes); 295 FALCON_STAT(efx, XgRxOctets, rx_bytes);
@@ -344,35 +347,19 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
344 mac_stats->rx_control * 64); 347 mac_stats->rx_control * 64);
345} 348}
346 349
347static void falcon_xmac_irq(struct efx_nic *efx) 350void falcon_poll_xmac(struct efx_nic *efx)
348{
349 /* The XGMII link has a transient fault, which indicates either:
350 * - there's a transient xgmii fault
351 * - falcon's end of the xaui link may need a kick
352 * - the wire-side link may have gone down, but the lasi/poll()
353 * hasn't noticed yet.
354 *
355 * We only want to even bother polling XAUI if we're confident it's
356 * not (1) or (3). In both cases, the only reliable way to spot this
357 * is to wait a bit. We do this here by forcing the mac link state
358 * to down, and waiting for the mac poll to come round and check
359 */
360 efx->mac_up = false;
361}
362
363static void falcon_poll_xmac(struct efx_nic *efx)
364{ 351{
365 if (!EFX_WORKAROUND_5147(efx) || !efx->link_up || efx->mac_up) 352 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
353 !efx->xmac_poll_required)
366 return; 354 return;
367 355
368 falcon_mask_status_intr(efx, false); 356 falcon_mask_status_intr(efx, false);
369 falcon_check_xaui_link_up(efx, 1); 357 efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
370 falcon_mask_status_intr(efx, true); 358 falcon_mask_status_intr(efx, true);
371} 359}
372 360
373struct efx_mac_operations falcon_xmac_operations = { 361struct efx_mac_operations falcon_xmac_operations = {
374 .reconfigure = falcon_reconfigure_xmac, 362 .reconfigure = falcon_reconfigure_xmac,
375 .update_stats = falcon_update_stats_xmac, 363 .update_stats = falcon_update_stats_xmac,
376 .irq = falcon_xmac_irq, 364 .check_fault = falcon_xmac_check_fault,
377 .poll = falcon_poll_xmac,
378}; 365};
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
deleted file mode 100644
index dfccaa7b573e..000000000000
--- a/drivers/net/sfc/gmii.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_GMII_H
12#define EFX_GMII_H
13
14/*
15 * GMII interface
16 */
17
18#include <linux/mii.h>
19
20/* GMII registers, excluding registers already defined as MII
21 * registers in mii.h
22 */
23#define GMII_IER 0x12 /* Interrupt enable register */
24#define GMII_ISR 0x13 /* Interrupt status register */
25
26/* Interrupt enable register */
27#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
28#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
29#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
30#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
31#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
32#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
33#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
34#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
35#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
36#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
37#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
38#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
39#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
40#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
41#define IER_JABBER 0x0001 /* Bit 0 - jabber */
42
43/* Interrupt status register */
44#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
45#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
46#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
47#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
48#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
49#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
50#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
51#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
52#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
53#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
54#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
55#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
56#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59
60#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
new file mode 100644
index 000000000000..b89177c27f4a
--- /dev/null
+++ b/drivers/net/sfc/io.h
@@ -0,0 +1,256 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_IO_H
12#define EFX_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * NIC register I/O
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
26 * which necessitates locking.
27 * Under normal operation few writes to NIC registers are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16byte registers are "collected" in the BIU.
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49#if BITS_PER_LONG == 64
50#define EFX_USE_QWORD_IO 1
51#endif
52
53#ifdef EFX_USE_QWORD_IO
54static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
55 unsigned int reg)
56{
57 __raw_writeq((__force u64)value, efx->membase + reg);
58}
59static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
60{
61 return (__force __le64)__raw_readq(efx->membase + reg);
62}
63#endif
64
65static inline void _efx_writed(struct efx_nic *efx, __le32 value,
66 unsigned int reg)
67{
68 __raw_writel((__force u32)value, efx->membase + reg);
69}
70static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
71{
72 return (__force __le32)__raw_readl(efx->membase + reg);
73}
74
75/* Writes to a normal 16-byte Efx register, locking as appropriate. */
76static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
77 unsigned int reg)
78{
79 unsigned long flags __attribute__ ((unused));
80
81 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
82 EFX_OWORD_VAL(*value));
83
84 spin_lock_irqsave(&efx->biu_lock, flags);
85#ifdef EFX_USE_QWORD_IO
86 _efx_writeq(efx, value->u64[0], reg + 0);
87 wmb();
88 _efx_writeq(efx, value->u64[1], reg + 8);
89#else
90 _efx_writed(efx, value->u32[0], reg + 0);
91 _efx_writed(efx, value->u32[1], reg + 4);
92 _efx_writed(efx, value->u32[2], reg + 8);
93 wmb();
94 _efx_writed(efx, value->u32[3], reg + 12);
95#endif
96 mmiowb();
97 spin_unlock_irqrestore(&efx->biu_lock, flags);
98}
99
100/* Write an 8-byte NIC SRAM entry through the supplied mapping,
101 * locking as appropriate. */
102static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
103 efx_qword_t *value, unsigned int index)
104{
105 unsigned int addr = index * sizeof(*value);
106 unsigned long flags __attribute__ ((unused));
107
108 EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
109 addr, EFX_QWORD_VAL(*value));
110
111 spin_lock_irqsave(&efx->biu_lock, flags);
112#ifdef EFX_USE_QWORD_IO
113 __raw_writeq((__force u64)value->u64[0], membase + addr);
114#else
115 __raw_writel((__force u32)value->u32[0], membase + addr);
116 wmb();
117 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
118#endif
119 mmiowb();
120 spin_unlock_irqrestore(&efx->biu_lock, flags);
121}
122
123/* Write dword to NIC register that allows partial writes
124 *
125 * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
126 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
127 * for lockless writes.
128 */
129static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
130 unsigned int reg)
131{
132 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
133 reg, EFX_DWORD_VAL(*value));
134
135 /* No lock required */
136 _efx_writed(efx, value->u32[0], reg);
137}
138
139/* Read from a NIC register
140 *
141 * This reads an entire 16-byte register in one go, locking as
142 * appropriate. It is essential to read the first dword first, as this
143 * prompts the NIC to load the current value into the shadow register.
144 */
145static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
146 unsigned int reg)
147{
148 unsigned long flags __attribute__ ((unused));
149
150 spin_lock_irqsave(&efx->biu_lock, flags);
151 value->u32[0] = _efx_readd(efx, reg + 0);
152 rmb();
153 value->u32[1] = _efx_readd(efx, reg + 4);
154 value->u32[2] = _efx_readd(efx, reg + 8);
155 value->u32[3] = _efx_readd(efx, reg + 12);
156 spin_unlock_irqrestore(&efx->biu_lock, flags);
157
158 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
159 EFX_OWORD_VAL(*value));
160}
161
162/* Read an 8-byte SRAM entry through supplied mapping,
163 * locking as appropriate. */
164static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
165 efx_qword_t *value, unsigned int index)
166{
167 unsigned int addr = index * sizeof(*value);
168 unsigned long flags __attribute__ ((unused));
169
170 spin_lock_irqsave(&efx->biu_lock, flags);
171#ifdef EFX_USE_QWORD_IO
172 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
173#else
174 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
175 rmb();
176 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
177#endif
178 spin_unlock_irqrestore(&efx->biu_lock, flags);
179
180 EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
181 addr, EFX_QWORD_VAL(*value));
182}
183
184/* Read dword from register that allows partial writes (sic) */
185static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
186 unsigned int reg)
187{
188 value->u32[0] = _efx_readd(efx, reg);
189 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
190 reg, EFX_DWORD_VAL(*value));
191}
192
193/* Write to a register forming part of a table */
194static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
195 unsigned int reg, unsigned int index)
196{
197 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
198}
199
200/* Read to a register forming part of a table */
201static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
202 unsigned int reg, unsigned int index)
203{
204 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
205}
206
207/* Write to a dword register forming part of a table */
208static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
209 unsigned int reg, unsigned int index)
210{
211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
212}
213
214/* Page-mapped register block size */
215#define EFX_PAGE_BLOCK_SIZE 0x2000
216
217/* Calculate offset to page-mapped register block */
218#define EFX_PAGED_REG(page, reg) \
219 ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
220
221/* As for efx_writeo(), but for a page-mapped register. */
222static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
223 unsigned int reg, unsigned int page)
224{
225 efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
226}
227
228/* As for efx_writed(), but for a page-mapped register. */
229static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
230 unsigned int reg, unsigned int page)
231{
232 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
233}
234
235/* Write dword to page-mapped register with an extra lock.
236 *
237 * As for efx_writed_page(), but for a register that suffers from
238 * SFC bug 3181. Take out a lock so the BIU collector cannot be
239 * confused. */
240static inline void efx_writed_page_locked(struct efx_nic *efx,
241 efx_dword_t *value,
242 unsigned int reg,
243 unsigned int page)
244{
245 unsigned long flags __attribute__ ((unused));
246
247 if (page == 0) {
248 spin_lock_irqsave(&efx->biu_lock, flags);
249 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
250 spin_unlock_irqrestore(&efx->biu_lock, flags);
251 } else {
252 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
253 }
254}
255
256#endif /* EFX_IO_H */
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 6c33459f9ea9..231e580acc9a 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -14,7 +14,6 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "boards.h"
18#include "workarounds.h" 17#include "workarounds.h"
19 18
20unsigned efx_mdio_id_oui(u32 id) 19unsigned efx_mdio_id_oui(u32 id)
@@ -249,7 +248,7 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
249int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 248int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
250{ 249{
251 struct ethtool_cmd prev; 250 struct ethtool_cmd prev;
252 u32 required; 251 bool xnp;
253 int reg; 252 int reg;
254 253
255 efx->phy_op->get_settings(efx, &prev); 254 efx->phy_op->get_settings(efx, &prev);
@@ -266,86 +265,60 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
266 return -EINVAL; 265 return -EINVAL;
267 266
268 /* Check that PHY supports these settings */ 267 /* Check that PHY supports these settings */
269 if (ecmd->autoneg) { 268 if (!ecmd->autoneg ||
270 required = SUPPORTED_Autoneg; 269 (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
271 } else if (ecmd->duplex) {
272 switch (ecmd->speed) {
273 case SPEED_10: required = SUPPORTED_10baseT_Full; break;
274 case SPEED_100: required = SUPPORTED_100baseT_Full; break;
275 default: return -EINVAL;
276 }
277 } else {
278 switch (ecmd->speed) {
279 case SPEED_10: required = SUPPORTED_10baseT_Half; break;
280 case SPEED_100: required = SUPPORTED_100baseT_Half; break;
281 default: return -EINVAL;
282 }
283 }
284 required |= ecmd->advertising;
285 if (required & ~prev.supported)
286 return -EINVAL; 270 return -EINVAL;
287 271
288 if (ecmd->autoneg) { 272 xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
289 bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full 273 || EFX_WORKAROUND_13204(efx));
290 || EFX_WORKAROUND_13204(efx)); 274
291 275 /* Set up the base page */
292 /* Set up the base page */ 276 reg = ADVERTISE_CSMA;
293 reg = ADVERTISE_CSMA; 277 if (ecmd->advertising & ADVERTISED_10baseT_Half)
294 if (ecmd->advertising & ADVERTISED_10baseT_Half) 278 reg |= ADVERTISE_10HALF;
295 reg |= ADVERTISE_10HALF; 279 if (ecmd->advertising & ADVERTISED_10baseT_Full)
296 if (ecmd->advertising & ADVERTISED_10baseT_Full) 280 reg |= ADVERTISE_10FULL;
297 reg |= ADVERTISE_10FULL; 281 if (ecmd->advertising & ADVERTISED_100baseT_Half)
298 if (ecmd->advertising & ADVERTISED_100baseT_Half) 282 reg |= ADVERTISE_100HALF;
299 reg |= ADVERTISE_100HALF; 283 if (ecmd->advertising & ADVERTISED_100baseT_Full)
300 if (ecmd->advertising & ADVERTISED_100baseT_Full) 284 reg |= ADVERTISE_100FULL;
301 reg |= ADVERTISE_100FULL; 285 if (xnp)
302 if (xnp) 286 reg |= ADVERTISE_RESV;
303 reg |= ADVERTISE_RESV; 287 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
304 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half | 288 ADVERTISED_1000baseT_Full))
305 ADVERTISED_1000baseT_Full)) 289 reg |= ADVERTISE_NPAGE;
306 reg |= ADVERTISE_NPAGE; 290 reg |= mii_advertise_flowctrl(efx->wanted_fc);
307 reg |= mii_advertise_flowctrl(efx->wanted_fc); 291 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
308 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); 292
309 293 /* Set up the (extended) next page if necessary */
310 /* Set up the (extended) next page if necessary */ 294 if (efx->phy_op->set_npage_adv)
311 if (efx->phy_op->set_npage_adv) 295 efx->phy_op->set_npage_adv(efx, ecmd->advertising);
312 efx->phy_op->set_npage_adv(efx, ecmd->advertising); 296
313 297 /* Enable and restart AN */
314 /* Enable and restart AN */ 298 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
315 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); 299 reg |= MDIO_AN_CTRL1_ENABLE;
316 reg |= MDIO_AN_CTRL1_ENABLE; 300 if (!(EFX_WORKAROUND_15195(efx) &&
317 if (!(EFX_WORKAROUND_15195(efx) && 301 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
318 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)) 302 reg |= MDIO_AN_CTRL1_RESTART;
319 reg |= MDIO_AN_CTRL1_RESTART; 303 if (xnp)
320 if (xnp) 304 reg |= MDIO_AN_CTRL1_XNP;
321 reg |= MDIO_AN_CTRL1_XNP; 305 else
322 else 306 reg &= ~MDIO_AN_CTRL1_XNP;
323 reg &= ~MDIO_AN_CTRL1_XNP; 307 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
324 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
325 } else {
326 /* Disable AN */
327 efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1,
328 MDIO_AN_CTRL1_ENABLE, false);
329
330 /* Set the basic control bits */
331 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1);
332 reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX);
333 if (ecmd->speed == SPEED_100)
334 reg |= MDIO_PMA_CTRL1_SPEED100;
335 if (ecmd->duplex)
336 reg |= MDIO_CTRL1_FULLDPLX;
337 efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg);
338 }
339 308
340 return 0; 309 return 0;
341} 310}
342 311
343enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx) 312enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
344{ 313{
345 int lpa; 314 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
346 315
347 if (!(efx->phy_op->mmds & MDIO_DEVS_AN)) 316 if (!(efx->wanted_fc & EFX_FC_AUTO))
348 return efx->wanted_fc; 317 return efx->wanted_fc;
349 lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA); 318
350 return efx_fc_resolve(efx->wanted_fc, lpa); 319 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
320
321 return mii_resolve_flowctrl_fdx(
322 mii_advertise_flowctrl(efx->wanted_fc),
323 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
351} 324}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 6b14421a7444..75b37f101231 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include "efx.h" 19#include "efx.h"
20#include "boards.h"
21 20
22static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } 21static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
23static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } 22static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 298566da638b..fddf8f5870ce 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -113,6 +113,13 @@ struct efx_special_buffer {
113 int entries; 113 int entries;
114}; 114};
115 115
116enum efx_flush_state {
117 FLUSH_NONE,
118 FLUSH_PENDING,
119 FLUSH_FAILED,
120 FLUSH_DONE,
121};
122
116/** 123/**
117 * struct efx_tx_buffer - An Efx TX buffer 124 * struct efx_tx_buffer - An Efx TX buffer
118 * @skb: The associated socket buffer. 125 * @skb: The associated socket buffer.
@@ -189,7 +196,7 @@ struct efx_tx_queue {
189 struct efx_nic *nic; 196 struct efx_nic *nic;
190 struct efx_tx_buffer *buffer; 197 struct efx_tx_buffer *buffer;
191 struct efx_special_buffer txd; 198 struct efx_special_buffer txd;
192 bool flushed; 199 enum efx_flush_state flushed;
193 200
194 /* Members used mainly on the completion path */ 201 /* Members used mainly on the completion path */
195 unsigned int read_count ____cacheline_aligned_in_smp; 202 unsigned int read_count ____cacheline_aligned_in_smp;
@@ -284,7 +291,7 @@ struct efx_rx_queue {
284 struct page *buf_page; 291 struct page *buf_page;
285 dma_addr_t buf_dma_addr; 292 dma_addr_t buf_dma_addr;
286 char *buf_data; 293 char *buf_data;
287 bool flushed; 294 enum efx_flush_state flushed;
288}; 295};
289 296
290/** 297/**
@@ -327,7 +334,7 @@ enum efx_rx_alloc_method {
327 * @used_flags: Channel is used by net driver 334 * @used_flags: Channel is used by net driver
328 * @enabled: Channel enabled indicator 335 * @enabled: Channel enabled indicator
329 * @irq: IRQ number (MSI and MSI-X only) 336 * @irq: IRQ number (MSI and MSI-X only)
330 * @irq_moderation: IRQ moderation value (in us) 337 * @irq_moderation: IRQ moderation value (in hardware ticks)
331 * @napi_dev: Net device used with NAPI 338 * @napi_dev: Net device used with NAPI
332 * @napi_str: NAPI control structure 339 * @napi_str: NAPI control structure
333 * @reset_work: Scheduled reset work thread 340 * @reset_work: Scheduled reset work thread
@@ -343,9 +350,9 @@ enum efx_rx_alloc_method {
343 * @rx_alloc_push_pages: RX allocation method currently in use for pushing 350 * @rx_alloc_push_pages: RX allocation method currently in use for pushing
344 * descriptors 351 * descriptors
345 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors 352 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
346 * @n_rx_ip_frag_err: Count of RX IP fragment errors
347 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors 353 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
348 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors 354 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
355 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
349 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 356 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
350 * @n_rx_overlength: Count of RX_OVERLENGTH errors 357 * @n_rx_overlength: Count of RX_OVERLENGTH errors
351 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 358 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
@@ -373,9 +380,9 @@ struct efx_channel {
373 int rx_alloc_push_pages; 380 int rx_alloc_push_pages;
374 381
375 unsigned n_rx_tobe_disc; 382 unsigned n_rx_tobe_disc;
376 unsigned n_rx_ip_frag_err;
377 unsigned n_rx_ip_hdr_chksum_err; 383 unsigned n_rx_ip_hdr_chksum_err;
378 unsigned n_rx_tcp_udp_chksum_err; 384 unsigned n_rx_tcp_udp_chksum_err;
385 unsigned n_rx_mcast_mismatch;
379 unsigned n_rx_frm_trunc; 386 unsigned n_rx_frm_trunc;
380 unsigned n_rx_overlength; 387 unsigned n_rx_overlength;
381 unsigned n_skbuff_leaks; 388 unsigned n_skbuff_leaks;
@@ -388,53 +395,29 @@ struct efx_channel {
388 395
389}; 396};
390 397
391/** 398enum efx_led_mode {
392 * struct efx_blinker - S/W LED blinking context 399 EFX_LED_OFF = 0,
393 * @state: Current state - on or off 400 EFX_LED_ON = 1,
394 * @resubmit: Timer resubmission flag 401 EFX_LED_DEFAULT = 2
395 * @timer: Control timer for blinking
396 */
397struct efx_blinker {
398 bool state;
399 bool resubmit;
400 struct timer_list timer;
401}; 402};
402 403
404#define STRING_TABLE_LOOKUP(val, member) \
405 ((val) < member ## _max) ? member ## _names[val] : "(invalid)"
403 406
404/** 407extern const char *efx_loopback_mode_names[];
405 * struct efx_board - board information 408extern const unsigned int efx_loopback_mode_max;
406 * @type: Board model type 409#define LOOPBACK_MODE(efx) \
407 * @major: Major rev. ('A', 'B' ...) 410 STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
408 * @minor: Minor rev. (0, 1, ...) 411
409 * @init: Initialisation function 412extern const char *efx_interrupt_mode_names[];
410 * @init_leds: Sets up board LEDs. May be called repeatedly. 413extern const unsigned int efx_interrupt_mode_max;
411 * @set_id_led: Turns the identification LED on or off 414#define INT_MODE(efx) \
412 * @blink: Starts/stops blinking 415 STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
413 * @monitor: Board-specific health check function
414 * @fini: Cleanup function
415 * @blinker: used to blink LEDs in software
416 * @hwmon_client: I2C client for hardware monitor
417 * @ioexp_client: I2C client for power/port control
418 */
419struct efx_board {
420 int type;
421 int major;
422 int minor;
423 int (*init) (struct efx_nic *nic);
424 /* As the LEDs are typically attached to the PHY, LEDs
425 * have a separate init callback that happens later than
426 * board init. */
427 void (*init_leds)(struct efx_nic *efx);
428 void (*set_id_led) (struct efx_nic *efx, bool state);
429 int (*monitor) (struct efx_nic *nic);
430 void (*blink) (struct efx_nic *efx, bool start);
431 void (*fini) (struct efx_nic *nic);
432 struct efx_blinker blinker;
433 struct i2c_client *hwmon_client, *ioexp_client;
434};
435 416
436#define STRING_TABLE_LOOKUP(val, member) \ 417extern const char *efx_reset_type_names[];
437 member ## _names[val] 418extern const unsigned int efx_reset_type_max;
419#define RESET_TYPE(type) \
420 STRING_TABLE_LOOKUP(type, efx_reset_type)
438 421
439enum efx_int_mode { 422enum efx_int_mode {
440 /* Be careful if altering to correct macro below */ 423 /* Be careful if altering to correct macro below */
@@ -458,7 +441,7 @@ enum phy_type {
458 PHY_TYPE_MAX /* Insert any new items before this */ 441 PHY_TYPE_MAX /* Insert any new items before this */
459}; 442};
460 443
461#define EFX_IS10G(efx) ((efx)->link_speed == 10000) 444#define EFX_IS10G(efx) ((efx)->link_state.speed == 10000)
462 445
463enum nic_state { 446enum nic_state {
464 STATE_INIT = 0, 447 STATE_INIT = 0,
@@ -506,29 +489,37 @@ enum efx_mac_type {
506 EFX_XMAC = 2, 489 EFX_XMAC = 2,
507}; 490};
508 491
509static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc, 492/**
510 unsigned int lpa) 493 * struct efx_link_state - Current state of the link
511{ 494 * @up: Link is up
512 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX)); 495 * @fd: Link is full-duplex
513 496 * @fc: Actual flow control flags
514 if (!(wanted_fc & EFX_FC_AUTO)) 497 * @speed: Link speed (Mbps)
515 return wanted_fc; 498 */
499struct efx_link_state {
500 bool up;
501 bool fd;
502 enum efx_fc_type fc;
503 unsigned int speed;
504};
516 505
517 return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa); 506static inline bool efx_link_state_equal(const struct efx_link_state *left,
507 const struct efx_link_state *right)
508{
509 return left->up == right->up && left->fd == right->fd &&
510 left->fc == right->fc && left->speed == right->speed;
518} 511}
519 512
520/** 513/**
521 * struct efx_mac_operations - Efx MAC operations table 514 * struct efx_mac_operations - Efx MAC operations table
522 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock 515 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
523 * @update_stats: Update statistics 516 * @update_stats: Update statistics
524 * @irq: Hardware MAC event callback. Serialised by the mac_lock 517 * @check_fault: Check fault state. True if fault present.
525 * @poll: Poll for hardware state. Serialised by the mac_lock
526 */ 518 */
527struct efx_mac_operations { 519struct efx_mac_operations {
528 void (*reconfigure) (struct efx_nic *efx); 520 void (*reconfigure) (struct efx_nic *efx);
529 void (*update_stats) (struct efx_nic *efx); 521 void (*update_stats) (struct efx_nic *efx);
530 void (*irq) (struct efx_nic *efx); 522 bool (*check_fault)(struct efx_nic *efx);
531 void (*poll) (struct efx_nic *efx);
532}; 523};
533 524
534/** 525/**
@@ -536,9 +527,8 @@ struct efx_mac_operations {
536 * @init: Initialise PHY 527 * @init: Initialise PHY
537 * @fini: Shut down PHY 528 * @fini: Shut down PHY
538 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 529 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
539 * @clear_interrupt: Clear down interrupt 530 * @poll: Update @link_state and report whether it changed.
540 * @blink: Blink LEDs 531 * Serialised by the mac_lock.
541 * @poll: Poll for hardware state. Serialised by the mac_lock.
542 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 532 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
543 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 533 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
544 * @set_npage_adv: Set abilities advertised in (Extended) Next Page 534 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
@@ -555,8 +545,7 @@ struct efx_phy_operations {
555 int (*init) (struct efx_nic *efx); 545 int (*init) (struct efx_nic *efx);
556 void (*fini) (struct efx_nic *efx); 546 void (*fini) (struct efx_nic *efx);
557 void (*reconfigure) (struct efx_nic *efx); 547 void (*reconfigure) (struct efx_nic *efx);
558 void (*clear_interrupt) (struct efx_nic *efx); 548 bool (*poll) (struct efx_nic *efx);
559 void (*poll) (struct efx_nic *efx);
560 void (*get_settings) (struct efx_nic *efx, 549 void (*get_settings) (struct efx_nic *efx,
561 struct ethtool_cmd *ecmd); 550 struct ethtool_cmd *ecmd);
562 int (*set_settings) (struct efx_nic *efx, 551 int (*set_settings) (struct efx_nic *efx,
@@ -690,17 +679,18 @@ union efx_multicast_hash {
690 * @interrupt_mode: Interrupt mode 679 * @interrupt_mode: Interrupt mode
691 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 680 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
692 * @irq_rx_moderation: IRQ moderation time for RX event queues 681 * @irq_rx_moderation: IRQ moderation time for RX event queues
693 * @i2c_adap: I2C adapter
694 * @board_info: Board-level information
695 * @state: Device state flag. Serialised by the rtnl_lock. 682 * @state: Device state flag. Serialised by the rtnl_lock.
696 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) 683 * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
697 * @tx_queue: TX DMA queues 684 * @tx_queue: TX DMA queues
698 * @rx_queue: RX DMA queues 685 * @rx_queue: RX DMA queues
699 * @channel: Channels 686 * @channel: Channels
687 * @next_buffer_table: First available buffer table id
700 * @n_rx_queues: Number of RX queues 688 * @n_rx_queues: Number of RX queues
701 * @n_channels: Number of channels in use 689 * @n_channels: Number of channels in use
702 * @rx_buffer_len: RX buffer length 690 * @rx_buffer_len: RX buffer length
703 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 691 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
692 * @int_error_count: Number of internal errors seen recently
693 * @int_error_expire: Time at which error count will be expired
704 * @irq_status: Interrupt status buffer 694 * @irq_status: Interrupt status buffer
705 * @last_irq_cpu: Last CPU to handle interrupt. 695 * @last_irq_cpu: Last CPU to handle interrupt.
706 * This register is written with the SMP processor ID whenever an 696 * This register is written with the SMP processor ID whenever an
@@ -716,10 +706,10 @@ union efx_multicast_hash {
716 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 706 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
717 * @port_inhibited, efx_monitor() and efx_reconfigure_port() 707 * @port_inhibited, efx_monitor() and efx_reconfigure_port()
718 * @port_enabled: Port enabled indicator. 708 * @port_enabled: Port enabled indicator.
719 * Serialises efx_stop_all(), efx_start_all(), efx_monitor(), 709 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
720 * efx_phy_work(), and efx_mac_work() with kernel interfaces. Safe to read 710 * efx_mac_work() with kernel interfaces. Safe to read under any
721 * under any one of the rtnl_lock, mac_lock, or netif_tx_lock, but all 711 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
722 * three must be held to modify it. 712 * be held to modify it.
723 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock 713 * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
724 * @port_initialized: Port initialized? 714 * @port_initialized: Port initialized?
725 * @net_dev: Operating system network device. Consider holding the rtnl lock 715 * @net_dev: Operating system network device. Consider holding the rtnl lock
@@ -731,26 +721,21 @@ union efx_multicast_hash {
731 * &struct net_device_stats. 721 * &struct net_device_stats.
732 * @stats_buffer: DMA buffer for statistics 722 * @stats_buffer: DMA buffer for statistics
733 * @stats_lock: Statistics update lock. Serialises statistics fetches 723 * @stats_lock: Statistics update lock. Serialises statistics fetches
734 * @stats_disable_count: Nest count for disabling statistics fetches
735 * @mac_op: MAC interface 724 * @mac_op: MAC interface
736 * @mac_address: Permanent MAC address 725 * @mac_address: Permanent MAC address
737 * @phy_type: PHY type 726 * @phy_type: PHY type
738 * @phy_lock: PHY access lock 727 * @mdio_lock: MDIO lock
739 * @phy_op: PHY interface 728 * @phy_op: PHY interface
740 * @phy_data: PHY private data (including PHY-specific stats) 729 * @phy_data: PHY private data (including PHY-specific stats)
741 * @mdio: PHY MDIO interface 730 * @mdio: PHY MDIO interface
742 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 731 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
743 * @mac_up: MAC link state 732 * @xmac_poll_required: XMAC link state needs polling
744 * @link_up: Link status 733 * @link_state: Current state of the link
745 * @link_fd: Link is full duplex
746 * @link_fc: Actualy flow control flags
747 * @link_speed: Link speed (Mbps)
748 * @n_link_state_changes: Number of times the link has changed state 734 * @n_link_state_changes: Number of times the link has changed state
749 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 735 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
750 * @multicast_hash: Multicast hash table 736 * @multicast_hash: Multicast hash table
751 * @wanted_fc: Wanted flow control flags 737 * @wanted_fc: Wanted flow control flags
752 * @phy_work: work item for dealing with PHY events 738 * @mac_work: Work item for changing MAC promiscuity and multicast hash
753 * @mac_work: work item for dealing with MAC events
754 * @loopback_mode: Loopback status 739 * @loopback_mode: Loopback status
755 * @loopback_modes: Supported loopback mode bitmask 740 * @loopback_modes: Supported loopback mode bitmask
756 * @loopback_selftest: Offline self-test private state 741 * @loopback_selftest: Offline self-test private state
@@ -774,9 +759,6 @@ struct efx_nic {
774 bool irq_rx_adaptive; 759 bool irq_rx_adaptive;
775 unsigned int irq_rx_moderation; 760 unsigned int irq_rx_moderation;
776 761
777 struct i2c_adapter i2c_adap;
778 struct efx_board board_info;
779
780 enum nic_state state; 762 enum nic_state state;
781 enum reset_type reset_pending; 763 enum reset_type reset_pending;
782 764
@@ -784,11 +766,15 @@ struct efx_nic {
784 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 766 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
785 struct efx_channel channel[EFX_MAX_CHANNELS]; 767 struct efx_channel channel[EFX_MAX_CHANNELS];
786 768
769 unsigned next_buffer_table;
787 int n_rx_queues; 770 int n_rx_queues;
788 int n_channels; 771 int n_channels;
789 unsigned int rx_buffer_len; 772 unsigned int rx_buffer_len;
790 unsigned int rx_buffer_order; 773 unsigned int rx_buffer_order;
791 774
775 unsigned int_error_count;
776 unsigned long int_error_expire;
777
792 struct efx_buffer irq_status; 778 struct efx_buffer irq_status;
793 volatile signed int last_irq_cpu; 779 volatile signed int last_irq_cpu;
794 780
@@ -815,24 +801,19 @@ struct efx_nic {
815 struct efx_mac_stats mac_stats; 801 struct efx_mac_stats mac_stats;
816 struct efx_buffer stats_buffer; 802 struct efx_buffer stats_buffer;
817 spinlock_t stats_lock; 803 spinlock_t stats_lock;
818 unsigned int stats_disable_count;
819 804
820 struct efx_mac_operations *mac_op; 805 struct efx_mac_operations *mac_op;
821 unsigned char mac_address[ETH_ALEN]; 806 unsigned char mac_address[ETH_ALEN];
822 807
823 enum phy_type phy_type; 808 enum phy_type phy_type;
824 spinlock_t phy_lock; 809 struct mutex mdio_lock;
825 struct work_struct phy_work;
826 struct efx_phy_operations *phy_op; 810 struct efx_phy_operations *phy_op;
827 void *phy_data; 811 void *phy_data;
828 struct mdio_if_info mdio; 812 struct mdio_if_info mdio;
829 enum efx_phy_mode phy_mode; 813 enum efx_phy_mode phy_mode;
830 814
831 bool mac_up; 815 bool xmac_poll_required;
832 bool link_up; 816 struct efx_link_state link_state;
833 bool link_fd;
834 enum efx_fc_type link_fc;
835 unsigned int link_speed;
836 unsigned int n_link_state_changes; 817 unsigned int n_link_state_changes;
837 818
838 bool promiscuous; 819 bool promiscuous;
@@ -862,48 +843,39 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
862 843
863/** 844/**
864 * struct efx_nic_type - Efx device type definition 845 * struct efx_nic_type - Efx device type definition
865 * @mem_bar: Memory BAR number 846 * @default_mac_ops: efx_mac_operations to set at startup
847 * @revision: Hardware architecture revision
866 * @mem_map_size: Memory BAR mapped size 848 * @mem_map_size: Memory BAR mapped size
867 * @txd_ptr_tbl_base: TX descriptor ring base address 849 * @txd_ptr_tbl_base: TX descriptor ring base address
868 * @rxd_ptr_tbl_base: RX descriptor ring base address 850 * @rxd_ptr_tbl_base: RX descriptor ring base address
869 * @buf_tbl_base: Buffer table base address 851 * @buf_tbl_base: Buffer table base address
870 * @evq_ptr_tbl_base: Event queue pointer table base address 852 * @evq_ptr_tbl_base: Event queue pointer table base address
871 * @evq_rptr_tbl_base: Event queue read-pointer table base address 853 * @evq_rptr_tbl_base: Event queue read-pointer table base address
872 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
873 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
874 * @evq_size: Event queue size (must be a power of two)
875 * @max_dma_mask: Maximum possible DMA mask 854 * @max_dma_mask: Maximum possible DMA mask
876 * @tx_dma_mask: TX DMA mask
877 * @bug5391_mask: Address mask for bug 5391 workaround
878 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
879 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
880 * @rx_buffer_padding: Padding added to each RX buffer 855 * @rx_buffer_padding: Padding added to each RX buffer
881 * @max_interrupt_mode: Highest capability interrupt mode supported 856 * @max_interrupt_mode: Highest capability interrupt mode supported
882 * from &enum efx_init_mode. 857 * from &enum efx_init_mode.
883 * @phys_addr_channels: Number of channels with physically addressed 858 * @phys_addr_channels: Number of channels with physically addressed
884 * descriptors 859 * descriptors
860 * @tx_dc_base: Base address in SRAM of TX queue descriptor caches
861 * @rx_dc_base: Base address in SRAM of RX queue descriptor caches
885 */ 862 */
886struct efx_nic_type { 863struct efx_nic_type {
887 unsigned int mem_bar; 864 struct efx_mac_operations *default_mac_ops;
865
866 int revision;
888 unsigned int mem_map_size; 867 unsigned int mem_map_size;
889 unsigned int txd_ptr_tbl_base; 868 unsigned int txd_ptr_tbl_base;
890 unsigned int rxd_ptr_tbl_base; 869 unsigned int rxd_ptr_tbl_base;
891 unsigned int buf_tbl_base; 870 unsigned int buf_tbl_base;
892 unsigned int evq_ptr_tbl_base; 871 unsigned int evq_ptr_tbl_base;
893 unsigned int evq_rptr_tbl_base; 872 unsigned int evq_rptr_tbl_base;
894
895 unsigned int txd_ring_mask;
896 unsigned int rxd_ring_mask;
897 unsigned int evq_size;
898 u64 max_dma_mask; 873 u64 max_dma_mask;
899 unsigned int tx_dma_mask;
900 unsigned bug5391_mask;
901
902 int rx_xoff_thresh;
903 int rx_xon_thresh;
904 unsigned int rx_buffer_padding; 874 unsigned int rx_buffer_padding;
905 unsigned int max_interrupt_mode; 875 unsigned int max_interrupt_mode;
906 unsigned int phys_addr_channels; 876 unsigned int phys_addr_channels;
877 unsigned int tx_dc_base;
878 unsigned int rx_dc_base;
907}; 879};
908 880
909/************************************************************************** 881/**************************************************************************
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index c1cff9c0c173..2ad1cec2c720 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -16,16 +16,16 @@
16extern struct efx_phy_operations falcon_sfx7101_phy_ops; 16extern struct efx_phy_operations falcon_sfx7101_phy_ops;
17extern struct efx_phy_operations falcon_sft9001_phy_ops; 17extern struct efx_phy_operations falcon_sft9001_phy_ops;
18 18
19extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink); 19extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
20 20
21/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed 21/* Wait for the PHY to boot. Return 0 on success, -EINVAL if the PHY failed
22 * to boot due to corrupt flash, or some other negative error code. */ 22 * to boot due to corrupt flash, or some other negative error code. */
23extern int sft9001_wait_boot(struct efx_nic *efx); 23extern int sft9001_wait_boot(struct efx_nic *efx);
24 24
25/**************************************************************************** 25/****************************************************************************
26 * AMCC/Quake QT20xx PHYs 26 * AMCC/Quake QT202x PHYs
27 */ 27 */
28extern struct efx_phy_operations falcon_xfp_phy_ops; 28extern struct efx_phy_operations falcon_qt202x_phy_ops;
29 29
30/* These PHYs provide various H/W control states for LEDs */ 30/* These PHYs provide various H/W control states for LEDs */
31#define QUAKE_LED_LINK_INVAL (0) 31#define QUAKE_LED_LINK_INVAL (0)
@@ -39,6 +39,6 @@ extern struct efx_phy_operations falcon_xfp_phy_ops;
39#define QUAKE_LED_TXLINK (0) 39#define QUAKE_LED_TXLINK (0)
40#define QUAKE_LED_RXLINK (8) 40#define QUAKE_LED_RXLINK (8)
41 41
42extern void xfp_set_led(struct efx_nic *p, int led, int state); 42extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
43 43
44#endif 44#endif
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/qt202x_phy.c
index e6b3d5eaddba..3d7370e39787 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -7,8 +7,7 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9/* 9/*
10 * Driver for SFP+ and XFP optical PHYs plus some support specific to the 10 * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
11 * AMCC QT20xx adapters; see www.amcc.com for details
12 */ 11 */
13 12
14#include <linux/timer.h> 13#include <linux/timer.h>
@@ -18,13 +17,13 @@
18#include "phy.h" 17#include "phy.h"
19#include "falcon.h" 18#include "falcon.h"
20 19
21#define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS | \ 20#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
22 MDIO_DEVS_PMAPMD | \ 21 MDIO_DEVS_PMAPMD | \
23 MDIO_DEVS_PHYXS) 22 MDIO_DEVS_PHYXS)
24 23
25#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ 24#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
26 (1 << LOOPBACK_PMAPMD) | \ 25 (1 << LOOPBACK_PMAPMD) | \
27 (1 << LOOPBACK_NETWORK)) 26 (1 << LOOPBACK_NETWORK))
28 27
29/****************************************************************************/ 28/****************************************************************************/
30/* Quake-specific MDIO registers */ 29/* Quake-specific MDIO registers */
@@ -45,18 +44,18 @@
45#define PCS_VEND1_REG 0xc000 44#define PCS_VEND1_REG 0xc000
46#define PCS_VEND1_LBTXD_LBN 5 45#define PCS_VEND1_LBTXD_LBN 5
47 46
48void xfp_set_led(struct efx_nic *p, int led, int mode) 47void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
49{ 48{
50 int addr = MDIO_QUAKE_LED0_REG + led; 49 int addr = MDIO_QUAKE_LED0_REG + led;
51 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); 50 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
52} 51}
53 52
54struct xfp_phy_data { 53struct qt202x_phy_data {
55 enum efx_phy_mode phy_mode; 54 enum efx_phy_mode phy_mode;
56}; 55};
57 56
58#define XFP_MAX_RESET_TIME 500 57#define QT2022C2_MAX_RESET_TIME 500
59#define XFP_RESET_WAIT 10 58#define QT2022C2_RESET_WAIT 10
60 59
61static int qt2025c_wait_reset(struct efx_nic *efx) 60static int qt2025c_wait_reset(struct efx_nic *efx)
62{ 61{
@@ -97,7 +96,7 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
97 return 0; 96 return 0;
98} 97}
99 98
100static int xfp_reset_phy(struct efx_nic *efx) 99static int qt202x_reset_phy(struct efx_nic *efx)
101{ 100{
102 int rc; 101 int rc;
103 102
@@ -111,8 +110,9 @@ static int xfp_reset_phy(struct efx_nic *efx)
111 /* Reset the PHYXS MMD. This is documented as doing 110 /* Reset the PHYXS MMD. This is documented as doing
112 * a complete soft reset. */ 111 * a complete soft reset. */
113 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, 112 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
114 XFP_MAX_RESET_TIME / XFP_RESET_WAIT, 113 QT2022C2_MAX_RESET_TIME /
115 XFP_RESET_WAIT); 114 QT2022C2_RESET_WAIT,
115 QT2022C2_RESET_WAIT);
116 if (rc < 0) 116 if (rc < 0)
117 goto fail; 117 goto fail;
118 } 118 }
@@ -122,11 +122,11 @@ static int xfp_reset_phy(struct efx_nic *efx)
122 122
123 /* Check that all the MMDs we expect are present and responding. We 123 /* Check that all the MMDs we expect are present and responding. We
124 * expect faults on some if the link is down, but not on the PHY XS */ 124 * expect faults on some if the link is down, but not on the PHY XS */
125 rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS); 125 rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
126 if (rc < 0) 126 if (rc < 0)
127 goto fail; 127 goto fail;
128 128
129 efx->board_info.init_leds(efx); 129 falcon_board(efx)->type->init_phy(efx);
130 130
131 return rc; 131 return rc;
132 132
@@ -135,60 +135,52 @@ static int xfp_reset_phy(struct efx_nic *efx)
135 return rc; 135 return rc;
136} 136}
137 137
138static int xfp_phy_init(struct efx_nic *efx) 138static int qt202x_phy_init(struct efx_nic *efx)
139{ 139{
140 struct xfp_phy_data *phy_data; 140 struct qt202x_phy_data *phy_data;
141 u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 141 u32 devid;
142 int rc; 142 int rc;
143 143
144 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 144 rc = qt202x_reset_phy(efx);
145 if (rc) {
146 EFX_ERR(efx, "PHY init failed\n");
147 return rc;
148 }
149
150 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
145 if (!phy_data) 151 if (!phy_data)
146 return -ENOMEM; 152 return -ENOMEM;
147 efx->phy_data = phy_data; 153 efx->phy_data = phy_data;
148 154
155 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
149 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", 156 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
150 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid), 157 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
151 efx_mdio_id_rev(devid)); 158 efx_mdio_id_rev(devid));
152 159
153 phy_data->phy_mode = efx->phy_mode; 160 phy_data->phy_mode = efx->phy_mode;
154
155 rc = xfp_reset_phy(efx);
156
157 EFX_INFO(efx, "PHY init %s.\n",
158 rc ? "failed" : "successful");
159 if (rc < 0)
160 goto fail;
161
162 return 0; 161 return 0;
163
164 fail:
165 kfree(efx->phy_data);
166 efx->phy_data = NULL;
167 return rc;
168} 162}
169 163
170static void xfp_phy_clear_interrupt(struct efx_nic *efx) 164static int qt202x_link_ok(struct efx_nic *efx)
171{ 165{
172 /* Read to clear link status alarm */ 166 return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
173 efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT);
174} 167}
175 168
176static int xfp_link_ok(struct efx_nic *efx) 169static bool qt202x_phy_poll(struct efx_nic *efx)
177{ 170{
178 return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS); 171 bool was_up = efx->link_state.up;
179}
180 172
181static void xfp_phy_poll(struct efx_nic *efx) 173 efx->link_state.up = qt202x_link_ok(efx);
182{ 174 efx->link_state.speed = 10000;
183 int link_up = xfp_link_ok(efx); 175 efx->link_state.fd = true;
184 /* Simulate a PHY event if link state has changed */ 176 efx->link_state.fc = efx->wanted_fc;
185 if (link_up != efx->link_up) 177
186 falcon_sim_phy_event(efx); 178 return efx->link_state.up != was_up;
187} 179}
188 180
189static void xfp_phy_reconfigure(struct efx_nic *efx) 181static void qt202x_phy_reconfigure(struct efx_nic *efx)
190{ 182{
191 struct xfp_phy_data *phy_data = efx->phy_data; 183 struct qt202x_phy_data *phy_data = efx->phy_data;
192 184
193 if (efx->phy_type == PHY_TYPE_QT2025C) { 185 if (efx->phy_type == PHY_TYPE_QT2025C) {
194 /* There are several different register bits which can 186 /* There are several different register bits which can
@@ -207,7 +199,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
207 /* Reset the PHY when moving from tx off to tx on */ 199 /* Reset the PHY when moving from tx off to tx on */
208 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) && 200 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
209 (phy_data->phy_mode & PHY_MODE_TX_DISABLED)) 201 (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
210 xfp_reset_phy(efx); 202 qt202x_reset_phy(efx);
211 203
212 efx_mdio_transmit_disable(efx); 204 efx_mdio_transmit_disable(efx);
213 } 205 }
@@ -215,36 +207,28 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
215 efx_mdio_phy_reconfigure(efx); 207 efx_mdio_phy_reconfigure(efx);
216 208
217 phy_data->phy_mode = efx->phy_mode; 209 phy_data->phy_mode = efx->phy_mode;
218 efx->link_up = xfp_link_ok(efx);
219 efx->link_speed = 10000;
220 efx->link_fd = true;
221 efx->link_fc = efx->wanted_fc;
222} 210}
223 211
224static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 212static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
225{ 213{
226 mdio45_ethtool_gset(&efx->mdio, ecmd); 214 mdio45_ethtool_gset(&efx->mdio, ecmd);
227} 215}
228 216
229static void xfp_phy_fini(struct efx_nic *efx) 217static void qt202x_phy_fini(struct efx_nic *efx)
230{ 218{
231 /* Clobber the LED if it was blinking */
232 efx->board_info.blink(efx, false);
233
234 /* Free the context block */ 219 /* Free the context block */
235 kfree(efx->phy_data); 220 kfree(efx->phy_data);
236 efx->phy_data = NULL; 221 efx->phy_data = NULL;
237} 222}
238 223
239struct efx_phy_operations falcon_xfp_phy_ops = { 224struct efx_phy_operations falcon_qt202x_phy_ops = {
240 .macs = EFX_XMAC, 225 .macs = EFX_XMAC,
241 .init = xfp_phy_init, 226 .init = qt202x_phy_init,
242 .reconfigure = xfp_phy_reconfigure, 227 .reconfigure = qt202x_phy_reconfigure,
243 .poll = xfp_phy_poll, 228 .poll = qt202x_phy_poll,
244 .fini = xfp_phy_fini, 229 .fini = qt202x_phy_fini,
245 .clear_interrupt = xfp_phy_clear_interrupt, 230 .get_settings = qt202x_phy_get_settings,
246 .get_settings = xfp_phy_get_settings,
247 .set_settings = efx_mdio_set_settings, 231 .set_settings = efx_mdio_set_settings,
248 .mmds = XFP_REQUIRED_DEVS, 232 .mmds = QT202X_REQUIRED_DEVS,
249 .loopbacks = XFP_LOOPBACKS, 233 .loopbacks = QT202X_LOOPBACKS,
250}; 234};
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
new file mode 100644
index 000000000000..f336d83d5fa0
--- /dev/null
+++ b/drivers/net/sfc/regs.h
@@ -0,0 +1,3180 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_REGS_H
12#define EFX_REGS_H
13
14/*
15 * Falcon hardware architecture definitions have a name prefix following
16 * the format:
17 *
18 * F<type>_<min-rev><max-rev>_
19 *
20 * The following <type> strings are used:
21 *
22 * MMIO register MC register Host memory structure
23 * -------------------------------------------------------------
24 * Address R MCR
25 * Bitfield RF MCRF SF
26 * Enumerator FE MCFE SE
27 *
28 * <min-rev> is the first revision to which the definition applies:
29 *
30 * A: Falcon A1 (SFC4000AB)
31 * B: Falcon B0 (SFC4000BA)
32 * C: Siena A0 (SFL9021AA)
33 *
34 * If the definition has been changed or removed in later revisions
35 * then <max-rev> is the last revision to which the definition applies;
36 * otherwise it is "Z".
37 */
38
39/**************************************************************************
40 *
41 * Falcon/Siena registers and descriptors
42 *
43 **************************************************************************
44 */
45
46/* ADR_REGION_REG: Address region register */
47#define FR_AZ_ADR_REGION 0x00000000
48#define FRF_AZ_ADR_REGION3_LBN 96
49#define FRF_AZ_ADR_REGION3_WIDTH 18
50#define FRF_AZ_ADR_REGION2_LBN 64
51#define FRF_AZ_ADR_REGION2_WIDTH 18
52#define FRF_AZ_ADR_REGION1_LBN 32
53#define FRF_AZ_ADR_REGION1_WIDTH 18
54#define FRF_AZ_ADR_REGION0_LBN 0
55#define FRF_AZ_ADR_REGION0_WIDTH 18
56
57/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
58#define FR_AZ_INT_EN_KER 0x00000010
59#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
60#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
61#define FRF_AZ_KER_INT_CHAR_LBN 4
62#define FRF_AZ_KER_INT_CHAR_WIDTH 1
63#define FRF_AZ_KER_INT_KER_LBN 3
64#define FRF_AZ_KER_INT_KER_WIDTH 1
65#define FRF_AZ_DRV_INT_EN_KER_LBN 0
66#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
67
68/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
69#define FR_BZ_INT_EN_CHAR 0x00000020
70#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
71#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
72#define FRF_BZ_CHAR_INT_CHAR_LBN 4
73#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
74#define FRF_BZ_CHAR_INT_KER_LBN 3
75#define FRF_BZ_CHAR_INT_KER_WIDTH 1
76#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
77#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
78
79/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
80#define FR_AZ_INT_ADR_KER 0x00000030
81#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
82#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
83#define FRF_AZ_INT_ADR_KER_LBN 0
84#define FRF_AZ_INT_ADR_KER_WIDTH 64
85
86/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
87#define FR_BZ_INT_ADR_CHAR 0x00000040
88#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
89#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
90#define FRF_BZ_INT_ADR_CHAR_LBN 0
91#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
92
93/* INT_ACK_KER: Kernel interrupt acknowledge register */
94#define FR_AA_INT_ACK_KER 0x00000050
95#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
96#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
97
98/* INT_ISR0_REG: Function 0 Interrupt Acknowlege Status register */
99#define FR_BZ_INT_ISR0 0x00000090
100#define FRF_BZ_INT_ISR_REG_LBN 0
101#define FRF_BZ_INT_ISR_REG_WIDTH 64
102
103/* HW_INIT_REG: Hardware initialization register */
104#define FR_AZ_HW_INIT 0x000000c0
105#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
106#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
107#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
108#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
109#define FRF_CZ_TX_MRG_TAGS_LBN 120
110#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
111#define FRF_AB_TRGT_MASK_ALL_LBN 100
112#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
113#define FRF_AZ_DOORBELL_DROP_LBN 92
114#define FRF_AZ_DOORBELL_DROP_WIDTH 8
115#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
116#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
117#define FRF_AB_PE_EIDLE_DIS_LBN 75
118#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
119#define FRF_AA_FC_BLOCKING_EN_LBN 45
120#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
121#define FRF_BZ_B2B_REQ_EN_LBN 45
122#define FRF_BZ_B2B_REQ_EN_WIDTH 1
123#define FRF_AA_B2B_REQ_EN_LBN 44
124#define FRF_AA_B2B_REQ_EN_WIDTH 1
125#define FRF_BB_FC_BLOCKING_EN_LBN 44
126#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
127#define FRF_AZ_POST_WR_MASK_LBN 40
128#define FRF_AZ_POST_WR_MASK_WIDTH 4
129#define FRF_AZ_TLP_TC_LBN 34
130#define FRF_AZ_TLP_TC_WIDTH 3
131#define FRF_AZ_TLP_ATTR_LBN 32
132#define FRF_AZ_TLP_ATTR_WIDTH 2
133#define FRF_AB_INTB_VEC_LBN 24
134#define FRF_AB_INTB_VEC_WIDTH 5
135#define FRF_AB_INTA_VEC_LBN 16
136#define FRF_AB_INTA_VEC_WIDTH 5
137#define FRF_AZ_WD_TIMER_LBN 8
138#define FRF_AZ_WD_TIMER_WIDTH 8
139#define FRF_AZ_US_DISABLE_LBN 5
140#define FRF_AZ_US_DISABLE_WIDTH 1
141#define FRF_AZ_TLP_EP_LBN 4
142#define FRF_AZ_TLP_EP_WIDTH 1
143#define FRF_AZ_ATTR_SEL_LBN 3
144#define FRF_AZ_ATTR_SEL_WIDTH 1
145#define FRF_AZ_TD_SEL_LBN 1
146#define FRF_AZ_TD_SEL_WIDTH 1
147#define FRF_AZ_TLP_TD_LBN 0
148#define FRF_AZ_TLP_TD_WIDTH 1
149
150/* EE_SPI_HCMD_REG: SPI host command register */
151#define FR_AB_EE_SPI_HCMD 0x00000100
152#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
153#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
154#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
155#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
156#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
157#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
158#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
159#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
160#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
161#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
162#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
163#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
164#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
165#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
166#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
167#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
168
169/* USR_EV_CFG: User Level Event Configuration register */
170#define FR_CZ_USR_EV_CFG 0x00000100
171#define FRF_CZ_USREV_DIS_LBN 16
172#define FRF_CZ_USREV_DIS_WIDTH 1
173#define FRF_CZ_DFLT_EVQ_LBN 0
174#define FRF_CZ_DFLT_EVQ_WIDTH 10
175
176/* EE_SPI_HADR_REG: SPI host address register */
177#define FR_AB_EE_SPI_HADR 0x00000110
178#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
179#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
180#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
181#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
182
183/* EE_SPI_HDATA_REG: SPI host data register */
184#define FR_AB_EE_SPI_HDATA 0x00000120
185#define FRF_AB_EE_SPI_HDATA3_LBN 96
186#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
187#define FRF_AB_EE_SPI_HDATA2_LBN 64
188#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
189#define FRF_AB_EE_SPI_HDATA1_LBN 32
190#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
191#define FRF_AB_EE_SPI_HDATA0_LBN 0
192#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
193
194/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
195#define FR_AB_EE_BASE_PAGE 0x00000130
196#define FRF_AB_EE_EXPROM_MASK_LBN 16
197#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
198#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
199#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
200
201/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
202#define FR_AB_EE_VPD_CFG0 0x00000140
203#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
204#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
205#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
206#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
207#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
208#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
209#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
210#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
211#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
212#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
213#define FRF_AB_EE_VPDW_LENGTH_LBN 80
214#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
215#define FRF_AB_EE_VPDW_BASE_LBN 64
216#define FRF_AB_EE_VPDW_BASE_WIDTH 15
217#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
218#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
219#define FRF_AB_EE_VPD_BASE_LBN 32
220#define FRF_AB_EE_VPD_BASE_WIDTH 24
221#define FRF_AB_EE_VPD_LENGTH_LBN 16
222#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
223#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
224#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
225#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
226#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
227#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
228#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
229#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
230#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
231#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
232#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
233#define FRF_AB_EE_VPD_EN_LBN 0
234#define FRF_AB_EE_VPD_EN_WIDTH 1
235
236/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
237#define FR_AB_EE_VPD_SW_CNTL 0x00000150
238#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
239#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
240#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
241#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
242#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
243#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
244
245/* EE_VPD_SW_DATA_REG: VPD access SW data register */
246#define FR_AB_EE_VPD_SW_DATA 0x00000160
247#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
248#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
249
250/* PBMX_DBG_IADDR_REG: Capture Module address register */
251#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
252#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
253#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
254
255/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
256#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
257#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
258#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
259#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
260#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
261#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
262#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
263
264/* PBMX_DBG_IDATA_REG: Capture Module data register */
265#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
266#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
267#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
268
269/* NIC_STAT_REG: NIC status register */
270#define FR_AB_NIC_STAT 0x00000200
271#define FRF_BB_AER_DIS_LBN 34
272#define FRF_BB_AER_DIS_WIDTH 1
273#define FRF_BB_EE_STRAP_EN_LBN 31
274#define FRF_BB_EE_STRAP_EN_WIDTH 1
275#define FRF_BB_EE_STRAP_LBN 24
276#define FRF_BB_EE_STRAP_WIDTH 4
277#define FRF_BB_REVISION_ID_LBN 17
278#define FRF_BB_REVISION_ID_WIDTH 7
279#define FRF_AB_ONCHIP_SRAM_LBN 16
280#define FRF_AB_ONCHIP_SRAM_WIDTH 1
281#define FRF_AB_SF_PRST_LBN 9
282#define FRF_AB_SF_PRST_WIDTH 1
283#define FRF_AB_EE_PRST_LBN 8
284#define FRF_AB_EE_PRST_WIDTH 1
285#define FRF_AB_ATE_MODE_LBN 3
286#define FRF_AB_ATE_MODE_WIDTH 1
287#define FRF_AB_STRAP_PINS_LBN 0
288#define FRF_AB_STRAP_PINS_WIDTH 3
289
290/* GPIO_CTL_REG: GPIO control register */
291#define FR_AB_GPIO_CTL 0x00000210
292#define FRF_AB_GPIO_OUT3_LBN 112
293#define FRF_AB_GPIO_OUT3_WIDTH 16
294#define FRF_AB_GPIO_IN3_LBN 104
295#define FRF_AB_GPIO_IN3_WIDTH 8
296#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
297#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
298#define FRF_AB_GPIO_OUT2_LBN 80
299#define FRF_AB_GPIO_OUT2_WIDTH 16
300#define FRF_AB_GPIO_IN2_LBN 72
301#define FRF_AB_GPIO_IN2_WIDTH 8
302#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
303#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
304#define FRF_AB_GPIO15_OEN_LBN 63
305#define FRF_AB_GPIO15_OEN_WIDTH 1
306#define FRF_AB_GPIO14_OEN_LBN 62
307#define FRF_AB_GPIO14_OEN_WIDTH 1
308#define FRF_AB_GPIO13_OEN_LBN 61
309#define FRF_AB_GPIO13_OEN_WIDTH 1
310#define FRF_AB_GPIO12_OEN_LBN 60
311#define FRF_AB_GPIO12_OEN_WIDTH 1
312#define FRF_AB_GPIO11_OEN_LBN 59
313#define FRF_AB_GPIO11_OEN_WIDTH 1
314#define FRF_AB_GPIO10_OEN_LBN 58
315#define FRF_AB_GPIO10_OEN_WIDTH 1
316#define FRF_AB_GPIO9_OEN_LBN 57
317#define FRF_AB_GPIO9_OEN_WIDTH 1
318#define FRF_AB_GPIO8_OEN_LBN 56
319#define FRF_AB_GPIO8_OEN_WIDTH 1
320#define FRF_AB_GPIO15_OUT_LBN 55
321#define FRF_AB_GPIO15_OUT_WIDTH 1
322#define FRF_AB_GPIO14_OUT_LBN 54
323#define FRF_AB_GPIO14_OUT_WIDTH 1
324#define FRF_AB_GPIO13_OUT_LBN 53
325#define FRF_AB_GPIO13_OUT_WIDTH 1
326#define FRF_AB_GPIO12_OUT_LBN 52
327#define FRF_AB_GPIO12_OUT_WIDTH 1
328#define FRF_AB_GPIO11_OUT_LBN 51
329#define FRF_AB_GPIO11_OUT_WIDTH 1
330#define FRF_AB_GPIO10_OUT_LBN 50
331#define FRF_AB_GPIO10_OUT_WIDTH 1
332#define FRF_AB_GPIO9_OUT_LBN 49
333#define FRF_AB_GPIO9_OUT_WIDTH 1
334#define FRF_AB_GPIO8_OUT_LBN 48
335#define FRF_AB_GPIO8_OUT_WIDTH 1
336#define FRF_AB_GPIO15_IN_LBN 47
337#define FRF_AB_GPIO15_IN_WIDTH 1
338#define FRF_AB_GPIO14_IN_LBN 46
339#define FRF_AB_GPIO14_IN_WIDTH 1
340#define FRF_AB_GPIO13_IN_LBN 45
341#define FRF_AB_GPIO13_IN_WIDTH 1
342#define FRF_AB_GPIO12_IN_LBN 44
343#define FRF_AB_GPIO12_IN_WIDTH 1
344#define FRF_AB_GPIO11_IN_LBN 43
345#define FRF_AB_GPIO11_IN_WIDTH 1
346#define FRF_AB_GPIO10_IN_LBN 42
347#define FRF_AB_GPIO10_IN_WIDTH 1
348#define FRF_AB_GPIO9_IN_LBN 41
349#define FRF_AB_GPIO9_IN_WIDTH 1
350#define FRF_AB_GPIO8_IN_LBN 40
351#define FRF_AB_GPIO8_IN_WIDTH 1
352#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
353#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
354#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
355#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
356#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
357#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
358#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
359#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
360#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
361#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
362#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
363#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
364#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
365#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
366#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
367#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
368#define FRF_AB_CLK156_OUT_EN_LBN 31
369#define FRF_AB_CLK156_OUT_EN_WIDTH 1
370#define FRF_AB_USE_NIC_CLK_LBN 30
371#define FRF_AB_USE_NIC_CLK_WIDTH 1
372#define FRF_AB_GPIO5_OEN_LBN 29
373#define FRF_AB_GPIO5_OEN_WIDTH 1
374#define FRF_AB_GPIO4_OEN_LBN 28
375#define FRF_AB_GPIO4_OEN_WIDTH 1
376#define FRF_AB_GPIO3_OEN_LBN 27
377#define FRF_AB_GPIO3_OEN_WIDTH 1
378#define FRF_AB_GPIO2_OEN_LBN 26
379#define FRF_AB_GPIO2_OEN_WIDTH 1
380#define FRF_AB_GPIO1_OEN_LBN 25
381#define FRF_AB_GPIO1_OEN_WIDTH 1
382#define FRF_AB_GPIO0_OEN_LBN 24
383#define FRF_AB_GPIO0_OEN_WIDTH 1
384#define FRF_AB_GPIO7_OUT_LBN 23
385#define FRF_AB_GPIO7_OUT_WIDTH 1
386#define FRF_AB_GPIO6_OUT_LBN 22
387#define FRF_AB_GPIO6_OUT_WIDTH 1
388#define FRF_AB_GPIO5_OUT_LBN 21
389#define FRF_AB_GPIO5_OUT_WIDTH 1
390#define FRF_AB_GPIO4_OUT_LBN 20
391#define FRF_AB_GPIO4_OUT_WIDTH 1
392#define FRF_AB_GPIO3_OUT_LBN 19
393#define FRF_AB_GPIO3_OUT_WIDTH 1
394#define FRF_AB_GPIO2_OUT_LBN 18
395#define FRF_AB_GPIO2_OUT_WIDTH 1
396#define FRF_AB_GPIO1_OUT_LBN 17
397#define FRF_AB_GPIO1_OUT_WIDTH 1
398#define FRF_AB_GPIO0_OUT_LBN 16
399#define FRF_AB_GPIO0_OUT_WIDTH 1
400#define FRF_AB_GPIO7_IN_LBN 15
401#define FRF_AB_GPIO7_IN_WIDTH 1
402#define FRF_AB_GPIO6_IN_LBN 14
403#define FRF_AB_GPIO6_IN_WIDTH 1
404#define FRF_AB_GPIO5_IN_LBN 13
405#define FRF_AB_GPIO5_IN_WIDTH 1
406#define FRF_AB_GPIO4_IN_LBN 12
407#define FRF_AB_GPIO4_IN_WIDTH 1
408#define FRF_AB_GPIO3_IN_LBN 11
409#define FRF_AB_GPIO3_IN_WIDTH 1
410#define FRF_AB_GPIO2_IN_LBN 10
411#define FRF_AB_GPIO2_IN_WIDTH 1
412#define FRF_AB_GPIO1_IN_LBN 9
413#define FRF_AB_GPIO1_IN_WIDTH 1
414#define FRF_AB_GPIO0_IN_LBN 8
415#define FRF_AB_GPIO0_IN_WIDTH 1
416#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
417#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
418#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
419#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
420#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
421#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
422#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
423#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
424#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
425#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
426#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
427#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
428#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
429#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
430#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
431#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
432
433/* GLB_CTL_REG: Global control register */
434#define FR_AB_GLB_CTL 0x00000220
435#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
436#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
437#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
438#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
439#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
440#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
441#define FRF_AA_PCIX_RST_CTL_LBN 60
442#define FRF_AA_PCIX_RST_CTL_WIDTH 1
443#define FRF_BB_BIU_RST_CTL_LBN 60
444#define FRF_BB_BIU_RST_CTL_WIDTH 1
445#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
446#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
447#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
448#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
449#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
450#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
451#define FRF_AB_XGRX_RST_CTL_LBN 56
452#define FRF_AB_XGRX_RST_CTL_WIDTH 1
453#define FRF_AB_XGTX_RST_CTL_LBN 55
454#define FRF_AB_XGTX_RST_CTL_WIDTH 1
455#define FRF_AB_EM_RST_CTL_LBN 54
456#define FRF_AB_EM_RST_CTL_WIDTH 1
457#define FRF_AB_EV_RST_CTL_LBN 53
458#define FRF_AB_EV_RST_CTL_WIDTH 1
459#define FRF_AB_SR_RST_CTL_LBN 52
460#define FRF_AB_SR_RST_CTL_WIDTH 1
461#define FRF_AB_RX_RST_CTL_LBN 51
462#define FRF_AB_RX_RST_CTL_WIDTH 1
463#define FRF_AB_TX_RST_CTL_LBN 50
464#define FRF_AB_TX_RST_CTL_WIDTH 1
465#define FRF_AB_EE_RST_CTL_LBN 49
466#define FRF_AB_EE_RST_CTL_WIDTH 1
467#define FRF_AB_CS_RST_CTL_LBN 48
468#define FRF_AB_CS_RST_CTL_WIDTH 1
469#define FRF_AB_HOT_RST_CTL_LBN 40
470#define FRF_AB_HOT_RST_CTL_WIDTH 2
471#define FRF_AB_RST_EXT_PHY_LBN 31
472#define FRF_AB_RST_EXT_PHY_WIDTH 1
473#define FRF_AB_RST_XAUI_SD_LBN 30
474#define FRF_AB_RST_XAUI_SD_WIDTH 1
475#define FRF_AB_RST_PCIE_SD_LBN 29
476#define FRF_AB_RST_PCIE_SD_WIDTH 1
477#define FRF_AA_RST_PCIX_LBN 28
478#define FRF_AA_RST_PCIX_WIDTH 1
479#define FRF_BB_RST_BIU_LBN 28
480#define FRF_BB_RST_BIU_WIDTH 1
481#define FRF_AB_RST_PCIE_STKY_LBN 27
482#define FRF_AB_RST_PCIE_STKY_WIDTH 1
483#define FRF_AB_RST_PCIE_NSTKY_LBN 26
484#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
485#define FRF_AB_RST_PCIE_CORE_LBN 25
486#define FRF_AB_RST_PCIE_CORE_WIDTH 1
487#define FRF_AB_RST_XGRX_LBN 24
488#define FRF_AB_RST_XGRX_WIDTH 1
489#define FRF_AB_RST_XGTX_LBN 23
490#define FRF_AB_RST_XGTX_WIDTH 1
491#define FRF_AB_RST_EM_LBN 22
492#define FRF_AB_RST_EM_WIDTH 1
493#define FRF_AB_RST_EV_LBN 21
494#define FRF_AB_RST_EV_WIDTH 1
495#define FRF_AB_RST_SR_LBN 20
496#define FRF_AB_RST_SR_WIDTH 1
497#define FRF_AB_RST_RX_LBN 19
498#define FRF_AB_RST_RX_WIDTH 1
499#define FRF_AB_RST_TX_LBN 18
500#define FRF_AB_RST_TX_WIDTH 1
501#define FRF_AB_RST_SF_LBN 17
502#define FRF_AB_RST_SF_WIDTH 1
503#define FRF_AB_RST_CS_LBN 16
504#define FRF_AB_RST_CS_WIDTH 1
505#define FRF_AB_INT_RST_DUR_LBN 4
506#define FRF_AB_INT_RST_DUR_WIDTH 3
507#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
508#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
509#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
510#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
511#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
512#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
513#define FFE_AB_EXT_PHY_RST_DUR_640US 3
514#define FFE_AB_EXT_PHY_RST_DUR_320US 2
515#define FFE_AB_EXT_PHY_RST_DUR_160US 1
516#define FFE_AB_EXT_PHY_RST_DUR_80US 0
517#define FRF_AB_SWRST_LBN 0
518#define FRF_AB_SWRST_WIDTH 1
519
520/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
521#define FR_AZ_FATAL_INTR_KER 0x00000230
522#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
523#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
524#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
525#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
526#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
527#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
528#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
529#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
530#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
531#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
532#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
533#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
534#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
535#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
536#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
537#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
538#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
539#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
540#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
541#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
542#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
543#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
544#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
545#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
546#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
547#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
548#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
549#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
550#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
551#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
552#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
553#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
554#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
555#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
556#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
557#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
558#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
559#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
560#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
561#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
562#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
563#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
564#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
565#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
566#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
567#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
568#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
569#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
570#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
571#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
572#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
573#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
574#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
575#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
576#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
577#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
578
579/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
580#define FR_BZ_FATAL_INTR_CHAR 0x00000240
581#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
582#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
583#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
584#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
585#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
586#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
587#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
588#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
589#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
590#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
591#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
592#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
593#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
594#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
595#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
596#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
597#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
598#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
599#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
600#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
601#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
602#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
603#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
604#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
605#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
606#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
607#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
608#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
609#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
610#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
611#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
612#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
613#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
614#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
615#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
616#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
617#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
618#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
619#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
620#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
621#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
622#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
623#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
624#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
625#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
626#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
627#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
628#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
629#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
630#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
631#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
632#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
633#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
634#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
635#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
636#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
637
638/* DP_CTRL_REG: Datapath control register */
639#define FR_BZ_DP_CTRL 0x00000250
640#define FRF_BZ_FLS_EVQ_ID_LBN 0
641#define FRF_BZ_FLS_EVQ_ID_WIDTH 12
642
643/* MEM_STAT_REG: Memory status register */
644#define FR_AZ_MEM_STAT 0x00000260
645#define FRF_AB_MEM_PERR_VEC_LBN 53
646#define FRF_AB_MEM_PERR_VEC_WIDTH 38
647#define FRF_AB_MBIST_CORR_LBN 38
648#define FRF_AB_MBIST_CORR_WIDTH 15
649#define FRF_AB_MBIST_ERR_LBN 0
650#define FRF_AB_MBIST_ERR_WIDTH 40
651#define FRF_CZ_MEM_PERR_VEC_LBN 0
652#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
653
654/* CS_DEBUG_REG: Debug register */
655#define FR_AZ_CS_DEBUG 0x00000270
656#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
657#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
658#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
659#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
660#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
661#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
662#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
663#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
664#define FRF_CZ_CS_PORT_NUM_LBN 40
665#define FRF_CZ_CS_PORT_NUM_WIDTH 2
666#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
667#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
668#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
669#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
670#define FRF_CZ_CS_PORT_FPE_LBN 1
671#define FRF_CZ_CS_PORT_FPE_WIDTH 35
672#define FRF_AB_EM_DEBUG_ADDR_LBN 26
673#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
674#define FRF_AB_SR_DEBUG_ADDR_LBN 21
675#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
676#define FRF_AB_EV_DEBUG_ADDR_LBN 16
677#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
678#define FRF_AB_RX_DEBUG_ADDR_LBN 11
679#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
680#define FRF_AB_TX_DEBUG_ADDR_LBN 6
681#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
682#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
683#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
684#define FRF_AZ_CS_DEBUG_EN_LBN 0
685#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
686
687/* DRIVER_REG: Driver scratch register [0-7] */
688#define FR_AZ_DRIVER 0x00000280
689#define FR_AZ_DRIVER_STEP 16
690#define FR_AZ_DRIVER_ROWS 8
691#define FRF_AZ_DRIVER_DW0_LBN 0
692#define FRF_AZ_DRIVER_DW0_WIDTH 32
693
694/* ALTERA_BUILD_REG: Altera build register */
695#define FR_AZ_ALTERA_BUILD 0x00000300
696#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
697#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
698
699/* CSR_SPARE_REG: Spare register */
700#define FR_AZ_CSR_SPARE 0x00000310
701#define FRF_AB_MEM_PERR_EN_LBN 64
702#define FRF_AB_MEM_PERR_EN_WIDTH 38
703#define FRF_CZ_MEM_PERR_EN_LBN 64
704#define FRF_CZ_MEM_PERR_EN_WIDTH 35
705#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
706#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
707#define FRF_AZ_CSR_SPARE_BITS_LBN 0
708#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
709
710/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
711#define FR_AB_PCIE_SD_CTL0123 0x00000320
712#define FRF_AB_PCIE_TESTSIG_H_LBN 96
713#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
714#define FRF_AB_PCIE_TESTSIG_L_LBN 64
715#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
716#define FRF_AB_PCIE_OFFSET_LBN 56
717#define FRF_AB_PCIE_OFFSET_WIDTH 8
718#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
719#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
720#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
721#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
722#define FRF_AB_PCIE_HIVMODE_H_LBN 53
723#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
724#define FRF_AB_PCIE_HIVMODE_L_LBN 52
725#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
726#define FRF_AB_PCIE_PARRESET_H_LBN 51
727#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
728#define FRF_AB_PCIE_PARRESET_L_LBN 50
729#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
730#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
731#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
732#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
733#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
734#define FRF_AB_PCIE_LPBK_LBN 40
735#define FRF_AB_PCIE_LPBK_WIDTH 8
736#define FRF_AB_PCIE_PARLPBK_LBN 32
737#define FRF_AB_PCIE_PARLPBK_WIDTH 8
738#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
739#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
740#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
741#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
742#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
743#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
744#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
745#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
746#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
747#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
748#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
749#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
750#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
751#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
752#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
753#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
754#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
755#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
756#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
757#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
758#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
759#define FFE_AB_PCIE_RXEQCTL_OFF 2
760#define FFE_AB_PCIE_RXEQCTL_MIN 1
761#define FFE_AB_PCIE_RXEQCTL_MAX 0
762#define FRF_AB_PCIE_HIDRV_LBN 8
763#define FRF_AB_PCIE_HIDRV_WIDTH 8
764#define FRF_AB_PCIE_LODRV_LBN 0
765#define FRF_AB_PCIE_LODRV_WIDTH 8
766
767/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
768#define FR_AB_PCIE_SD_CTL45 0x00000330
769#define FRF_AB_PCIE_DTX7_LBN 60
770#define FRF_AB_PCIE_DTX7_WIDTH 4
771#define FRF_AB_PCIE_DTX6_LBN 56
772#define FRF_AB_PCIE_DTX6_WIDTH 4
773#define FRF_AB_PCIE_DTX5_LBN 52
774#define FRF_AB_PCIE_DTX5_WIDTH 4
775#define FRF_AB_PCIE_DTX4_LBN 48
776#define FRF_AB_PCIE_DTX4_WIDTH 4
777#define FRF_AB_PCIE_DTX3_LBN 44
778#define FRF_AB_PCIE_DTX3_WIDTH 4
779#define FRF_AB_PCIE_DTX2_LBN 40
780#define FRF_AB_PCIE_DTX2_WIDTH 4
781#define FRF_AB_PCIE_DTX1_LBN 36
782#define FRF_AB_PCIE_DTX1_WIDTH 4
783#define FRF_AB_PCIE_DTX0_LBN 32
784#define FRF_AB_PCIE_DTX0_WIDTH 4
785#define FRF_AB_PCIE_DEQ7_LBN 28
786#define FRF_AB_PCIE_DEQ7_WIDTH 4
787#define FRF_AB_PCIE_DEQ6_LBN 24
788#define FRF_AB_PCIE_DEQ6_WIDTH 4
789#define FRF_AB_PCIE_DEQ5_LBN 20
790#define FRF_AB_PCIE_DEQ5_WIDTH 4
791#define FRF_AB_PCIE_DEQ4_LBN 16
792#define FRF_AB_PCIE_DEQ4_WIDTH 4
793#define FRF_AB_PCIE_DEQ3_LBN 12
794#define FRF_AB_PCIE_DEQ3_WIDTH 4
795#define FRF_AB_PCIE_DEQ2_LBN 8
796#define FRF_AB_PCIE_DEQ2_WIDTH 4
797#define FRF_AB_PCIE_DEQ1_LBN 4
798#define FRF_AB_PCIE_DEQ1_WIDTH 4
799#define FRF_AB_PCIE_DEQ0_LBN 0
800#define FRF_AB_PCIE_DEQ0_WIDTH 4
801
802/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
803#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
804#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
805#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
806#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
807#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
808#define FRF_AB_PCIE_PRBSERR_LBN 40
809#define FRF_AB_PCIE_PRBSERR_WIDTH 8
810#define FRF_AB_PCIE_PRBSERRH0_LBN 32
811#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
812#define FRF_AB_PCIE_FASTINIT_H_LBN 15
813#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
814#define FRF_AB_PCIE_FASTINIT_L_LBN 14
815#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
816#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
817#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
818#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
819#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
820#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
821#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
822#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
823#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
824#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
825#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
826#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
827#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
828#define FRF_AB_PCIE_PRBSSEL_LBN 0
829#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
830
831/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
832#define FR_BB_DEBUG_DATA_OUT 0x00000350
833#define FRF_BB_DEBUG2_PORT_LBN 25
834#define FRF_BB_DEBUG2_PORT_WIDTH 15
835#define FRF_BB_DEBUG1_PORT_LBN 0
836#define FRF_BB_DEBUG1_PORT_WIDTH 25
837
838/* EVQ_RPTR_REGP0: Event queue read pointer register */
839#define FR_BZ_EVQ_RPTR_P0 0x00000400
840#define FR_BZ_EVQ_RPTR_P0_STEP 8192
841#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
842/* EVQ_RPTR_REG_KER: Event queue read pointer register */
843#define FR_AA_EVQ_RPTR_KER 0x00011b00
844#define FR_AA_EVQ_RPTR_KER_STEP 4
845#define FR_AA_EVQ_RPTR_KER_ROWS 4
846/* EVQ_RPTR_REG: Event queue read pointer register */
847#define FR_BZ_EVQ_RPTR 0x00fa0000
848#define FR_BZ_EVQ_RPTR_STEP 16
849#define FR_BB_EVQ_RPTR_ROWS 4096
850#define FR_CZ_EVQ_RPTR_ROWS 1024
851/* EVQ_RPTR_REGP123: Event queue read pointer register */
852#define FR_BB_EVQ_RPTR_P123 0x01000400
853#define FR_BB_EVQ_RPTR_P123_STEP 8192
854#define FR_BB_EVQ_RPTR_P123_ROWS 3072
855#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
856#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
857#define FRF_AZ_EVQ_RPTR_LBN 0
858#define FRF_AZ_EVQ_RPTR_WIDTH 15
859
860/* TIMER_COMMAND_REGP0: Timer Command Registers */
861#define FR_BZ_TIMER_COMMAND_P0 0x00000420
862#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
863#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
864/* TIMER_COMMAND_REG_KER: Timer Command Registers */
865#define FR_AA_TIMER_COMMAND_KER 0x00000420
866#define FR_AA_TIMER_COMMAND_KER_STEP 8192
867#define FR_AA_TIMER_COMMAND_KER_ROWS 4
868/* TIMER_COMMAND_REGP123: Timer Command Registers */
869#define FR_BB_TIMER_COMMAND_P123 0x01000420
870#define FR_BB_TIMER_COMMAND_P123_STEP 8192
871#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
872#define FRF_CZ_TC_TIMER_MODE_LBN 14
873#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
874#define FRF_AB_TC_TIMER_MODE_LBN 12
875#define FRF_AB_TC_TIMER_MODE_WIDTH 2
876#define FRF_CZ_TC_TIMER_VAL_LBN 0
877#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
878#define FRF_AB_TC_TIMER_VAL_LBN 0
879#define FRF_AB_TC_TIMER_VAL_WIDTH 12
880
881/* DRV_EV_REG: Driver generated event register */
882#define FR_AZ_DRV_EV 0x00000440
883#define FRF_AZ_DRV_EV_QID_LBN 64
884#define FRF_AZ_DRV_EV_QID_WIDTH 12
885#define FRF_AZ_DRV_EV_DATA_LBN 0
886#define FRF_AZ_DRV_EV_DATA_WIDTH 64
887
888/* EVQ_CTL_REG: Event queue control register */
889#define FR_AZ_EVQ_CTL 0x00000450
890#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
891#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
892#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
893#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
894#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
895#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
896#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
897#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
898#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
899#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
900
901/* EVQ_CNT1_REG: Event counter 1 register */
902#define FR_AZ_EVQ_CNT1 0x00000460
903#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
904#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
905#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
906#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
907#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
908#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
909#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
910#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
911#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
912#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
913#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
914#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
915#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
916#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
917
918/* EVQ_CNT2_REG: Event counter 2 register */
919#define FR_AZ_EVQ_CNT2 0x00000470
920#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
921#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
922#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
923#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
924#define FRF_AZ_EVQ_RDY_CNT_LBN 80
925#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
926#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
927#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
928#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
929#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
930#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
931#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
932#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
933#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
934
935/* USR_EV_REG: Event mailbox register */
936#define FR_CZ_USR_EV 0x00000540
937#define FR_CZ_USR_EV_STEP 8192
938#define FR_CZ_USR_EV_ROWS 1024
939#define FRF_CZ_USR_EV_DATA_LBN 0
940#define FRF_CZ_USR_EV_DATA_WIDTH 32
941
942/* BUF_TBL_CFG_REG: Buffer table configuration register */
943#define FR_AZ_BUF_TBL_CFG 0x00000600
944#define FRF_AZ_BUF_TBL_MODE_LBN 3
945#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
946
947/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
948#define FR_AZ_SRM_RX_DC_CFG 0x00000610
949#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
950#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
951#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
952#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
953
954/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
955#define FR_AZ_SRM_TX_DC_CFG 0x00000620
956#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
957#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
958
959/* SRM_CFG_REG: SRAM configuration register */
960#define FR_AZ_SRM_CFG 0x00000630
961#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
962#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
963#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
964#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
965#define FRF_AZ_SRM_INIT_EN_LBN 3
966#define FRF_AZ_SRM_INIT_EN_WIDTH 1
967#define FRF_AZ_SRM_NUM_BANK_LBN 2
968#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
969#define FRF_AZ_SRM_BANK_SIZE_LBN 0
970#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
971
972/* BUF_TBL_UPD_REG: Buffer table update register */
973#define FR_AZ_BUF_TBL_UPD 0x00000650
974#define FRF_AZ_BUF_UPD_CMD_LBN 63
975#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
976#define FRF_AZ_BUF_CLR_CMD_LBN 62
977#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
978#define FRF_AZ_BUF_CLR_END_ID_LBN 32
979#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
980#define FRF_AZ_BUF_CLR_START_ID_LBN 0
981#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
982
983/* SRM_UPD_EVQ_REG: Buffer table update register */
984#define FR_AZ_SRM_UPD_EVQ 0x00000660
985#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
986#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
987
988/* SRAM_PARITY_REG: SRAM parity register. */
989#define FR_AZ_SRAM_PARITY 0x00000670
990#define FRF_CZ_BYPASS_ECC_LBN 3
991#define FRF_CZ_BYPASS_ECC_WIDTH 1
992#define FRF_CZ_SEC_INT_LBN 2
993#define FRF_CZ_SEC_INT_WIDTH 1
994#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
995#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
996#define FRF_AB_FORCE_SRAM_PERR_LBN 0
997#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
998#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
999#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
1000
1001/* RX_CFG_REG: Receive configuration register */
1002#define FR_AZ_RX_CFG 0x00000800
1003#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
1004#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
1005#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
1006#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
1007#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
1008#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
1009#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
1010#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
1011#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
1012#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
1013#define FRF_BZ_RX_TCP_SUP_LBN 48
1014#define FRF_BZ_RX_TCP_SUP_WIDTH 1
1015#define FRF_BZ_RX_INGR_EN_LBN 47
1016#define FRF_BZ_RX_INGR_EN_WIDTH 1
1017#define FRF_BZ_RX_IP_HASH_LBN 46
1018#define FRF_BZ_RX_IP_HASH_WIDTH 1
1019#define FRF_BZ_RX_HASH_ALG_LBN 45
1020#define FRF_BZ_RX_HASH_ALG_WIDTH 1
1021#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
1022#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
1023#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
1024#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
1025#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
1026#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
1027#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
1028#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
1029#define FRF_BZ_RX_OWNERR_CTL_LBN 38
1030#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
1031#define FRF_BZ_RX_XON_TX_TH_LBN 33
1032#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
1033#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
1034#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
1035#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
1036#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
1037#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
1038#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
1039#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
1040#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
1041#define FRF_AA_RX_OWNERR_CTL_LBN 30
1042#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
1043#define FRF_AA_RX_XON_TX_TH_LBN 25
1044#define FRF_AA_RX_XON_TX_TH_WIDTH 5
1045#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
1046#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
1047#define FRF_AA_RX_XOFF_TX_TH_LBN 20
1048#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
1049#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
1050#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
1051#define FRF_BZ_RX_XON_MAC_TH_LBN 10
1052#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
1053#define FRF_AA_RX_XON_MAC_TH_LBN 6
1054#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
1055#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
1056#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
1057#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
1058#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
1059#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
1060#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
1061
1062/* RX_FILTER_CTL_REG: Receive filter control registers */
1063#define FR_BZ_RX_FILTER_CTL 0x00000810
1064#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
1065#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
1066#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
1067#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
1068#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
1069#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
1070#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
1071#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
1072#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
1073#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
1074#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
1075#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
1076#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
1077#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
1078#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
1079#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
1080#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
1081#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
1082#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
1083#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
1084#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
1085#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
1086#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
1087#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
1088#define FRF_BZ_NUM_KER_LBN 24
1089#define FRF_BZ_NUM_KER_WIDTH 2
1090#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
1091#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
1092#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
1093#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
1094#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
1095#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
1096
1097/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
1098#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
1099#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
1100#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
1101#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
1102#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
1103
1104/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
1105#define FR_BZ_RX_DESC_UPD_P0 0x00000830
1106#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
1107#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
1108/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
1109#define FR_AA_RX_DESC_UPD_KER 0x00000830
1110#define FR_AA_RX_DESC_UPD_KER_STEP 8192
1111#define FR_AA_RX_DESC_UPD_KER_ROWS 4
1112/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
1113#define FR_BB_RX_DESC_UPD_P123 0x01000830
1114#define FR_BB_RX_DESC_UPD_P123_STEP 8192
1115#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
1116#define FRF_AZ_RX_DESC_WPTR_LBN 96
1117#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
1118#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
1119#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
1120#define FRF_AZ_RX_DESC_LBN 0
1121#define FRF_AZ_RX_DESC_WIDTH 64
1122
1123/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
1124#define FR_AZ_RX_DC_CFG 0x00000840
1125#define FRF_AB_RX_MAX_PF_LBN 2
1126#define FRF_AB_RX_MAX_PF_WIDTH 2
1127#define FRF_AZ_RX_DC_SIZE_LBN 0
1128#define FRF_AZ_RX_DC_SIZE_WIDTH 2
1129#define FFE_AZ_RX_DC_SIZE_64 3
1130#define FFE_AZ_RX_DC_SIZE_32 2
1131#define FFE_AZ_RX_DC_SIZE_16 1
1132#define FFE_AZ_RX_DC_SIZE_8 0
1133
1134/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
1135#define FR_AZ_RX_DC_PF_WM 0x00000850
1136#define FRF_AZ_RX_DC_PF_HWM_LBN 6
1137#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
1138#define FRF_AZ_RX_DC_PF_LWM_LBN 0
1139#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
1140
1141/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
1142#define FR_BZ_RX_RSS_TKEY 0x00000860
1143#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
1144#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
1145#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
1146#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
1147
1148/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
1149#define FR_AZ_RX_NODESC_DROP 0x00000880
1150#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
1151#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
1152#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
1153#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
1154
1155/* RX_SELF_RST_REG: Receive self reset register */
1156#define FR_AA_RX_SELF_RST 0x00000890
1157#define FRF_AA_RX_ISCSI_DIS_LBN 17
1158#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
1159#define FRF_AA_RX_SW_RST_REG_LBN 16
1160#define FRF_AA_RX_SW_RST_REG_WIDTH 1
1161#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
1162#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
1163#define FRF_AA_RX_SELF_RST_EN_LBN 8
1164#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
1165#define FRF_AA_RX_MAX_PF_LAT_LBN 4
1166#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
1167#define FRF_AA_RX_MAX_LU_LAT_LBN 0
1168#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4
1169
1170/* RX_DEBUG_REG: undocumented register */
1171#define FR_AZ_RX_DEBUG 0x000008a0
1172#define FRF_AZ_RX_DEBUG_LBN 0
1173#define FRF_AZ_RX_DEBUG_WIDTH 64
1174
1175/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
1176#define FR_AZ_RX_PUSH_DROP 0x000008b0
1177#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
1178#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
1179
/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128

/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128

/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64

/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12

/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
#define FR_AA_TX_DESC_UPD_KER 0x00000a10
#define FR_AA_TX_DESC_UPD_KER_STEP 8192
#define FR_AA_TX_DESC_UPD_KER_ROWS 8
/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
#define FR_BB_TX_DESC_UPD_P123 0x01000a10
#define FR_BB_TX_DESC_UPD_P123_STEP 8192
#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
#define FRF_AZ_TX_DESC_WPTR_LBN 96
#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
#define FRF_AZ_TX_DESC_LBN 0
#define FRF_AZ_TX_DESC_WIDTH 95

/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
#define FR_AZ_TX_DC_CFG 0x00000a20
#define FRF_AZ_TX_DC_SIZE_LBN 0
#define FRF_AZ_TX_DC_SIZE_WIDTH 2
#define FFE_AZ_TX_DC_SIZE_32 2
#define FFE_AZ_TX_DC_SIZE_16 1
#define FFE_AZ_TX_DC_SIZE_8 0

/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
#define FR_AA_TX_CHKSM_CFG 0x00000a30
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32

/* TX_CFG_REG: Transmit configuration register */
#define FR_AZ_TX_CFG 0x00000a50
#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
#define FRF_AZ_TX_P1_PRI_EN_LBN 4
#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
#define FRF_AZ_TX_OWNERR_CTL_LBN 2
#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1

/* TX_PUSH_DROP_REG: Transmit push dropped register */
#define FR_AZ_TX_PUSH_DROP 0x00000a60
#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32

/* TX_RESERVED_REG: Transmit configuration register */
#define FR_AZ_TX_RESERVED 0x00000a80
#define FRF_AZ_TX_EVT_CNT_LBN 121
#define FRF_AZ_TX_EVT_CNT_WIDTH 7
#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
#define FRF_AZ_TX_PUSH_EN_LBN 89
#define FRF_AZ_TX_PUSH_EN_WIDTH 1
#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
#define FRF_AZ_TX_DMAQ_ST_LBN 78
#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
#define FRF_AZ_TX_RX_SPACER_LBN 64
#define FRF_AZ_TX_RX_SPACER_WIDTH 8
#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
#define FRF_AZ_TX_XP_TIMER_LBN 52
#define FRF_AZ_TX_XP_TIMER_WIDTH 5
#define FRF_AZ_TX_PREF_SPACER_LBN 44
#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
#define FRF_AZ_TX_ONLY1TAG_LBN 21
#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
#define FRF_AA_TX_DMA_FF_THR_LBN 16
#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
#define FRF_AZ_TX_DMA_SPACER_LBN 8
#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
#define FRF_AA_TX_TCP_DIS_LBN 7
#define FRF_AA_TX_TCP_DIS_WIDTH 1
#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
#define FRF_AA_TX_IP_DIS_LBN 6
#define FRF_AA_TX_IP_DIS_WIDTH 1
#define FRF_AZ_TX_MAX_CPL_LBN 2
#define FRF_AZ_TX_MAX_CPL_WIDTH 2
#define FFE_AZ_TX_MAX_CPL_16 3
#define FFE_AZ_TX_MAX_CPL_8 2
#define FFE_AZ_TX_MAX_CPL_4 1
#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
#define FRF_AZ_TX_MAX_PREF_LBN 0
#define FRF_AZ_TX_MAX_PREF_WIDTH 2
#define FFE_AZ_TX_MAX_PREF_32 3
#define FFE_AZ_TX_MAX_PREF_16 2
#define FFE_AZ_TX_MAX_PREF_8 1
#define FFE_AZ_TX_MAX_PREF_OFF 0
1352
/* TX_PACE_REG: Transmit pace control register */
#define FR_BZ_TX_PACE 0x00000a90
#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
#define FRF_BZ_TX_PACE_SB_AF_LBN 9
#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5

/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16

/* TX_VLAN_REG: Transmit VLAN tag register */
#define FR_BB_TX_VLAN 0x00000ae0
#define FRF_BB_TX_VLAN_EN_LBN 127
#define FRF_BB_TX_VLAN_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_LBN 112
#define FRF_BB_TX_VLAN7_WIDTH 12
#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN6_LBN 96
#define FRF_BB_TX_VLAN6_WIDTH 12
#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN5_LBN 80
#define FRF_BB_TX_VLAN5_WIDTH 12
#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN4_LBN 64
#define FRF_BB_TX_VLAN4_WIDTH 12
#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN3_LBN 48
#define FRF_BB_TX_VLAN3_WIDTH 12
#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN2_LBN 32
#define FRF_BB_TX_VLAN2_WIDTH 12
#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN1_LBN 16
#define FRF_BB_TX_VLAN1_WIDTH 12
#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN0_LBN 0
#define FRF_BB_TX_VLAN0_WIDTH 12

/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1

/* TX_IPFIL_TBL: Transmit IP source address filter table */
#define FR_BB_TX_IPFIL_TBL 0x00000b00
#define FR_BB_TX_IPFIL_TBL_STEP 16
#define FR_BB_TX_IPFIL_TBL_ROWS 16
#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
1503
/* MD_TXD_REG: PHY management transmit data register */
#define FR_AB_MD_TXD 0x00000c00
#define FRF_AB_MD_TXD_LBN 0
#define FRF_AB_MD_TXD_WIDTH 16

/* MD_RXD_REG: PHY management receive data register */
#define FR_AB_MD_RXD 0x00000c10
#define FRF_AB_MD_RXD_LBN 0
#define FRF_AB_MD_RXD_WIDTH 16

/* MD_CS_REG: PHY management configuration & status register */
#define FR_AB_MD_CS 0x00000c20
#define FRF_AB_MD_RD_EN_CMD_LBN 15
#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
#define FRF_AB_MD_WR_EN_CMD_LBN 14
#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
#define FRF_AB_MD_ADDR_CMD_LBN 13
#define FRF_AB_MD_ADDR_CMD_WIDTH 1
#define FRF_AB_MD_PT_LBN 7
#define FRF_AB_MD_PT_WIDTH 3
#define FRF_AB_MD_PL_LBN 6
#define FRF_AB_MD_PL_WIDTH 1
#define FRF_AB_MD_INT_CLR_LBN 5
#define FRF_AB_MD_INT_CLR_WIDTH 1
#define FRF_AB_MD_GC_LBN 4
#define FRF_AB_MD_GC_WIDTH 1
#define FRF_AB_MD_PRSP_LBN 3
#define FRF_AB_MD_PRSP_WIDTH 1
#define FRF_AB_MD_RIC_LBN 2
#define FRF_AB_MD_RIC_WIDTH 1
#define FRF_AB_MD_RDC_LBN 1
#define FRF_AB_MD_RDC_WIDTH 1
#define FRF_AB_MD_WRC_LBN 0
#define FRF_AB_MD_WRC_WIDTH 1

/* MD_PHY_ADR_REG: PHY management PHY address register */
#define FR_AB_MD_PHY_ADR 0x00000c30
#define FRF_AB_MD_PHY_ADR_LBN 0
#define FRF_AB_MD_PHY_ADR_WIDTH 16

/* MD_ID_REG: PHY management ID register */
#define FR_AB_MD_ID 0x00000c40
#define FRF_AB_MD_PRT_ADR_LBN 11
#define FRF_AB_MD_PRT_ADR_WIDTH 5
#define FRF_AB_MD_DEV_ADR_LBN 6
#define FRF_AB_MD_DEV_ADR_WIDTH 5

/* MD_STAT_REG: PHY management status & mask register */
#define FR_AB_MD_STAT 0x00000c50
#define FRF_AB_MD_PINT_LBN 4
#define FRF_AB_MD_PINT_WIDTH 1
#define FRF_AB_MD_DONE_LBN 3
#define FRF_AB_MD_DONE_WIDTH 1
#define FRF_AB_MD_BSERR_LBN 2
#define FRF_AB_MD_BSERR_WIDTH 1
#define FRF_AB_MD_LNFL_LBN 1
#define FRF_AB_MD_LNFL_WIDTH 1
#define FRF_AB_MD_BSY_LBN 0
#define FRF_AB_MD_BSY_WIDTH 1

/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
#define FR_AB_MAC_STAT_DMA 0x00000c60
#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48

/* MAC_CTRL_REG: Port MAC control register */
#define FR_AB_MAC_CTRL 0x00000c80
#define FRF_AB_MAC_XOFF_VAL_LBN 16
#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
#define FRF_AB_MAC_BCAD_ACPT_LBN 4
#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
#define FRF_AB_MAC_UC_PROM_LBN 3
#define FRF_AB_MAC_UC_PROM_WIDTH 1
#define FRF_AB_MAC_LINK_STATUS_LBN 2
#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
#define FRF_AB_MAC_SPEED_LBN 0
#define FRF_AB_MAC_SPEED_WIDTH 2
#define FFE_AB_MAC_SPEED_10G 3
#define FFE_AB_MAC_SPEED_1G 2
#define FFE_AB_MAC_SPEED_100M 1
#define FFE_AB_MAC_SPEED_10M 0

/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
#define FR_BB_GEN_MODE 0x00000c90
#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
#define FRF_BB_XG_PHY_INT_MASK_LBN 0
#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1

/* MAC_MC_HASH_REG0: Multicast address hash table */
#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
#define FRF_AB_MAC_MCAST_HASH0_LBN 0
#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128

/* MAC_MC_HASH_REG1: Multicast address hash table */
#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
#define FRF_AB_MAC_MCAST_HASH1_LBN 0
#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
1612
/* GM_CFG1_REG: GMAC configuration register 1 */
#define FR_AB_GM_CFG1 0x00000e00
#define FRF_AB_GM_SW_RST_LBN 31
#define FRF_AB_GM_SW_RST_WIDTH 1
#define FRF_AB_GM_SIM_RST_LBN 30
#define FRF_AB_GM_SIM_RST_WIDTH 1
#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
#define FRF_AB_GM_RST_RX_FUNC_LBN 17
#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
#define FRF_AB_GM_RST_TX_FUNC_LBN 16
#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
#define FRF_AB_GM_LOOP_LBN 8
#define FRF_AB_GM_LOOP_WIDTH 1
#define FRF_AB_GM_RX_FC_EN_LBN 5
#define FRF_AB_GM_RX_FC_EN_WIDTH 1
#define FRF_AB_GM_TX_FC_EN_LBN 4
#define FRF_AB_GM_TX_FC_EN_WIDTH 1
#define FRF_AB_GM_SYNC_RXEN_LBN 3
#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
#define FRF_AB_GM_RX_EN_LBN 2
#define FRF_AB_GM_RX_EN_WIDTH 1
#define FRF_AB_GM_SYNC_TXEN_LBN 1
#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
#define FRF_AB_GM_TX_EN_LBN 0
#define FRF_AB_GM_TX_EN_WIDTH 1

/* GM_CFG2_REG: GMAC configuration register 2 */
#define FR_AB_GM_CFG2 0x00000e10
#define FRF_AB_GM_PAMBL_LEN_LBN 12
#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
#define FRF_AB_GM_IF_MODE_LBN 8
#define FRF_AB_GM_IF_MODE_WIDTH 2
#define FFE_AB_IF_MODE_BYTE_MODE 2
#define FFE_AB_IF_MODE_NIBBLE_MODE 1
#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
#define FRF_AB_GM_LEN_CHK_LBN 4
#define FRF_AB_GM_LEN_CHK_WIDTH 1
#define FRF_AB_GM_PAD_CRC_EN_LBN 2
#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
#define FRF_AB_GM_CRC_EN_LBN 1
#define FRF_AB_GM_CRC_EN_WIDTH 1
#define FRF_AB_GM_FD_LBN 0
#define FRF_AB_GM_FD_WIDTH 1

/* GM_IPG_REG: GMAC IPG register */
#define FR_AB_GM_IPG 0x00000e20
#define FRF_AB_GM_NONB2B_IPG1_LBN 24
#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
#define FRF_AB_GM_NONB2B_IPG2_LBN 16
#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
#define FRF_AB_GM_B2B_IPG_LBN 0
#define FRF_AB_GM_B2B_IPG_WIDTH 7

/* GM_HD_REG: GMAC half duplex register */
#define FR_AB_GM_HD 0x00000e30
#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
#define FRF_AB_GM_BP_NO_BOFF_LBN 18
#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
#define FRF_AB_GM_DIS_BOFF_LBN 17
#define FRF_AB_GM_DIS_BOFF_WIDTH 1
#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
#define FRF_AB_GM_RTRY_LIMIT_LBN 12
#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
#define FRF_AB_GM_COL_WIN_LBN 0
#define FRF_AB_GM_COL_WIN_WIDTH 10

/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
#define FR_AB_GM_MAX_FLEN 0x00000e40
#define FRF_AB_GM_MAX_FLEN_LBN 0
#define FRF_AB_GM_MAX_FLEN_WIDTH 16

/* GM_TEST_REG: GMAC test register */
#define FR_AB_GM_TEST 0x00000e70
#define FRF_AB_GM_MAX_BOFF_LBN 3
#define FRF_AB_GM_MAX_BOFF_WIDTH 1
#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
#define FRF_AB_GM_TEST_PAUSE_LBN 1
#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
#define FRF_AB_GM_SHORT_SLOT_LBN 0
#define FRF_AB_GM_SHORT_SLOT_WIDTH 1

/* GM_ADR1_REG: GMAC station address register 1 */
#define FR_AB_GM_ADR1 0x00000f00
#define FRF_AB_GM_ADR_B0_LBN 24
#define FRF_AB_GM_ADR_B0_WIDTH 8
#define FRF_AB_GM_ADR_B1_LBN 16
#define FRF_AB_GM_ADR_B1_WIDTH 8
#define FRF_AB_GM_ADR_B2_LBN 8
#define FRF_AB_GM_ADR_B2_WIDTH 8
#define FRF_AB_GM_ADR_B3_LBN 0
#define FRF_AB_GM_ADR_B3_WIDTH 8

/* GM_ADR2_REG: GMAC station address register 2 */
#define FR_AB_GM_ADR2 0x00000f10
#define FRF_AB_GM_ADR_B4_LBN 24
#define FRF_AB_GM_ADR_B4_WIDTH 8
#define FRF_AB_GM_ADR_B5_LBN 16
#define FRF_AB_GM_ADR_B5_WIDTH 8
1722
/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
#define FR_AB_GMF_CFG0 0x00000f20
#define FRF_AB_GMF_FTFENRPLY_LBN 20
#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
#define FRF_AB_GMF_STFENRPLY_LBN 19
#define FRF_AB_GMF_STFENRPLY_WIDTH 1
#define FRF_AB_GMF_FRFENRPLY_LBN 18
#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
#define FRF_AB_GMF_SRFENRPLY_LBN 17
#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
#define FRF_AB_GMF_WTMENRPLY_LBN 16
#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
#define FRF_AB_GMF_FTFENREQ_LBN 12
#define FRF_AB_GMF_FTFENREQ_WIDTH 1
#define FRF_AB_GMF_STFENREQ_LBN 11
#define FRF_AB_GMF_STFENREQ_WIDTH 1
#define FRF_AB_GMF_FRFENREQ_LBN 10
#define FRF_AB_GMF_FRFENREQ_WIDTH 1
#define FRF_AB_GMF_SRFENREQ_LBN 9
#define FRF_AB_GMF_SRFENREQ_WIDTH 1
#define FRF_AB_GMF_WTMENREQ_LBN 8
#define FRF_AB_GMF_WTMENREQ_WIDTH 1
#define FRF_AB_GMF_HSTRSTFT_LBN 4
#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
#define FRF_AB_GMF_HSTRSTST_LBN 3
#define FRF_AB_GMF_HSTRSTST_WIDTH 1
#define FRF_AB_GMF_HSTRSTFR_LBN 2
#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
#define FRF_AB_GMF_HSTRSTSR_LBN 1
#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
#define FRF_AB_GMF_HSTRSTWT_LBN 0
#define FRF_AB_GMF_HSTRSTWT_WIDTH 1

/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
#define FR_AB_GMF_CFG1 0x00000f30
#define FRF_AB_GMF_CFGFRTH_LBN 16
#define FRF_AB_GMF_CFGFRTH_WIDTH 5
#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16

/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
#define FR_AB_GMF_CFG2 0x00000f40
#define FRF_AB_GMF_CFGHWM_LBN 16
#define FRF_AB_GMF_CFGHWM_WIDTH 6
#define FRF_AB_GMF_CFGLWM_LBN 0
#define FRF_AB_GMF_CFGLWM_WIDTH 6

/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
#define FR_AB_GMF_CFG3 0x00000f50
#define FRF_AB_GMF_CFGHWMFT_LBN 16
#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
#define FRF_AB_GMF_CFGFTTH_LBN 0
#define FRF_AB_GMF_CFGFTTH_WIDTH 6

/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
#define FR_AB_GMF_CFG4 0x00000f60
#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18

/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
#define FR_AB_GMF_CFG5 0x00000f70
#define FRF_AB_GMF_CFGHDPLX_LBN 22
#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
#define FRF_AB_GMF_SRFULL_LBN 21
#define FRF_AB_GMF_SRFULL_WIDTH 1
#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
#define FRF_AB_GMF_CFGBYTMODE_LBN 19
#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
#define FRF_AB_GMF_HSTDRPLT64_LBN 18
#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18

/* TX_SRC_MAC_TBL: Transmit IP source address filter table */
#define FR_BB_TX_SRC_MAC_TBL 0x00001000
#define FR_BB_TX_SRC_MAC_TBL_STEP 16
#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48

/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
#define FR_BB_TX_SRC_MAC_CTL 0x00001100
#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
#define FRF_BB_TX_MAC_QID_SEL_LBN 0
#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
1816
/* XM_ADR_LO_REG: XGMAC address register low */
#define FR_AB_XM_ADR_LO 0x00001200
#define FRF_AB_XM_ADR_LO_LBN 0
#define FRF_AB_XM_ADR_LO_WIDTH 32

/* XM_ADR_HI_REG: XGMAC address register high */
#define FR_AB_XM_ADR_HI 0x00001210
#define FRF_AB_XM_ADR_HI_LBN 0
#define FRF_AB_XM_ADR_HI_WIDTH 16

/* XM_GLB_CFG_REG: XGMAC global configuration */
#define FR_AB_XM_GLB_CFG 0x00001220
#define FRF_AB_XM_RMTFLT_GEN_LBN 17
#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
#define FRF_AB_XM_DEBUG_MODE_LBN 16
#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
#define FRF_AB_XM_RX_STAT_EN_LBN 11
#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
#define FRF_AB_XM_TX_STAT_EN_LBN 10
#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
#define FRF_AB_XM_WAN_MODE_LBN 5
#define FRF_AB_XM_WAN_MODE_WIDTH 1
#define FRF_AB_XM_INTCLR_MODE_LBN 3
#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
#define FRF_AB_XM_CORE_RST_LBN 0
#define FRF_AB_XM_CORE_RST_WIDTH 1

/* XM_TX_CFG_REG: XGMAC transmit configuration */
#define FR_AB_XM_TX_CFG 0x00001230
#define FRF_AB_XM_TX_PROG_LBN 24
#define FRF_AB_XM_TX_PROG_WIDTH 1
#define FRF_AB_XM_IPG_LBN 16
#define FRF_AB_XM_IPG_WIDTH 4
#define FRF_AB_XM_FCNTL_LBN 10
#define FRF_AB_XM_FCNTL_WIDTH 1
#define FRF_AB_XM_TXCRC_LBN 8
#define FRF_AB_XM_TXCRC_WIDTH 1
#define FRF_AB_XM_EDRC_LBN 6
#define FRF_AB_XM_EDRC_WIDTH 1
#define FRF_AB_XM_AUTO_PAD_LBN 5
#define FRF_AB_XM_AUTO_PAD_WIDTH 1
#define FRF_AB_XM_TX_PRMBL_LBN 2
#define FRF_AB_XM_TX_PRMBL_WIDTH 1
#define FRF_AB_XM_TXEN_LBN 1
#define FRF_AB_XM_TXEN_WIDTH 1
#define FRF_AB_XM_TX_RST_LBN 0
#define FRF_AB_XM_TX_RST_WIDTH 1

/* XM_RX_CFG_REG: XGMAC receive configuration */
#define FR_AB_XM_RX_CFG 0x00001240
#define FRF_AB_XM_PASS_LENERR_LBN 26
#define FRF_AB_XM_PASS_LENERR_WIDTH 1
#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_REJ_BCAST_LBN 20
#define FRF_AB_XM_REJ_BCAST_WIDTH 1
#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
#define FRF_AB_XM_AUTO_DEPAD_LBN 8
#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
#define FRF_AB_XM_RXCRC_LBN 3
#define FRF_AB_XM_RXCRC_WIDTH 1
#define FRF_AB_XM_RX_PRMBL_LBN 2
#define FRF_AB_XM_RX_PRMBL_WIDTH 1
#define FRF_AB_XM_RXEN_LBN 1
#define FRF_AB_XM_RXEN_WIDTH 1
#define FRF_AB_XM_RX_RST_LBN 0
#define FRF_AB_XM_RX_RST_WIDTH 1

/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */
#define FR_AB_XM_MGT_INT_MASK 0x00001250
#define FRF_AB_XM_MSK_STA_INTR_LBN 16
#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_MSK_RMTFLT_LBN 1
#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
#define FRF_AB_XM_MSK_LCLFLT_LBN 0
#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1

/* XM_FC_REG: XGMAC flow control register */
#define FR_AB_XM_FC 0x00001270
#define FRF_AB_XM_PAUSE_TIME_LBN 16
#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
#define FRF_AB_XM_RX_MAC_STAT_LBN 11
#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
#define FRF_AB_XM_TX_MAC_STAT_LBN 10
#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
#define FRF_AB_XM_MCNTL_PASS_LBN 8
#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
#define FRF_AB_XM_ZPAUSE_LBN 2
#define FRF_AB_XM_ZPAUSE_WIDTH 1
#define FRF_AB_XM_XMIT_PAUSE_LBN 1
#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
#define FRF_AB_XM_DIS_FCNTL_LBN 0
#define FRF_AB_XM_DIS_FCNTL_WIDTH 1

/* XM_PAUSE_TIME_REG: XGMAC pause time register */
#define FR_AB_XM_PAUSE_TIME 0x00001290
#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16

/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
#define FR_AB_XM_TX_PARAM 0x000012d0
#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
#define FRF_AB_XM_PAD_CHAR_LBN 0
#define FRF_AB_XM_PAD_CHAR_WIDTH 8

/* XM_RX_PARAM_REG: XGMAC receive parameter register */
#define FR_AB_XM_RX_PARAM 0x000012e0
#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3

/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
#define FR_AB_XM_MGT_INT_MSK 0x000012f0
#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
#define FRF_AB_XM_PRMBLE_ERR_LBN 2
#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_RMTFLT_LBN 1
#define FRF_AB_XM_RMTFLT_WIDTH 1
#define FRF_AB_XM_LCLFLT_LBN 0
#define FRF_AB_XM_LCLFLT_WIDTH 1
1965
/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
#define FR_AB_XX_PWR_RST 0x00001300
#define FRF_AB_XX_PWRDND_SIG_LBN 31
#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNC_SIG_LBN 30
#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNB_SIG_LBN 29
#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNA_SIG_LBN 28
#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
#define FRF_AB_XX_SIM_MODE_LBN 27
#define FRF_AB_XX_SIM_MODE_WIDTH 1
#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
#define FRF_AB_XX_RESETD_SIG_LBN 23
#define FRF_AB_XX_RESETD_SIG_WIDTH 1
#define FRF_AB_XX_RESETC_SIG_LBN 22
#define FRF_AB_XX_RESETC_SIG_WIDTH 1
#define FRF_AB_XX_RESETB_SIG_LBN 21
#define FRF_AB_XX_RESETB_SIG_WIDTH 1
#define FRF_AB_XX_RESETA_SIG_LBN 20
#define FRF_AB_XX_RESETA_SIG_WIDTH 1
#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
#define FRF_AB_XX_SD_RST_ACT_LBN 16
#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
#define FRF_AB_XX_PWRDND_EN_LBN 15
#define FRF_AB_XX_PWRDND_EN_WIDTH 1
#define FRF_AB_XX_PWRDNC_EN_LBN 14
#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
#define FRF_AB_XX_PWRDNB_EN_LBN 13
#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
#define FRF_AB_XX_PWRDNA_EN_LBN 12
#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
#define FRF_AB_XX_RESETD_EN_LBN 7
#define FRF_AB_XX_RESETD_EN_WIDTH 1
#define FRF_AB_XX_RESETC_EN_LBN 6
#define FRF_AB_XX_RESETC_EN_WIDTH 1
#define FRF_AB_XX_RESETB_EN_LBN 5
#define FRF_AB_XX_RESETB_EN_WIDTH 1
#define FRF_AB_XX_RESETA_EN_LBN 4
#define FRF_AB_XX_RESETA_EN_WIDTH 1
#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
#define FRF_AB_XX_RST_XX_EN_LBN 0
#define FRF_AB_XX_RST_XX_EN_WIDTH 1

/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
#define FR_AB_XX_SD_CTL 0x00001310
#define FRF_AB_XX_TERMADJ1_LBN 17
#define FRF_AB_XX_TERMADJ1_WIDTH 1
#define FRF_AB_XX_TERMADJ0_LBN 16
#define FRF_AB_XX_TERMADJ0_WIDTH 1
#define FRF_AB_XX_HIDRVD_LBN 15
#define FRF_AB_XX_HIDRVD_WIDTH 1
#define FRF_AB_XX_LODRVD_LBN 14
#define FRF_AB_XX_LODRVD_WIDTH 1
#define FRF_AB_XX_HIDRVC_LBN 13
#define FRF_AB_XX_HIDRVC_WIDTH 1
#define FRF_AB_XX_LODRVC_LBN 12
#define FRF_AB_XX_LODRVC_WIDTH 1
#define FRF_AB_XX_HIDRVB_LBN 11
#define FRF_AB_XX_HIDRVB_WIDTH 1
#define FRF_AB_XX_LODRVB_LBN 10
#define FRF_AB_XX_LODRVB_WIDTH 1
#define FRF_AB_XX_HIDRVA_LBN 9
#define FRF_AB_XX_HIDRVA_WIDTH 1
#define FRF_AB_XX_LODRVA_LBN 8
#define FRF_AB_XX_LODRVA_WIDTH 1
#define FRF_AB_XX_LPBKD_LBN 3
#define FRF_AB_XX_LPBKD_WIDTH 1
#define FRF_AB_XX_LPBKC_LBN 2
#define FRF_AB_XX_LPBKC_WIDTH 1
#define FRF_AB_XX_LPBKB_LBN 1
#define FRF_AB_XX_LPBKB_WIDTH 1
#define FRF_AB_XX_LPBKA_LBN 0
#define FRF_AB_XX_LPBKA_WIDTH 1

/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
#define FR_AB_XX_TXDRV_CTL 0x00001320
#define FRF_AB_XX_DEQD_LBN 28
#define FRF_AB_XX_DEQD_WIDTH 4
#define FRF_AB_XX_DEQC_LBN 24
#define FRF_AB_XX_DEQC_WIDTH 4
#define FRF_AB_XX_DEQB_LBN 20
#define FRF_AB_XX_DEQB_WIDTH 4
#define FRF_AB_XX_DEQA_LBN 16
#define FRF_AB_XX_DEQA_WIDTH 4
#define FRF_AB_XX_DTXD_LBN 12
#define FRF_AB_XX_DTXD_WIDTH 4
#define FRF_AB_XX_DTXC_LBN 8
#define FRF_AB_XX_DTXC_WIDTH 4
#define FRF_AB_XX_DTXB_LBN 4
#define FRF_AB_XX_DTXB_WIDTH 4
#define FRF_AB_XX_DTXA_LBN 0
#define FRF_AB_XX_DTXA_WIDTH 4
/* XX_PRBS_CTL_REG: XAUI PRBS control register (per-channel RX/TX PRBS
 * pattern select, inversion and checker enable) */
2074#define FR_AB_XX_PRBS_CTL 0x00001330
2075#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
2076#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
2077#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
2078#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
2079#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
2080#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
2081#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
2082#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
2083#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
2084#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
2085#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
2086#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
2087#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
2088#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
2089#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
2090#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
2091#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
2092#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
2093#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
2094#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
2095#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
2096#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
2097#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
2098#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
2099#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
2100#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
2101#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
2102#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
2103#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
2104#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
2105#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
2106#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
2107#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
2108#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
2109#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
2110#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
2111#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
2112#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
2113#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
2114#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
2115#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
2116#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
2117#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
2118#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
2119#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
2120#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
2121#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
2122#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
2123
/* XX_PRBS_CHK_REG: XAUI PRBS checker status register (per-channel
 * degrade-detect, LFSR lock, free-run and error-check flags) */
2125#define FR_AB_XX_PRBS_CHK 0x00001340
2126#define FRF_AB_XX_REV_LB_EN_LBN 16
2127#define FRF_AB_XX_REV_LB_EN_WIDTH 1
2128#define FRF_AB_XX_CH3_DEG_DET_LBN 15
2129#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
2130#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
2131#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
2132#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
2133#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
2134#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
2135#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
2136#define FRF_AB_XX_CH2_DEG_DET_LBN 11
2137#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
2138#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
2139#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
2140#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
2141#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
2142#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
2143#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
2144#define FRF_AB_XX_CH1_DEG_DET_LBN 7
2145#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
2146#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
2147#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
2148#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
2149#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
2150#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
2151#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
2152#define FRF_AB_XX_CH0_DEG_DET_LBN 3
2153#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
2154#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
2155#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
2156#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
2157#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
2158#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
2159#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
2160
/* XX_PRBS_ERR_REG: XAUI PRBS per-channel 8-bit error counters */
2162#define FR_AB_XX_PRBS_ERR 0x00001350
2163#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
2164#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
2165#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
2166#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
2167#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
2168#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
2169#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
2170#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
2171
2172/* XX_CORE_STAT_REG: XAUI XGXS core status register */
2173#define FR_AB_XX_CORE_STAT 0x00001360
2174#define FRF_AB_XX_FORCE_SIG3_LBN 31
2175#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
2176#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
2177#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
2178#define FRF_AB_XX_FORCE_SIG2_LBN 29
2179#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
2180#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
2181#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
2182#define FRF_AB_XX_FORCE_SIG1_LBN 27
2183#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
2184#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
2185#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
2186#define FRF_AB_XX_FORCE_SIG0_LBN 25
2187#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
2188#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
2189#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
2190#define FRF_AB_XX_XGXS_LB_EN_LBN 23
2191#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
2192#define FRF_AB_XX_XGMII_LB_EN_LBN 22
2193#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
2194#define FRF_AB_XX_MATCH_FAULT_LBN 21
2195#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
2196#define FRF_AB_XX_ALIGN_DONE_LBN 20
2197#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
2198#define FRF_AB_XX_SYNC_STAT3_LBN 19
2199#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
2200#define FRF_AB_XX_SYNC_STAT2_LBN 18
2201#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
2202#define FRF_AB_XX_SYNC_STAT1_LBN 17
2203#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
2204#define FRF_AB_XX_SYNC_STAT0_LBN 16
2205#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
2206#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
2207#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
2208#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
2209#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
2210#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
2211#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
2212#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
2213#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
2214#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
2215#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
2216#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
2217#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
2218#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
2219#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
2220#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
2221#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
2222#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
2223#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
2224#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
2225#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
2226#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
2227#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
2228#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
2229#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
2230#define FRF_AB_XX_DISPERR_CH3_LBN 3
2231#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
2232#define FRF_AB_XX_DISPERR_CH2_LBN 2
2233#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
2234#define FRF_AB_XX_DISPERR_CH1_LBN 1
2235#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
2236#define FRF_AB_XX_DISPERR_CH0_LBN 0
2237#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
2238
2239/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
2240#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
2241#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
2242#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
2243/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
2244#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
2245#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
2246#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
2247#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
2248#define FRF_CZ_RX_HDR_SPLIT_LBN 90
2249#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
2250#define FRF_AA_RX_RESET_LBN 89
2251#define FRF_AA_RX_RESET_WIDTH 1
2252#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
2253#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
2254#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
2255#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
2256#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
2257#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
2258#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
2259#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
2260#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
2261#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
2262#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
2263#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
2264#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
2265#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
2266#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
2267#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
2268#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
2269#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
2270#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
2271#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
2272#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
2273#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
2274#define FFE_AZ_RX_DESCQ_SIZE_4K 3
2275#define FFE_AZ_RX_DESCQ_SIZE_2K 2
2276#define FFE_AZ_RX_DESCQ_SIZE_1K 1
2277#define FFE_AZ_RX_DESCQ_SIZE_512 0
2278#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
2279#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
2280#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
2281#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
2282#define FRF_AZ_RX_DESCQ_EN_LBN 0
2283#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
2284
2285/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
2286#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
2287#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
2288#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
2289/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
2290#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
2291#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
2292#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
2293#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
2294#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
2295#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
2296#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
2297#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
2298#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
2299#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
2300#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
2301#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
2302#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
2303#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
2304#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
2305#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
2306#define FRF_AZ_TX_DESCQ_EN_LBN 88
2307#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
2308#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
2309#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
2310#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
2311#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
2312#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
2313#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
2314#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
2315#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
2316#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
2317#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
2318#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
2319#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
2320#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
2321#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
2322#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
2323#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
2324#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
2325#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
2326#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
2327#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
2328#define FFE_AZ_TX_DESCQ_SIZE_4K 3
2329#define FFE_AZ_TX_DESCQ_SIZE_2K 2
2330#define FFE_AZ_TX_DESCQ_SIZE_1K 1
2331#define FFE_AZ_TX_DESCQ_SIZE_512 0
2332#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
2333#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
2334#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
2335#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
2336
2337/* EVQ_PTR_TBL_KER: Event queue pointer table */
2338#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
2339#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
2340#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
2341/* EVQ_PTR_TBL: Event queue pointer table */
2342#define FR_BZ_EVQ_PTR_TBL 0x00f60000
2343#define FR_BZ_EVQ_PTR_TBL_STEP 16
2344#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
2345#define FR_BB_EVQ_PTR_TBL_ROWS 4096
2346#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
2347#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
2348#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
2349#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
2350#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
2351#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
2352#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
2353#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
2354#define FRF_AZ_EVQ_EN_LBN 23
2355#define FRF_AZ_EVQ_EN_WIDTH 1
2356#define FRF_AZ_EVQ_SIZE_LBN 20
2357#define FRF_AZ_EVQ_SIZE_WIDTH 3
2358#define FFE_AZ_EVQ_SIZE_32K 6
2359#define FFE_AZ_EVQ_SIZE_16K 5
2360#define FFE_AZ_EVQ_SIZE_8K 4
2361#define FFE_AZ_EVQ_SIZE_4K 3
2362#define FFE_AZ_EVQ_SIZE_2K 2
2363#define FFE_AZ_EVQ_SIZE_1K 1
2364#define FFE_AZ_EVQ_SIZE_512 0
2365#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
2366#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
2367
2368/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */
2369#define FR_AA_BUF_HALF_TBL_KER 0x00018000
2370#define FR_AA_BUF_HALF_TBL_KER_STEP 8
2371#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
2372/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */
2373#define FR_BZ_BUF_HALF_TBL 0x00800000
2374#define FR_BZ_BUF_HALF_TBL_STEP 8
2375#define FR_CZ_BUF_HALF_TBL_ROWS 147456
2376#define FR_BB_BUF_HALF_TBL_ROWS 524288
2377#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
2378#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
2379#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
2380#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
2381#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
2382#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
2383#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
2384#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
2385
2386/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */
2387#define FR_AA_BUF_FULL_TBL_KER 0x00018000
2388#define FR_AA_BUF_FULL_TBL_KER_STEP 8
2389#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
2390/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */
2391#define FR_BZ_BUF_FULL_TBL 0x00800000
2392#define FR_BZ_BUF_FULL_TBL_STEP 8
2393#define FR_CZ_BUF_FULL_TBL_ROWS 147456
2394#define FR_BB_BUF_FULL_TBL_ROWS 917504
2395#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
2396#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
2397#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
2398#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
2399#define FRF_AZ_BUF_ADR_REGION_LBN 48
2400#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
2401#define FFE_AZ_BUF_ADR_REGN3 3
2402#define FFE_AZ_BUF_ADR_REGN2 2
2403#define FFE_AZ_BUF_ADR_REGN1 1
2404#define FFE_AZ_BUF_ADR_REGN0 0
2405#define FRF_AZ_BUF_ADR_FBUF_LBN 14
2406#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
2407#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
2408#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
2409
2410/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
2411#define FR_BZ_RX_FILTER_TBL0 0x00f00000
2412#define FR_BZ_RX_FILTER_TBL0_STEP 32
2413#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
2414/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
2415#define FR_BB_RX_FILTER_TBL1 0x00f00010
2416#define FR_BB_RX_FILTER_TBL1_STEP 32
2417#define FR_BB_RX_FILTER_TBL1_ROWS 8192
2418#define FRF_BZ_RSS_EN_LBN 110
2419#define FRF_BZ_RSS_EN_WIDTH 1
2420#define FRF_BZ_SCATTER_EN_LBN 109
2421#define FRF_BZ_SCATTER_EN_WIDTH 1
2422#define FRF_BZ_TCP_UDP_LBN 108
2423#define FRF_BZ_TCP_UDP_WIDTH 1
2424#define FRF_BZ_RXQ_ID_LBN 96
2425#define FRF_BZ_RXQ_ID_WIDTH 12
2426#define FRF_BZ_DEST_IP_LBN 64
2427#define FRF_BZ_DEST_IP_WIDTH 32
2428#define FRF_BZ_DEST_PORT_TCP_LBN 48
2429#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
2430#define FRF_BZ_SRC_IP_LBN 16
2431#define FRF_BZ_SRC_IP_WIDTH 32
2432#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
2433#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
2434
2435/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
2436#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
2437#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
2438#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
2439#define FRF_CZ_RMFT_RSS_EN_LBN 75
2440#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
2441#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
2442#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
2443#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
2444#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
2445#define FRF_CZ_RMFT_RXQ_ID_LBN 61
2446#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
2447#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
2448#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
2449#define FRF_CZ_RMFT_DEST_MAC_LBN 16
2450#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44
2451#define FRF_CZ_RMFT_VLAN_ID_LBN 0
2452#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
2453
/* TIMER_TBL: Timer table */
#define FR_BZ_TIMER_TBL 0x00f70000
#define FR_BZ_TIMER_TBL_STEP 16
/* Row count differs by silicon revision (CZ vs BB) */
#define FR_CZ_TIMER_TBL_ROWS 1024
#define FR_BB_TIMER_TBL_ROWS 4096
#define FRF_CZ_TIMER_Q_EN_LBN 33
#define FRF_CZ_TIMER_Q_EN_WIDTH 1
#define FRF_CZ_INT_ARMD_LBN 32
#define FRF_CZ_INT_ARMD_WIDTH 1
#define FRF_CZ_INT_PEND_LBN 31
#define FRF_CZ_INT_PEND_WIDTH 1
#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
/* TIMER_MODE field values (2-bit field; position differs per revision) */
#define FRF_CZ_TIMER_MODE_LBN 14
#define FRF_CZ_TIMER_MODE_WIDTH 2
#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
#define FFE_CZ_TIMER_MODE_TRIG_START 2
#define FFE_CZ_TIMER_MODE_IMMED_START 1
#define FFE_CZ_TIMER_MODE_DIS 0
#define FRF_BB_TIMER_MODE_LBN 12
#define FRF_BB_TIMER_MODE_WIDTH 2
/* NOTE(review): on the BB revision INT_HLDOFF and TRIG_START both encode
 * as 2 (unlike CZ where INT_HLDOFF is 3) -- this appears deliberate in the
 * generated definitions, but confirm against the Falcon B0 datasheet. */
#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
#define FFE_BB_TIMER_MODE_TRIG_START 2
#define FFE_BB_TIMER_MODE_IMMED_START 1
#define FFE_BB_TIMER_MODE_DIS 0
#define FRF_CZ_TIMER_VAL_LBN 0
#define FRF_CZ_TIMER_VAL_WIDTH 14
#define FRF_BB_TIMER_VAL_LBN 0
#define FRF_BB_TIMER_VAL_WIDTH 12
2485
2486/* TX_PACE_TBL: Transmit pacing table */
2487#define FR_BZ_TX_PACE_TBL 0x00f80000
2488#define FR_BZ_TX_PACE_TBL_STEP 16
2489#define FR_CZ_TX_PACE_TBL_ROWS 1024
2490#define FR_BB_TX_PACE_TBL_ROWS 4096
2491#define FRF_BZ_TX_PACE_LBN 0
2492#define FRF_BZ_TX_PACE_WIDTH 5
2493
2494/* RX_INDIRECTION_TBL: RX Indirection Table */
2495#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
2496#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
2497#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
2498#define FRF_BZ_IT_QUEUE_LBN 0
2499#define FRF_BZ_IT_QUEUE_WIDTH 6
2500
2501/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
2502#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
2503#define FR_CZ_TX_FILTER_TBL0_STEP 16
2504#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
2505#define FRF_CZ_TIFT_TCP_UDP_LBN 108
2506#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
2507#define FRF_CZ_TIFT_TXQ_ID_LBN 96
2508#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
2509#define FRF_CZ_TIFT_DEST_IP_LBN 64
2510#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
2511#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
2512#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
2513#define FRF_CZ_TIFT_SRC_IP_LBN 16
2514#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
2515#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
2516#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
2517
2518/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
2519#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
2520#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
2521#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
2522#define FRF_CZ_TMFT_TXQ_ID_LBN 61
2523#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
2524#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
2525#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
2526#define FRF_CZ_TMFT_SRC_MAC_LBN 16
2527#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44
2528#define FRF_CZ_TMFT_VLAN_ID_LBN 0
2529#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
2530
2531/* MC_TREG_SMEM: MC Shared Memory */
2532#define FR_CZ_MC_TREG_SMEM 0x00ff0000
2533#define FR_CZ_MC_TREG_SMEM_STEP 4
2534#define FR_CZ_MC_TREG_SMEM_ROWS 512
2535#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
2536#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
2537
2538/* MSIX_VECTOR_TABLE: MSIX Vector Table */
2539#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
2540#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
2541#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
2542/* MSIX_VECTOR_TABLE: MSIX Vector Table */
2543#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
2544/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
2545#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
2546#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
2547#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
2548#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
2549#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
2550#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
2551#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
2552#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
2553#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
2554#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
2555#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
2556
2557/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
2558#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
2559#define FR_BZ_MSIX_PBA_TABLE_STEP 4
2560#define FR_BB_MSIX_PBA_TABLE_ROWS 2
2561/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
2562#define FR_CZ_MSIX_PBA_TABLE 0x00008000
2563/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
2564#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
2565#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
2566#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
2567
2568/* SRM_DBG_REG: SRAM debug access */
2569#define FR_BZ_SRM_DBG 0x03000000
2570#define FR_BZ_SRM_DBG_STEP 8
2571#define FR_CZ_SRM_DBG_ROWS 262144
2572#define FR_BB_SRM_DBG_ROWS 2097152
2573#define FRF_BZ_SRM_DBG_LBN 0
2574#define FRF_BZ_SRM_DBG_WIDTH 64
2575
2576/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
2577#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
2578#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
2579#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
2580#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
2581#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
2582
2583/* DRIVER_EV */
2584#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
2585#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
2586#define FSE_BZ_TX_DSC_ERROR_EV 15
2587#define FSE_BZ_RX_DSC_ERROR_EV 14
2588#define FSE_AA_RX_RECOVER_EV 11
2589#define FSE_AZ_TIMER_EV 10
2590#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
2591#define FSE_AZ_WAKE_UP_EV 6
2592#define FSE_AZ_SRM_UPD_DONE_EV 5
2593#define FSE_AB_EVQ_NOT_EN_EV 3
2594#define FSE_AZ_EVQ_INIT_DONE_EV 2
2595#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
2596#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
2597#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
2598#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
2599
2600/* EVENT_ENTRY */
2601#define FSF_AZ_EV_CODE_LBN 60
2602#define FSF_AZ_EV_CODE_WIDTH 4
2603#define FSE_CZ_EV_CODE_MCDI_EV 12
2604#define FSE_CZ_EV_CODE_USER_EV 8
2605#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
2606#define FSE_AZ_EV_CODE_GLOBAL_EV 6
2607#define FSE_AZ_EV_CODE_DRIVER_EV 5
2608#define FSE_AZ_EV_CODE_TX_EV 2
2609#define FSE_AZ_EV_CODE_RX_EV 0
2610#define FSF_AZ_EV_DATA_LBN 0
2611#define FSF_AZ_EV_DATA_WIDTH 60
2612
2613/* GLOBAL_EV */
2614#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
2615#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
2616#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
2617#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
2618#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
2619#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
2620#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
2621#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
2622#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
2623#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
2624#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
2625#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
2626
2627/* LEGACY_INT_VEC */
2628#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
2629#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
2630#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
2631#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
2632#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
2633#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
2634#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
2635#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
2636#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
2637#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
2638
2639/* MC_XGMAC_FLTR_RULE_DEF */
2640#define FSF_CZ_MC_XFRC_MODE_LBN 416
2641#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
2642#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
2643#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
2644#define FSF_CZ_MC_XFRC_HASH_LBN 384
2645#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
2646#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
2647#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
2648#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
2649#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
2650#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
2651#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
2652
2653/* RX_EV */
2654#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
2655#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
2656#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
2657#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
2658#define FSF_AZ_RX_EV_PKT_OK_LBN 56
2659#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
2660#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
2661#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
2662#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
2663#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
2664#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
2665#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
2666#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
2667#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
2668#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
2669#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
2670#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
2671#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
2672#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
2673#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
2674#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
2675#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
2676#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
2677#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
2678#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
2679#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
2680#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
2681#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
2682#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
2683#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
2684#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
2685#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
2686#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
2687#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
2688#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
2689#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
2690#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
2691#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
2692#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
2693#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
2694#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
2695#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
2696#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
2697#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
2698#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
2699#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
2700#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
2701#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
2702#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
2703#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
2704#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
2705#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
2706#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
2707#define FSF_AZ_RX_EV_PORT_LBN 30
2708#define FSF_AZ_RX_EV_PORT_WIDTH 1
2709#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
2710#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
2711#define FSF_AZ_RX_EV_SOP_LBN 15
2712#define FSF_AZ_RX_EV_SOP_WIDTH 1
2713#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
2714#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
2715#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
2716#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
2717#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
2718#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
2719#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
2720#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
2721
2722/* RX_KER_DESC */
2723#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
2724#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
2725#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
2726#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
2727#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
2728#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
2729
2730/* RX_USER_DESC */
2731#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
2732#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
2733#define FSF_AZ_RX_USER_BUF_ID_LBN 0
2734#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
2735
2736/* TX_EV */
2737#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
2738#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
2739#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
2740#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
2741#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
2742#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
2743#define FSF_AZ_TX_EV_PORT_LBN 16
2744#define FSF_AZ_TX_EV_PORT_WIDTH 1
2745#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
2746#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
2747#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
2748#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
2749#define FSF_AZ_TX_EV_COMP_LBN 12
2750#define FSF_AZ_TX_EV_COMP_WIDTH 1
2751#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
2752#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
2753
2754/* TX_KER_DESC */
2755#define FSF_AZ_TX_KER_CONT_LBN 62
2756#define FSF_AZ_TX_KER_CONT_WIDTH 1
2757#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
2758#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
2759#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
2760#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
2761#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
2762#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
2763
2764/* TX_USER_DESC */
2765#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
2766#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
2767#define FSF_AZ_TX_USER_CONT_LBN 46
2768#define FSF_AZ_TX_USER_CONT_WIDTH 1
2769#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
2770#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
2771#define FSF_AZ_TX_USER_BUF_ID_LBN 13
2772#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
2773#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
2774#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
2775
2776/* USER_EV */
2777#define FSF_CZ_USER_QID_LBN 32
2778#define FSF_CZ_USER_QID_WIDTH 10
2779#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
2780#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
2781
2782/**************************************************************************
2783 *
2784 * Falcon B0 PCIe core indirect registers
2785 *
2786 **************************************************************************
2787 */
2788
2789#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
2790
2791#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
2792
2793#define FPCR_BB_ACK_RPL_TIMER 0x700
2794#define FPCRF_BB_ACK_TL_LBN 0
2795#define FPCRF_BB_ACK_TL_WIDTH 16
2796#define FPCRF_BB_RPL_TL_LBN 16
2797#define FPCRF_BB_RPL_TL_WIDTH 16
2798
2799#define FPCR_BB_ACK_FREQ 0x70C
2800#define FPCRF_BB_ACK_FREQ_LBN 0
2801#define FPCRF_BB_ACK_FREQ_WIDTH 7
2802
2803/**************************************************************************
2804 *
2805 * Pseudo-registers and fields
2806 *
2807 **************************************************************************
2808 */
2809
2810/* Interrupt acknowledge work-around register (A0/A1 only) */
2811#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
2812
2813/* EE_SPI_HCMD_REG: SPI host command register */
2814/* Values for the EE_SPI_HCMD_SF_SEL register field */
2815#define FFE_AB_SPI_DEVICE_EEPROM 0
2816#define FFE_AB_SPI_DEVICE_FLASH 1
2817
2818/* NIC_STAT_REG: NIC status register */
2819#define FRF_AB_STRAP_10G_LBN 2
2820#define FRF_AB_STRAP_10G_WIDTH 1
2821#define FRF_AA_STRAP_PCIE_LBN 0
2822#define FRF_AA_STRAP_PCIE_WIDTH 1
2823
2824/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
2825#define FRF_AZ_FATAL_INTR_LBN 0
2826#define FRF_AZ_FATAL_INTR_WIDTH 12
2827
2828/* SRM_CFG_REG: SRAM configuration register */
2829/* We treat the number of SRAM banks and bank size as a single field */
2830#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
2831#define FRF_AZ_SRM_NB_SZ_WIDTH \
2832 (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
2833#define FFE_AB_SRM_NB1_SZ2M 0
2834#define FFE_AB_SRM_NB1_SZ4M 1
2835#define FFE_AB_SRM_NB1_SZ8M 2
2836#define FFE_AB_SRM_NB_SZ_DEF 3
2837#define FFE_AB_SRM_NB2_SZ4M 4
2838#define FFE_AB_SRM_NB2_SZ8M 5
2839#define FFE_AB_SRM_NB2_SZ16M 6
2840#define FFE_AB_SRM_NB_SZ_RES 7
2841
/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
/* We write just the last dword of these registers */
/* BUILD_BUG_ON_ZERO() asserts at compile time that the A-rev and B-rev
 * register addresses agree, so a single definition can serve both. */
#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
	(BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
	 FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
/* Field position rebased to the final dword: the 3 skipped dwords
 * (3 * 32 bits) are subtracted from the full-register bit number. */
#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH

/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
/* Same scheme as the RX definitions above: last-dword address plus
 * rebased write-pointer field position. */
#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
	(BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
	 FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH

/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1

/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1

/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
/* The max frame size is held as separate LO/HI parts; these aliases
 * treat it as one contiguous field. */
#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
					 FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)

/* XM_RX_PARAM_REG: XGMAC receive parameter register */
#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
					 FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)

/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
/* Default values */
#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */

/* XX_CORE_STAT_REG: XAUI XGXS core status register */
/* XGXS all-lanes status fields */
/* The per-lane status bits repeat for the 4 XAUI lanes; these aliases
 * address all lanes as one multi-bit field starting at lane 0. */
#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
#define FRF_AB_XX_SYNC_STAT_WIDTH 4
#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
#define FRF_AB_XX_COMMA_DET_WIDTH 4
#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
#define FRF_AB_XX_CHAR_ERR_WIDTH 4
#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
#define FRF_AB_XX_DISPERR_WIDTH 4
#define FFE_AB_XX_STAT_ALL_LANES 0xf
#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
#define FRF_AB_XX_FORCE_SIG_WIDTH 8
#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
2895
/* DRIVER_EV */
/* Sub-fields of an RX flush completion event */
#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12

/* EVENT_ENTRY */
/* Magic number field for event test */
/* 32-bit driver-generated event payload used for interrupt/event
 * self-tests */
#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
2907
2908/**************************************************************************
2909 *
2910 * Falcon MAC stats
2911 *
2912 **************************************************************************
2913 *
2914 */
2915
2916#define GRxGoodOct_offset 0x0
2917#define GRxGoodOct_WIDTH 48
2918#define GRxBadOct_offset 0x8
2919#define GRxBadOct_WIDTH 48
2920#define GRxMissPkt_offset 0x10
2921#define GRxMissPkt_WIDTH 32
2922#define GRxFalseCRS_offset 0x14
2923#define GRxFalseCRS_WIDTH 32
2924#define GRxPausePkt_offset 0x18
2925#define GRxPausePkt_WIDTH 32
2926#define GRxBadPkt_offset 0x1C
2927#define GRxBadPkt_WIDTH 32
2928#define GRxUcastPkt_offset 0x20
2929#define GRxUcastPkt_WIDTH 32
2930#define GRxMcastPkt_offset 0x24
2931#define GRxMcastPkt_WIDTH 32
2932#define GRxBcastPkt_offset 0x28
2933#define GRxBcastPkt_WIDTH 32
2934#define GRxGoodLt64Pkt_offset 0x2C
2935#define GRxGoodLt64Pkt_WIDTH 32
2936#define GRxBadLt64Pkt_offset 0x30
2937#define GRxBadLt64Pkt_WIDTH 32
2938#define GRx64Pkt_offset 0x34
2939#define GRx64Pkt_WIDTH 32
2940#define GRx65to127Pkt_offset 0x38
2941#define GRx65to127Pkt_WIDTH 32
2942#define GRx128to255Pkt_offset 0x3C
2943#define GRx128to255Pkt_WIDTH 32
2944#define GRx256to511Pkt_offset 0x40
2945#define GRx256to511Pkt_WIDTH 32
2946#define GRx512to1023Pkt_offset 0x44
2947#define GRx512to1023Pkt_WIDTH 32
2948#define GRx1024to15xxPkt_offset 0x48
2949#define GRx1024to15xxPkt_WIDTH 32
2950#define GRx15xxtoJumboPkt_offset 0x4C
2951#define GRx15xxtoJumboPkt_WIDTH 32
2952#define GRxGtJumboPkt_offset 0x50
2953#define GRxGtJumboPkt_WIDTH 32
2954#define GRxFcsErr64to15xxPkt_offset 0x54
2955#define GRxFcsErr64to15xxPkt_WIDTH 32
2956#define GRxFcsErr15xxtoJumboPkt_offset 0x58
2957#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
2958#define GRxFcsErrGtJumboPkt_offset 0x5C
2959#define GRxFcsErrGtJumboPkt_WIDTH 32
2960#define GTxGoodBadOct_offset 0x80
2961#define GTxGoodBadOct_WIDTH 48
2962#define GTxGoodOct_offset 0x88
2963#define GTxGoodOct_WIDTH 48
2964#define GTxSglColPkt_offset 0x90
2965#define GTxSglColPkt_WIDTH 32
2966#define GTxMultColPkt_offset 0x94
2967#define GTxMultColPkt_WIDTH 32
2968#define GTxExColPkt_offset 0x98
2969#define GTxExColPkt_WIDTH 32
2970#define GTxDefPkt_offset 0x9C
2971#define GTxDefPkt_WIDTH 32
2972#define GTxLateCol_offset 0xA0
2973#define GTxLateCol_WIDTH 32
2974#define GTxExDefPkt_offset 0xA4
2975#define GTxExDefPkt_WIDTH 32
2976#define GTxPausePkt_offset 0xA8
2977#define GTxPausePkt_WIDTH 32
2978#define GTxBadPkt_offset 0xAC
2979#define GTxBadPkt_WIDTH 32
2980#define GTxUcastPkt_offset 0xB0
2981#define GTxUcastPkt_WIDTH 32
2982#define GTxMcastPkt_offset 0xB4
2983#define GTxMcastPkt_WIDTH 32
2984#define GTxBcastPkt_offset 0xB8
2985#define GTxBcastPkt_WIDTH 32
2986#define GTxLt64Pkt_offset 0xBC
2987#define GTxLt64Pkt_WIDTH 32
2988#define GTx64Pkt_offset 0xC0
2989#define GTx64Pkt_WIDTH 32
2990#define GTx65to127Pkt_offset 0xC4
2991#define GTx65to127Pkt_WIDTH 32
2992#define GTx128to255Pkt_offset 0xC8
2993#define GTx128to255Pkt_WIDTH 32
2994#define GTx256to511Pkt_offset 0xCC
2995#define GTx256to511Pkt_WIDTH 32
2996#define GTx512to1023Pkt_offset 0xD0
2997#define GTx512to1023Pkt_WIDTH 32
2998#define GTx1024to15xxPkt_offset 0xD4
2999#define GTx1024to15xxPkt_WIDTH 32
3000#define GTx15xxtoJumboPkt_offset 0xD8
3001#define GTx15xxtoJumboPkt_WIDTH 32
3002#define GTxGtJumboPkt_offset 0xDC
3003#define GTxGtJumboPkt_WIDTH 32
3004#define GTxNonTcpUdpPkt_offset 0xE0
3005#define GTxNonTcpUdpPkt_WIDTH 16
3006#define GTxMacSrcErrPkt_offset 0xE4
3007#define GTxMacSrcErrPkt_WIDTH 16
3008#define GTxIpSrcErrPkt_offset 0xE8
3009#define GTxIpSrcErrPkt_WIDTH 16
3010#define GDmaDone_offset 0xEC
3011#define GDmaDone_WIDTH 32
3012
/* XGMAC (10G) statistics counters; same _offset/_WIDTH scheme as the
 * GMAC counters above. */
#define XgRxOctets_offset 0x0
#define XgRxOctets_WIDTH 48
#define XgRxOctetsOK_offset 0x8
#define XgRxOctetsOK_WIDTH 48
#define XgRxPkts_offset 0x10
#define XgRxPkts_WIDTH 32
#define XgRxPktsOK_offset 0x14
#define XgRxPktsOK_WIDTH 32
#define XgRxBroadcastPkts_offset 0x18
#define XgRxBroadcastPkts_WIDTH 32
#define XgRxMulticastPkts_offset 0x1C
#define XgRxMulticastPkts_WIDTH 32
#define XgRxUnicastPkts_offset 0x20
#define XgRxUnicastPkts_WIDTH 32
#define XgRxUndersizePkts_offset 0x24
#define XgRxUndersizePkts_WIDTH 32
#define XgRxOversizePkts_offset 0x28
#define XgRxOversizePkts_WIDTH 32
#define XgRxJabberPkts_offset 0x2C
#define XgRxJabberPkts_WIDTH 32
#define XgRxUndersizeFCSerrorPkts_offset 0x30
#define XgRxUndersizeFCSerrorPkts_WIDTH 32
#define XgRxDropEvents_offset 0x34
#define XgRxDropEvents_WIDTH 32
#define XgRxFCSerrorPkts_offset 0x38
#define XgRxFCSerrorPkts_WIDTH 32
#define XgRxAlignError_offset 0x3C
#define XgRxAlignError_WIDTH 32
#define XgRxSymbolError_offset 0x40
#define XgRxSymbolError_WIDTH 32
#define XgRxInternalMACError_offset 0x44
#define XgRxInternalMACError_WIDTH 32
#define XgRxControlPkts_offset 0x48
#define XgRxControlPkts_WIDTH 32
#define XgRxPausePkts_offset 0x4C
#define XgRxPausePkts_WIDTH 32
#define XgRxPkts64Octets_offset 0x50
#define XgRxPkts64Octets_WIDTH 32
#define XgRxPkts65to127Octets_offset 0x54
#define XgRxPkts65to127Octets_WIDTH 32
#define XgRxPkts128to255Octets_offset 0x58
#define XgRxPkts128to255Octets_WIDTH 32
#define XgRxPkts256to511Octets_offset 0x5C
#define XgRxPkts256to511Octets_WIDTH 32
#define XgRxPkts512to1023Octets_offset 0x60
#define XgRxPkts512to1023Octets_WIDTH 32
#define XgRxPkts1024to15xxOctets_offset 0x64
#define XgRxPkts1024to15xxOctets_WIDTH 32
#define XgRxPkts15xxtoMaxOctets_offset 0x68
#define XgRxPkts15xxtoMaxOctets_WIDTH 32
#define XgRxLengthError_offset 0x6C
#define XgRxLengthError_WIDTH 32
/* TX counters start at 0x80; 0x70-0x7F is unused */
#define XgTxPkts_offset 0x80
#define XgTxPkts_WIDTH 32
#define XgTxOctets_offset 0x88
#define XgTxOctets_WIDTH 48
#define XgTxMulticastPkts_offset 0x90
#define XgTxMulticastPkts_WIDTH 32
#define XgTxBroadcastPkts_offset 0x94
#define XgTxBroadcastPkts_WIDTH 32
#define XgTxUnicastPkts_offset 0x98
#define XgTxUnicastPkts_WIDTH 32
#define XgTxControlPkts_offset 0x9C
#define XgTxControlPkts_WIDTH 32
#define XgTxPausePkts_offset 0xA0
#define XgTxPausePkts_WIDTH 32
#define XgTxPkts64Octets_offset 0xA4
#define XgTxPkts64Octets_WIDTH 32
#define XgTxPkts65to127Octets_offset 0xA8
#define XgTxPkts65to127Octets_WIDTH 32
#define XgTxPkts128to255Octets_offset 0xAC
#define XgTxPkts128to255Octets_WIDTH 32
#define XgTxPkts256to511Octets_offset 0xB0
#define XgTxPkts256to511Octets_WIDTH 32
#define XgTxPkts512to1023Octets_offset 0xB4
#define XgTxPkts512to1023Octets_WIDTH 32
#define XgTxPkts1024to15xxOctets_offset 0xB8
#define XgTxPkts1024to15xxOctets_WIDTH 32
#define XgTxPkts1519toMaxOctets_offset 0xBC
#define XgTxPkts1519toMaxOctets_WIDTH 32
#define XgTxUndersizePkts_offset 0xC0
#define XgTxUndersizePkts_WIDTH 32
#define XgTxOversizePkts_offset 0xC4
#define XgTxOversizePkts_WIDTH 32
#define XgTxNonTcpUdpPkt_offset 0xC8
#define XgTxNonTcpUdpPkt_WIDTH 16
#define XgTxMacSrcErrPkt_offset 0xCC
#define XgTxMacSrcErrPkt_WIDTH 16
#define XgTxIpSrcErrPkt_offset 0xD0
#define XgTxIpSrcErrPkt_WIDTH 16
#define XgDmaDone_offset 0xD4
#define XgDmaDone_WIDTH 32

/* Generation-count markers written by hardware around a stats DMA;
 * NOTE(review): presumably the driver polls for DONE to know the stats
 * buffer is complete — confirm against the stats-fetch code. */
#define FALCON_STATS_NOT_DONE 0x00000000
#define FALCON_STATS_DONE 0xffffffff
3108
/* Interrupt status register bits */
#define FATAL_INT_LBN 64
#define FATAL_INT_WIDTH 1
#define INT_EVQS_LBN 40
#define INT_EVQS_WIDTH 4
#define INT_FLAG_LBN 32
#define INT_FLAG_WIDTH 1
/* Event queue FIFO half-full / almost-full indications */
#define EVQ_FIFO_HF_LBN 1
#define EVQ_FIFO_HF_WIDTH 1
#define EVQ_FIFO_AF_LBN 0
#define EVQ_FIFO_AF_WIDTH 1
3120
3121/**************************************************************************
3122 *
3123 * Falcon non-volatile configuration
3124 *
3125 **************************************************************************
3126 */
3127
3128/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
3129struct falcon_nvconfig_board_v2 {
3130 __le16 nports;
3131 u8 port0_phy_addr;
3132 u8 port0_phy_type;
3133 u8 port1_phy_addr;
3134 u8 port1_phy_type;
3135 __le16 asic_sub_revision;
3136 __le16 board_revision;
3137} __packed;
3138
/* Board configuration v3 extra information */
/* One SPI device descriptor word per device (EEPROM and flash — see
 * FFE_AB_SPI_DEVICE_*); bit layout given by the SPI_DEV_TYPE_* fields
 * below. */
struct falcon_nvconfig_board_v3 {
	__le32 spi_device_type[2];
} __packed;
3143
/* Bit numbers for spi_device_type */
#define SPI_DEV_TYPE_SIZE_LBN 0
#define SPI_DEV_TYPE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
/* Extract a named field from a spi_device_type word, e.g.
 * SPI_DEV_TYPE_FIELD(type, SPI_DEV_TYPE_SIZE).  NOTE(review):
 * presumably EFX_LOW_BIT(field) and EFX_WIDTH(field) token-paste
 * _LBN/_WIDTH onto the field name — they are defined elsewhere;
 * confirm before relying on this description. */
#define SPI_DEV_TYPE_FIELD(type, field) \
	(((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
3157
/* Byte offset of the nvconfig image within the boot device */
#define FALCON_NVCONFIG_OFFSET 0x300

/* Expected value of falcon_nvconfig.board_magic_num */
#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Non-volatile configuration image; the trailing comments give each
 * member's absolute offset in the boot device (image base is 0x300).
 * __packed keeps the struct congruent with the on-flash layout. */
struct falcon_nvconfig {
	efx_oword_t ee_vpd_cfg_reg;			/* 0x300 */
	/* 8 bytes reserved per port — presumably a 6-byte MAC address
	 * plus padding; confirm against the code that reads this. */
	u8 mac_address[2][8];				/* 0x310 */
	efx_oword_t pcie_sd_ctl0123_reg;		/* 0x320 */
	efx_oword_t pcie_sd_ctl45_reg;			/* 0x330 */
	efx_oword_t pcie_pcs_ctl_stat_reg;		/* 0x340 */
	efx_oword_t hw_init_reg;			/* 0x350 */
	efx_oword_t nic_stat_reg;			/* 0x360 */
	efx_oword_t glb_ctl_reg;			/* 0x370 */
	efx_oword_t srm_cfg_reg;			/* 0x380 */
	efx_oword_t spare_reg;				/* 0x390 */
	__le16 board_magic_num;				/* 0x3A0 */
	__le16 board_struct_ver;
	__le16 board_checksum;
	struct falcon_nvconfig_board_v2 board_v2;
	efx_oword_t ee_base_page_reg;			/* 0x3B0 */
	struct falcon_nvconfig_board_v3 board_v3;	/* 0x3C0 */
} __packed;
3179
3180#endif /* EFX_REGS_H */
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 98bff5ada09a..accf055ff89d 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -16,7 +16,6 @@
16#include <net/ip.h> 16#include <net/ip.h>
17#include <net/checksum.h> 17#include <net/checksum.h>
18#include "net_driver.h" 18#include "net_driver.h"
19#include "rx.h"
20#include "efx.h" 19#include "efx.h"
21#include "falcon.h" 20#include "falcon.h"
22#include "selftest.h" 21#include "selftest.h"
@@ -61,7 +60,7 @@
61 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? 60 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
62 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) 61 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
63 */ 62 */
64static int rx_alloc_method = RX_ALLOC_METHOD_PAGE; 63static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
65 64
66#define RX_ALLOC_LEVEL_LRO 0x2000 65#define RX_ALLOC_LEVEL_LRO 0x2000
67#define RX_ALLOC_LEVEL_MAX 0x3000 66#define RX_ALLOC_LEVEL_MAX 0x3000
@@ -293,8 +292,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
293 * fill anyway. 292 * fill anyway.
294 */ 293 */
295 fill_level = (rx_queue->added_count - rx_queue->removed_count); 294 fill_level = (rx_queue->added_count - rx_queue->removed_count);
296 EFX_BUG_ON_PARANOID(fill_level > 295 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
297 rx_queue->efx->type->rxd_ring_mask + 1);
298 296
299 /* Don't fill if we don't need to */ 297 /* Don't fill if we don't need to */
300 if (fill_level >= rx_queue->fast_fill_trigger) 298 if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +314,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
316 retry: 314 retry:
317 /* Recalculate current fill level now that we have the lock */ 315 /* Recalculate current fill level now that we have the lock */
318 fill_level = (rx_queue->added_count - rx_queue->removed_count); 316 fill_level = (rx_queue->added_count - rx_queue->removed_count);
319 EFX_BUG_ON_PARANOID(fill_level > 317 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
320 rx_queue->efx->type->rxd_ring_mask + 1);
321 space = rx_queue->fast_fill_limit - fill_level; 318 space = rx_queue->fast_fill_limit - fill_level;
322 if (space < EFX_RX_BATCH) 319 if (space < EFX_RX_BATCH)
323 goto out_unlock; 320 goto out_unlock;
@@ -329,8 +326,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
329 326
330 do { 327 do {
331 for (i = 0; i < EFX_RX_BATCH; ++i) { 328 for (i = 0; i < EFX_RX_BATCH; ++i) {
332 index = (rx_queue->added_count & 329 index = rx_queue->added_count & EFX_RXQ_MASK;
333 rx_queue->efx->type->rxd_ring_mask);
334 rx_buf = efx_rx_buffer(rx_queue, index); 330 rx_buf = efx_rx_buffer(rx_queue, index);
335 rc = efx_init_rx_buffer(rx_queue, rx_buf); 331 rc = efx_init_rx_buffer(rx_queue, rx_buf);
336 if (unlikely(rc)) 332 if (unlikely(rc))
@@ -448,17 +444,23 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
448 bool checksummed) 444 bool checksummed)
449{ 445{
450 struct napi_struct *napi = &channel->napi_str; 446 struct napi_struct *napi = &channel->napi_str;
447 gro_result_t gro_result;
451 448
452 /* Pass the skb/page into the LRO engine */ 449 /* Pass the skb/page into the LRO engine */
453 if (rx_buf->page) { 450 if (rx_buf->page) {
454 struct sk_buff *skb = napi_get_frags(napi); 451 struct page *page = rx_buf->page;
452 struct sk_buff *skb;
455 453
454 EFX_BUG_ON_PARANOID(rx_buf->skb);
455 rx_buf->page = NULL;
456
457 skb = napi_get_frags(napi);
456 if (!skb) { 458 if (!skb) {
457 put_page(rx_buf->page); 459 put_page(page);
458 goto out; 460 return;
459 } 461 }
460 462
461 skb_shinfo(skb)->frags[0].page = rx_buf->page; 463 skb_shinfo(skb)->frags[0].page = page;
462 skb_shinfo(skb)->frags[0].page_offset = 464 skb_shinfo(skb)->frags[0].page_offset =
463 efx_rx_buf_offset(rx_buf); 465 efx_rx_buf_offset(rx_buf);
464 skb_shinfo(skb)->frags[0].size = rx_buf->len; 466 skb_shinfo(skb)->frags[0].size = rx_buf->len;
@@ -470,17 +472,24 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
470 skb->ip_summed = 472 skb->ip_summed =
471 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 473 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
472 474
473 napi_gro_frags(napi); 475 skb_record_rx_queue(skb, channel->channel);
474 476
475out: 477 gro_result = napi_gro_frags(napi);
476 EFX_BUG_ON_PARANOID(rx_buf->skb);
477 rx_buf->page = NULL;
478 } else { 478 } else {
479 EFX_BUG_ON_PARANOID(!rx_buf->skb); 479 struct sk_buff *skb = rx_buf->skb;
480 EFX_BUG_ON_PARANOID(!checksummed);
481 480
482 napi_gro_receive(napi, rx_buf->skb); 481 EFX_BUG_ON_PARANOID(!skb);
482 EFX_BUG_ON_PARANOID(!checksummed);
483 rx_buf->skb = NULL; 483 rx_buf->skb = NULL;
484
485 gro_result = napi_gro_receive(napi, skb);
486 }
487
488 if (gro_result == GRO_NORMAL) {
489 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
490 } else if (gro_result != GRO_DROP) {
491 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
492 channel->irq_mod_score += 2;
484 } 493 }
485} 494}
486 495
@@ -558,7 +567,7 @@ void __efx_rx_packet(struct efx_channel *channel,
558 if (unlikely(efx->loopback_selftest)) { 567 if (unlikely(efx->loopback_selftest)) {
559 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len); 568 efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
560 efx_free_rx_buffer(efx, rx_buf); 569 efx_free_rx_buffer(efx, rx_buf);
561 goto done; 570 return;
562 } 571 }
563 572
564 if (rx_buf->skb) { 573 if (rx_buf->skb) {
@@ -570,34 +579,28 @@ void __efx_rx_packet(struct efx_channel *channel,
570 * at the ethernet header */ 579 * at the ethernet header */
571 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, 580 rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
572 efx->net_dev); 581 efx->net_dev);
582
583 skb_record_rx_queue(rx_buf->skb, channel->channel);
573 } 584 }
574 585
575 if (likely(checksummed || rx_buf->page)) { 586 if (likely(checksummed || rx_buf->page)) {
576 efx_rx_packet_lro(channel, rx_buf, checksummed); 587 efx_rx_packet_lro(channel, rx_buf, checksummed);
577 goto done; 588 return;
578 } 589 }
579 590
580 /* We now own the SKB */ 591 /* We now own the SKB */
581 skb = rx_buf->skb; 592 skb = rx_buf->skb;
582 rx_buf->skb = NULL; 593 rx_buf->skb = NULL;
583
584 EFX_BUG_ON_PARANOID(rx_buf->page);
585 EFX_BUG_ON_PARANOID(rx_buf->skb);
586 EFX_BUG_ON_PARANOID(!skb); 594 EFX_BUG_ON_PARANOID(!skb);
587 595
588 /* Set the SKB flags */ 596 /* Set the SKB flags */
589 skb->ip_summed = CHECKSUM_NONE; 597 skb->ip_summed = CHECKSUM_NONE;
590 598
591 skb_record_rx_queue(skb, channel->channel);
592
593 /* Pass the packet up */ 599 /* Pass the packet up */
594 netif_receive_skb(skb); 600 netif_receive_skb(skb);
595 601
596 /* Update allocation strategy method */ 602 /* Update allocation strategy method */
597 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 603 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
598
599done:
600 ;
601} 604}
602 605
603void efx_rx_strategy(struct efx_channel *channel) 606void efx_rx_strategy(struct efx_channel *channel)
@@ -632,7 +635,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
632 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); 635 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
633 636
634 /* Allocate RX buffers */ 637 /* Allocate RX buffers */
635 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); 638 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
636 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 639 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
637 if (!rx_queue->buffer) 640 if (!rx_queue->buffer)
638 return -ENOMEM; 641 return -ENOMEM;
@@ -647,7 +650,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
647 650
648void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 651void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
649{ 652{
650 struct efx_nic *efx = rx_queue->efx;
651 unsigned int max_fill, trigger, limit; 653 unsigned int max_fill, trigger, limit;
652 654
653 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); 655 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -660,7 +662,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
660 rx_queue->min_overfill = -1U; 662 rx_queue->min_overfill = -1U;
661 663
662 /* Initialise limit fields */ 664 /* Initialise limit fields */
663 max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM; 665 max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
664 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 666 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
665 limit = max_fill * min(rx_refill_limit, 100U) / 100U; 667 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
666 668
@@ -683,7 +685,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
683 685
684 /* Release RX buffers NB start at index 0 not current HW ptr */ 686 /* Release RX buffers NB start at index 0 not current HW ptr */
685 if (rx_queue->buffer) { 687 if (rx_queue->buffer) {
686 for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) { 688 for (i = 0; i <= EFX_RXQ_MASK; i++) {
687 rx_buf = efx_rx_buffer(rx_queue, i); 689 rx_buf = efx_rx_buffer(rx_queue, i);
688 efx_fini_rx_buffer(rx_queue, rx_buf); 690 efx_fini_rx_buffer(rx_queue, rx_buf);
689 } 691 }
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
deleted file mode 100644
index 42ee7555a80b..000000000000
--- a/drivers/net/sfc/rx.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_RX_H
11#define EFX_RX_H
12
13#include "net_driver.h"
14
15int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
16void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
17void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
18void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
19
20void efx_rx_strategy(struct efx_channel *channel);
21void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
22void efx_rx_work(struct work_struct *data);
23void __efx_rx_packet(struct efx_channel *channel,
24 struct efx_rx_buffer *rx_buf, bool checksummed);
25
26#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 817c7efc11e0..74e84afd5b6b 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -20,14 +20,12 @@
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <asm/io.h> 21#include <asm/io.h>
22#include "net_driver.h" 22#include "net_driver.h"
23#include "ethtool.h"
24#include "efx.h" 23#include "efx.h"
25#include "falcon.h" 24#include "falcon.h"
26#include "selftest.h" 25#include "selftest.h"
27#include "boards.h"
28#include "workarounds.h" 26#include "workarounds.h"
29#include "spi.h" 27#include "spi.h"
30#include "falcon_io.h" 28#include "io.h"
31#include "mdio_10g.h" 29#include "mdio_10g.h"
32 30
33/* 31/*
@@ -57,6 +55,7 @@ static const char *payload_msg =
57 * @flush: Drop all packets in efx_loopback_rx_packet 55 * @flush: Drop all packets in efx_loopback_rx_packet
58 * @packet_count: Number of packets being used in this test 56 * @packet_count: Number of packets being used in this test
59 * @skbs: An array of skbs transmitted 57 * @skbs: An array of skbs transmitted
58 * @offload_csum: Checksums are being offloaded
60 * @rx_good: RX good packet count 59 * @rx_good: RX good packet count
61 * @rx_bad: RX bad packet count 60 * @rx_bad: RX bad packet count
62 * @payload: Payload used in tests 61 * @payload: Payload used in tests
@@ -65,10 +64,7 @@ struct efx_loopback_state {
65 bool flush; 64 bool flush;
66 int packet_count; 65 int packet_count;
67 struct sk_buff **skbs; 66 struct sk_buff **skbs;
68
69 /* Checksums are being offloaded */
70 bool offload_csum; 67 bool offload_csum;
71
72 atomic_t rx_good; 68 atomic_t rx_good;
73 atomic_t rx_bad; 69 atomic_t rx_bad;
74 struct efx_loopback_payload payload; 70 struct efx_loopback_payload payload;
@@ -129,7 +125,7 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
129 int rc; 125 int rc;
130 126
131 /* Not supported on A-series silicon */ 127 /* Not supported on A-series silicon */
132 if (falcon_rev(efx) < FALCON_REV_B0) 128 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
133 return 0; 129 return 0;
134 130
135 rc = falcon_test_registers(efx); 131 rc = falcon_test_registers(efx);
@@ -177,8 +173,8 @@ static int efx_test_interrupts(struct efx_nic *efx,
177 return -ETIMEDOUT; 173 return -ETIMEDOUT;
178 174
179 success: 175 success:
180 EFX_LOG(efx, "test interrupt (mode %d) seen on CPU%d\n", 176 EFX_LOG(efx, "%s test interrupt seen on CPU%d\n", INT_MODE(efx),
181 efx->interrupt_mode, efx->last_irq_cpu); 177 efx->last_irq_cpu);
182 tests->interrupt = 1; 178 tests->interrupt = 1;
183 return 0; 179 return 0;
184} 180}
@@ -426,7 +422,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
426 422
427 if (efx_dev_registered(efx)) 423 if (efx_dev_registered(efx))
428 netif_tx_lock_bh(efx->net_dev); 424 netif_tx_lock_bh(efx->net_dev);
429 rc = efx_xmit(efx, tx_queue, skb); 425 rc = efx_enqueue_skb(tx_queue, skb);
430 if (efx_dev_registered(efx)) 426 if (efx_dev_registered(efx))
431 netif_tx_unlock_bh(efx->net_dev); 427 netif_tx_unlock_bh(efx->net_dev);
432 428
@@ -439,7 +435,6 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
439 kfree_skb(skb); 435 kfree_skb(skb);
440 return -EPIPE; 436 return -EPIPE;
441 } 437 }
442 efx->net_dev->trans_start = jiffies;
443 } 438 }
444 439
445 return 0; 440 return 0;
@@ -527,7 +522,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
527 522
528 for (i = 0; i < 3; i++) { 523 for (i = 0; i < 3; i++) {
529 /* Determine how many packets to send */ 524 /* Determine how many packets to send */
530 state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 525 state->packet_count = EFX_TXQ_SIZE / 3;
531 state->packet_count = min(1 << (i << 2), state->packet_count); 526 state->packet_count = min(1 << (i << 2), state->packet_count);
532 state->skbs = kzalloc(sizeof(state->skbs[0]) * 527 state->skbs = kzalloc(sizeof(state->skbs[0]) *
533 state->packet_count, GFP_KERNEL); 528 state->packet_count, GFP_KERNEL);
@@ -612,13 +607,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
612 flush_workqueue(efx->workqueue); 607 flush_workqueue(efx->workqueue);
613 rmb(); 608 rmb();
614 609
615 /* We need both the phy and xaui links to be ok. 610 /* We need both the PHY and MAC-PHY links to be OK */
616 * rather than relying on the falcon_xmac irq/poll 611 link_up = efx->link_state.up;
617 * regime, just poll xaui directly */ 612 if (link_up)
618 link_up = efx->link_up; 613 link_up = !efx->mac_op->check_fault(efx);
619 if (link_up && EFX_IS10G(efx) &&
620 !falcon_xaui_link_ok(efx))
621 link_up = false;
622 614
623 } while ((++count < 20) && !link_up); 615 } while ((++count < 20) && !link_up);
624 616
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
deleted file mode 100644
index 49eb91b5f50c..000000000000
--- a/drivers/net/sfc/sfe4001.c
+++ /dev/null
@@ -1,435 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10/*****************************************************************************
11 * Support for the SFE4001 and SFN4111T NICs.
12 *
13 * The SFE4001 does not power-up fully at reset due to its high power
14 * consumption. We control its power via a PCA9539 I/O expander.
15 * Both boards have a MAX6647 temperature monitor which we expose to
16 * the lm90 driver.
17 *
18 * This also provides minimal support for reflashing the PHY, which is
19 * initiated by resetting it with the FLASH_CFG_1 pin pulled down.
20 * On SFE4001 rev A2 and later this is connected to the 3V3X output of
21 * the IO-expander; on the SFN4111T it is connected to Falcon's GPIO3.
22 * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually
23 * exclusive with the network device being open.
24 */
25
26#include <linux/delay.h>
27#include <linux/rtnetlink.h>
28#include "net_driver.h"
29#include "efx.h"
30#include "phy.h"
31#include "boards.h"
32#include "falcon.h"
33#include "falcon_hwdefs.h"
34#include "falcon_io.h"
35#include "mac.h"
36#include "workarounds.h"
37
38/**************************************************************************
39 *
40 * I2C IO Expander device
41 *
42 **************************************************************************/
43#define PCA9539 0x74
44
45#define P0_IN 0x00
46#define P0_OUT 0x02
47#define P0_INVERT 0x04
48#define P0_CONFIG 0x06
49
50#define P0_EN_1V0X_LBN 0
51#define P0_EN_1V0X_WIDTH 1
52#define P0_EN_1V2_LBN 1
53#define P0_EN_1V2_WIDTH 1
54#define P0_EN_2V5_LBN 2
55#define P0_EN_2V5_WIDTH 1
56#define P0_EN_3V3X_LBN 3
57#define P0_EN_3V3X_WIDTH 1
58#define P0_EN_5V_LBN 4
59#define P0_EN_5V_WIDTH 1
60#define P0_SHORTEN_JTAG_LBN 5
61#define P0_SHORTEN_JTAG_WIDTH 1
62#define P0_X_TRST_LBN 6
63#define P0_X_TRST_WIDTH 1
64#define P0_DSP_RESET_LBN 7
65#define P0_DSP_RESET_WIDTH 1
66
67#define P1_IN 0x01
68#define P1_OUT 0x03
69#define P1_INVERT 0x05
70#define P1_CONFIG 0x07
71
72#define P1_AFE_PWD_LBN 0
73#define P1_AFE_PWD_WIDTH 1
74#define P1_DSP_PWD25_LBN 1
75#define P1_DSP_PWD25_WIDTH 1
76#define P1_RESERVED_LBN 2
77#define P1_RESERVED_WIDTH 2
78#define P1_SPARE_LBN 4
79#define P1_SPARE_WIDTH 4
80
81/* Temperature Sensor */
82#define MAX664X_REG_RSL 0x02
83#define MAX664X_REG_WLHO 0x0B
84
85static void sfe4001_poweroff(struct efx_nic *efx)
86{
87 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
88 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
89
90 /* Turn off all power rails and disable outputs */
91 i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff);
92 i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff);
93 i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff);
94
95 /* Clear any over-temperature alert */
96 i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
97}
98
99static int sfe4001_poweron(struct efx_nic *efx)
100{
101 struct i2c_client *hwmon_client = efx->board_info.hwmon_client;
102 struct i2c_client *ioexp_client = efx->board_info.ioexp_client;
103 unsigned int i, j;
104 int rc;
105 u8 out;
106
107 /* Clear any previous over-temperature alert */
108 rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL);
109 if (rc < 0)
110 return rc;
111
112 /* Enable port 0 and port 1 outputs on IO expander */
113 rc = i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0x00);
114 if (rc)
115 return rc;
116 rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG,
117 0xff & ~(1 << P1_SPARE_LBN));
118 if (rc)
119 goto fail_on;
120
121 /* If PHY power is on, turn it all off and wait 1 second to
122 * ensure a full reset.
123 */
124 rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT);
125 if (rc < 0)
126 goto fail_on;
127 out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
128 (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
129 (0 << P0_EN_1V0X_LBN));
130 if (rc != out) {
131 EFX_INFO(efx, "power-cycling PHY\n");
132 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
133 if (rc)
134 goto fail_on;
135 schedule_timeout_uninterruptible(HZ);
136 }
137
138 for (i = 0; i < 20; ++i) {
139 /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
140 out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
141 (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
142 (1 << P0_X_TRST_LBN));
143 if (efx->phy_mode & PHY_MODE_SPECIAL)
144 out |= 1 << P0_EN_3V3X_LBN;
145
146 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
147 if (rc)
148 goto fail_on;
149 msleep(10);
150
151 /* Turn on 1V power rail */
152 out &= ~(1 << P0_EN_1V0X_LBN);
153 rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out);
154 if (rc)
155 goto fail_on;
156
157 EFX_INFO(efx, "waiting for DSP boot (attempt %d)...\n", i);
158
159 /* In flash config mode, DSP does not turn on AFE, so
160 * just wait 1 second.
161 */
162 if (efx->phy_mode & PHY_MODE_SPECIAL) {
163 schedule_timeout_uninterruptible(HZ);
164 return 0;
165 }
166
167 for (j = 0; j < 10; ++j) {
168 msleep(100);
169
170 /* Check DSP has asserted AFE power line */
171 rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN);
172 if (rc < 0)
173 goto fail_on;
174 if (rc & (1 << P1_AFE_PWD_LBN))
175 return 0;
176 }
177 }
178
179 EFX_INFO(efx, "timed out waiting for DSP boot\n");
180 rc = -ETIMEDOUT;
181fail_on:
182 sfe4001_poweroff(efx);
183 return rc;
184}
185
186static int sfn4111t_reset(struct efx_nic *efx)
187{
188 efx_oword_t reg;
189
190 /* GPIO 3 and the GPIO register are shared with I2C, so block that */
191 i2c_lock_adapter(&efx->i2c_adap);
192
193 /* Pull RST_N (GPIO 2) low then let it up again, setting the
194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
195 * output enables; the output levels should always be 0 (low)
196 * and we rely on external pull-ups. */
197 falcon_read(efx, &reg, GPIO_CTL_REG_KER);
198 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
199 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
200 msleep(1000);
201 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
202 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
203 !!(efx->phy_mode & PHY_MODE_SPECIAL));
204 falcon_write(efx, &reg, GPIO_CTL_REG_KER);
205 msleep(1);
206
207 i2c_unlock_adapter(&efx->i2c_adap);
208
209 ssleep(1);
210 return 0;
211}
212
213static ssize_t show_phy_flash_cfg(struct device *dev,
214 struct device_attribute *attr, char *buf)
215{
216 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
217 return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL));
218}
219
220static ssize_t set_phy_flash_cfg(struct device *dev,
221 struct device_attribute *attr,
222 const char *buf, size_t count)
223{
224 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
225 enum efx_phy_mode old_mode, new_mode;
226 int err;
227
228 rtnl_lock();
229 old_mode = efx->phy_mode;
230 if (count == 0 || *buf == '0')
231 new_mode = old_mode & ~PHY_MODE_SPECIAL;
232 else
233 new_mode = PHY_MODE_SPECIAL;
234 if (old_mode == new_mode) {
235 err = 0;
236 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
237 err = -EBUSY;
238 } else {
239 /* Reset the PHY, reconfigure the MAC and enable/disable
240 * MAC stats accordingly. */
241 efx->phy_mode = new_mode;
242 if (new_mode & PHY_MODE_SPECIAL)
243 efx_stats_disable(efx);
244 if (efx->board_info.type == EFX_BOARD_SFE4001)
245 err = sfe4001_poweron(efx);
246 else
247 err = sfn4111t_reset(efx);
248 efx_reconfigure_port(efx);
249 if (!(new_mode & PHY_MODE_SPECIAL))
250 efx_stats_enable(efx);
251 }
252 rtnl_unlock();
253
254 return err ? err : count;
255}
256
257static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg);
258
259static void sfe4001_fini(struct efx_nic *efx)
260{
261 EFX_INFO(efx, "%s\n", __func__);
262
263 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
264 sfe4001_poweroff(efx);
265 i2c_unregister_device(efx->board_info.ioexp_client);
266 i2c_unregister_device(efx->board_info.hwmon_client);
267}
268
269static int sfe4001_check_hw(struct efx_nic *efx)
270{
271 s32 status;
272
273 /* If XAUI link is up then do not monitor */
274 if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
275 return 0;
276
277 /* Check the powered status of the PHY. Lack of power implies that
278 * the MAX6647 has shut down power to it, probably due to a temp.
279 * alarm. Reading the power status rather than the MAX6647 status
280 * directly because the later is read-to-clear and would thus
281 * start to power up the PHY again when polled, causing us to blip
282 * the power undesirably.
283 * We know we can read from the IO expander because we did
284 * it during power-on. Assume failure now is bad news. */
285 status = i2c_smbus_read_byte_data(efx->board_info.ioexp_client, P1_IN);
286 if (status >= 0 &&
287 (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0)
288 return 0;
289
290 /* Use board power control, not PHY power control */
291 sfe4001_poweroff(efx);
292 efx->phy_mode = PHY_MODE_OFF;
293
294 return (status < 0) ? -EIO : -ERANGE;
295}
296
297static struct i2c_board_info sfe4001_hwmon_info = {
298 I2C_BOARD_INFO("max6647", 0x4e),
299};
300
301/* This board uses an I2C expander to provider power to the PHY, which needs to
302 * be turned on before the PHY can be used.
303 * Context: Process context, rtnl lock held
304 */
305int sfe4001_init(struct efx_nic *efx)
306{
307 int rc;
308
309#if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE)
310 efx->board_info.hwmon_client =
311 i2c_new_device(&efx->i2c_adap, &sfe4001_hwmon_info);
312#else
313 efx->board_info.hwmon_client =
314 i2c_new_dummy(&efx->i2c_adap, sfe4001_hwmon_info.addr);
315#endif
316 if (!efx->board_info.hwmon_client)
317 return -EIO;
318
319 /* Raise board/PHY high limit from 85 to 90 degrees Celsius */
320 rc = i2c_smbus_write_byte_data(efx->board_info.hwmon_client,
321 MAX664X_REG_WLHO, 90);
322 if (rc)
323 goto fail_hwmon;
324
325 efx->board_info.ioexp_client = i2c_new_dummy(&efx->i2c_adap, PCA9539);
326 if (!efx->board_info.ioexp_client) {
327 rc = -EIO;
328 goto fail_hwmon;
329 }
330
331 /* 10Xpress has fixed-function LED pins, so there is no board-specific
332 * blink code. */
333 efx->board_info.blink = tenxpress_phy_blink;
334
335 efx->board_info.monitor = sfe4001_check_hw;
336 efx->board_info.fini = sfe4001_fini;
337
338 if (efx->phy_mode & PHY_MODE_SPECIAL) {
339 /* PHY won't generate a 156.25 MHz clock and MAC stats fetch
340 * will fail. */
341 efx_stats_disable(efx);
342 }
343 rc = sfe4001_poweron(efx);
344 if (rc)
345 goto fail_ioexp;
346
347 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
348 if (rc)
349 goto fail_on;
350
351 EFX_INFO(efx, "PHY is powered on\n");
352 return 0;
353
354fail_on:
355 sfe4001_poweroff(efx);
356fail_ioexp:
357 i2c_unregister_device(efx->board_info.ioexp_client);
358fail_hwmon:
359 i2c_unregister_device(efx->board_info.hwmon_client);
360 return rc;
361}
362
363static int sfn4111t_check_hw(struct efx_nic *efx)
364{
365 s32 status;
366
367 /* If XAUI link is up then do not monitor */
368 if (EFX_WORKAROUND_7884(efx) && efx->mac_up)
369 return 0;
370
371 /* Test LHIGH, RHIGH, FAULT, EOT and IOT alarms */
372 status = i2c_smbus_read_byte_data(efx->board_info.hwmon_client,
373 MAX664X_REG_RSL);
374 if (status < 0)
375 return -EIO;
376 if (status & 0x57)
377 return -ERANGE;
378 return 0;
379}
380
381static void sfn4111t_fini(struct efx_nic *efx)
382{
383 EFX_INFO(efx, "%s\n", __func__);
384
385 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
386 i2c_unregister_device(efx->board_info.hwmon_client);
387}
388
389static struct i2c_board_info sfn4111t_a0_hwmon_info = {
390 I2C_BOARD_INFO("max6647", 0x4e),
391};
392
393static struct i2c_board_info sfn4111t_r5_hwmon_info = {
394 I2C_BOARD_INFO("max6646", 0x4d),
395};
396
397int sfn4111t_init(struct efx_nic *efx)
398{
399 int i = 0;
400 int rc;
401
402 efx->board_info.hwmon_client =
403 i2c_new_device(&efx->i2c_adap,
404 (efx->board_info.minor < 5) ?
405 &sfn4111t_a0_hwmon_info :
406 &sfn4111t_r5_hwmon_info);
407 if (!efx->board_info.hwmon_client)
408 return -EIO;
409
410 efx->board_info.blink = tenxpress_phy_blink;
411 efx->board_info.monitor = sfn4111t_check_hw;
412 efx->board_info.fini = sfn4111t_fini;
413
414 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
415 if (rc)
416 goto fail_hwmon;
417
418 do {
419 if (efx->phy_mode & PHY_MODE_SPECIAL) {
420 /* PHY may not generate a 156.25 MHz clock and MAC
421 * stats fetch will fail. */
422 efx_stats_disable(efx);
423 sfn4111t_reset(efx);
424 }
425 rc = sft9001_wait_boot(efx);
426 if (rc == 0)
427 return 0;
428 efx->phy_mode = PHY_MODE_SPECIAL;
429 } while (rc == -EINVAL && ++i < 2);
430
431 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg);
432fail_hwmon:
433 i2c_unregister_device(efx->board_info.hwmon_client);
434 return rc;
435}
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f4d509015f75..1bd79650a00f 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -14,8 +14,7 @@
14#include "mdio_10g.h" 14#include "mdio_10g.h"
15#include "falcon.h" 15#include "falcon.h"
16#include "phy.h" 16#include "phy.h"
17#include "falcon_hwdefs.h" 17#include "regs.h"
18#include "boards.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h" 19#include "selftest.h"
21 20
@@ -84,9 +83,9 @@
84#define PMA_PMD_LED_FLASH (3) 83#define PMA_PMD_LED_FLASH (3)
85#define PMA_PMD_LED_MASK 3 84#define PMA_PMD_LED_MASK 3
86/* All LEDs under hardware control */ 85/* All LEDs under hardware control */
87#define PMA_PMD_LED_FULL_AUTO (0) 86#define SFT9001_PMA_PMD_LED_DEFAULT 0
88/* Green and Amber under hardware control, Red off */ 87/* Green and Amber under hardware control, Red off */
89#define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) 88#define SFX7101_PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
90 89
91#define PMA_PMD_SPEED_ENABLE_REG 49192 90#define PMA_PMD_SPEED_ENABLE_REG 49192
92#define PMA_PMD_100TX_ADV_LBN 1 91#define PMA_PMD_100TX_ADV_LBN 1
@@ -292,7 +291,7 @@ static int tenxpress_init(struct efx_nic *efx)
292 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG, 291 efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG,
293 1 << PMA_PMA_LED_ACTIVITY_LBN, true); 292 1 << PMA_PMA_LED_ACTIVITY_LBN, true);
294 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, 293 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG,
295 PMA_PMD_LED_DEFAULT); 294 SFX7101_PMA_PMD_LED_DEFAULT);
296 } 295 }
297 296
298 return 0; 297 return 0;
@@ -301,8 +300,11 @@ static int tenxpress_init(struct efx_nic *efx)
301static int tenxpress_phy_init(struct efx_nic *efx) 300static int tenxpress_phy_init(struct efx_nic *efx)
302{ 301{
303 struct tenxpress_phy_data *phy_data; 302 struct tenxpress_phy_data *phy_data;
303 u16 old_adv, adv;
304 int rc = 0; 304 int rc = 0;
305 305
306 falcon_board(efx)->type->init_phy(efx);
307
306 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 308 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
307 if (!phy_data) 309 if (!phy_data)
308 return -ENOMEM; 310 return -ENOMEM;
@@ -333,6 +335,15 @@ static int tenxpress_phy_init(struct efx_nic *efx)
333 if (rc < 0) 335 if (rc < 0)
334 goto fail; 336 goto fail;
335 337
338 /* Set pause advertising */
339 old_adv = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
340 adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
341 mii_advertise_flowctrl(efx->wanted_fc));
342 if (adv != old_adv) {
343 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, adv);
344 mdio45_nway_restart(&efx->mdio);
345 }
346
336 if (efx->phy_type == PHY_TYPE_SFT9001B) { 347 if (efx->phy_type == PHY_TYPE_SFT9001B) {
337 rc = device_create_file(&efx->pci_dev->dev, 348 rc = device_create_file(&efx->pci_dev->dev,
338 &dev_attr_phy_short_reach); 349 &dev_attr_phy_short_reach);
@@ -363,7 +374,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
363 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 374 /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
364 * a special software reset can glitch the XGMAC sufficiently for stats 375 * a special software reset can glitch the XGMAC sufficiently for stats
365 * requests to fail. */ 376 * requests to fail. */
366 efx_stats_disable(efx); 377 falcon_stop_nic_stats(efx);
367 378
368 /* Initiate reset */ 379 /* Initiate reset */
369 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG); 380 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG);
@@ -385,7 +396,7 @@ static int tenxpress_special_reset(struct efx_nic *efx)
385 /* Wait for the XGXS state machine to churn */ 396 /* Wait for the XGXS state machine to churn */
386 mdelay(10); 397 mdelay(10);
387out: 398out:
388 efx_stats_enable(efx); 399 falcon_start_nic_stats(efx);
389 return rc; 400 return rc;
390} 401}
391 402
@@ -532,52 +543,41 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
532 543
533 phy_data->loopback_mode = efx->loopback_mode; 544 phy_data->loopback_mode = efx->loopback_mode;
534 phy_data->phy_mode = efx->phy_mode; 545 phy_data->phy_mode = efx->phy_mode;
535
536 if (efx->phy_type == PHY_TYPE_SFX7101) {
537 efx->link_speed = 10000;
538 efx->link_fd = true;
539 efx->link_up = sfx7101_link_ok(efx);
540 } else {
541 efx->phy_op->get_settings(efx, &ecmd);
542 efx->link_speed = ecmd.speed;
543 efx->link_fd = ecmd.duplex == DUPLEX_FULL;
544 efx->link_up = sft9001_link_ok(efx, &ecmd);
545 }
546 efx->link_fc = efx_mdio_get_pause(efx);
547} 546}
548 547
549/* Poll PHY for interrupt */ 548static void
550static void tenxpress_phy_poll(struct efx_nic *efx) 549tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd);
550
551/* Poll for link state changes */
552static bool tenxpress_phy_poll(struct efx_nic *efx)
551{ 553{
552 struct tenxpress_phy_data *phy_data = efx->phy_data; 554 struct efx_link_state old_state = efx->link_state;
553 bool change = false;
554 555
555 if (efx->phy_type == PHY_TYPE_SFX7101) { 556 if (efx->phy_type == PHY_TYPE_SFX7101) {
556 bool link_ok = sfx7101_link_ok(efx); 557 efx->link_state.up = sfx7101_link_ok(efx);
557 if (link_ok != efx->link_up) { 558 efx->link_state.speed = 10000;
558 change = true; 559 efx->link_state.fd = true;
559 } else { 560 efx->link_state.fc = efx_mdio_get_pause(efx);
560 unsigned int link_fc = efx_mdio_get_pause(efx); 561
561 if (link_fc != efx->link_fc) 562 sfx7101_check_bad_lp(efx, efx->link_state.up);
562 change = true;
563 }
564 sfx7101_check_bad_lp(efx, link_ok);
565 } else if (efx->loopback_mode) {
566 bool link_ok = sft9001_link_ok(efx, NULL);
567 if (link_ok != efx->link_up)
568 change = true;
569 } else { 563 } else {
570 int status = efx_mdio_read(efx, MDIO_MMD_PMAPMD, 564 struct ethtool_cmd ecmd;
571 MDIO_PMA_LASI_STAT);
572 if (status & MDIO_PMA_LASI_LSALARM)
573 change = true;
574 }
575 565
576 if (change) 566 /* Check the LASI alarm first */
577 falcon_sim_phy_event(efx); 567 if (efx->loopback_mode == LOOPBACK_NONE &&
568 !(efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT) &
569 MDIO_PMA_LASI_LSALARM))
570 return false;
578 571
579 if (phy_data->phy_mode != PHY_MODE_NORMAL) 572 tenxpress_get_settings(efx, &ecmd);
580 return; 573
574 efx->link_state.up = sft9001_link_ok(efx, &ecmd);
575 efx->link_state.speed = ecmd.speed;
576 efx->link_state.fd = (ecmd.duplex == DUPLEX_FULL);
577 efx->link_state.fc = efx_mdio_get_pause(efx);
578 }
579
580 return !efx_link_state_equal(&efx->link_state, &old_state);
581} 581}
582 582
583static void tenxpress_phy_fini(struct efx_nic *efx) 583static void tenxpress_phy_fini(struct efx_nic *efx)
@@ -604,18 +604,29 @@ static void tenxpress_phy_fini(struct efx_nic *efx)
604} 604}
605 605
606 606
607/* Set the RX and TX LEDs and Link LED flashing. The other LEDs 607/* Override the RX, TX and link LEDs */
608 * (which probably aren't wired anyway) are left in AUTO mode */ 608void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
609void tenxpress_phy_blink(struct efx_nic *efx, bool blink)
610{ 609{
611 int reg; 610 int reg;
612 611
613 if (blink) 612 switch (mode) {
614 reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) | 613 case EFX_LED_OFF:
615 (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) | 614 reg = (PMA_PMD_LED_OFF << PMA_PMD_LED_TX_LBN) |
616 (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN); 615 (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) |
617 else 616 (PMA_PMD_LED_OFF << PMA_PMD_LED_LINK_LBN);
618 reg = PMA_PMD_LED_DEFAULT; 617 break;
618 case EFX_LED_ON:
619 reg = (PMA_PMD_LED_ON << PMA_PMD_LED_TX_LBN) |
620 (PMA_PMD_LED_ON << PMA_PMD_LED_RX_LBN) |
621 (PMA_PMD_LED_ON << PMA_PMD_LED_LINK_LBN);
622 break;
623 default:
624 if (efx->phy_type == PHY_TYPE_SFX7101)
625 reg = SFX7101_PMA_PMD_LED_DEFAULT;
626 else
627 reg = SFT9001_PMA_PMD_LED_DEFAULT;
628 break;
629 }
619 630
620 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg); 631 efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG, reg);
621} 632}
@@ -742,6 +753,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
742 753
743 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); 754 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
744 755
756 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
745 if (efx->phy_type != PHY_TYPE_SFX7101) { 757 if (efx->phy_type != PHY_TYPE_SFX7101) {
746 ecmd->supported |= (SUPPORTED_100baseT_Full | 758 ecmd->supported |= (SUPPORTED_100baseT_Full |
747 SUPPORTED_1000baseT_Full); 759 SUPPORTED_1000baseT_Full);
@@ -793,7 +805,6 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
793 .reconfigure = tenxpress_phy_reconfigure, 805 .reconfigure = tenxpress_phy_reconfigure,
794 .poll = tenxpress_phy_poll, 806 .poll = tenxpress_phy_poll,
795 .fini = tenxpress_phy_fini, 807 .fini = tenxpress_phy_fini,
796 .clear_interrupt = efx_port_dummy_op_void,
797 .get_settings = tenxpress_get_settings, 808 .get_settings = tenxpress_get_settings,
798 .set_settings = tenxpress_set_settings, 809 .set_settings = tenxpress_set_settings,
799 .set_npage_adv = sfx7101_set_npage_adv, 810 .set_npage_adv = sfx7101_set_npage_adv,
@@ -810,7 +821,6 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
810 .reconfigure = tenxpress_phy_reconfigure, 821 .reconfigure = tenxpress_phy_reconfigure,
811 .poll = tenxpress_phy_poll, 822 .poll = tenxpress_phy_poll,
812 .fini = tenxpress_phy_fini, 823 .fini = tenxpress_phy_fini,
813 .clear_interrupt = efx_port_dummy_op_void,
814 .get_settings = tenxpress_get_settings, 824 .get_settings = tenxpress_get_settings,
815 .set_settings = tenxpress_set_settings, 825 .set_settings = tenxpress_set_settings,
816 .set_npage_adv = sft9001_set_npage_adv, 826 .set_npage_adv = sft9001_set_npage_adv,
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de31447..c54fa30e6277 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -15,7 +15,6 @@
15#include <linux/if_ether.h> 15#include <linux/if_ether.h>
16#include <linux/highmem.h> 16#include <linux/highmem.h>
17#include "net_driver.h" 17#include "net_driver.h"
18#include "tx.h"
19#include "efx.h" 18#include "efx.h"
20#include "falcon.h" 19#include "falcon.h"
21#include "workarounds.h" 20#include "workarounds.h"
@@ -26,8 +25,7 @@
26 * The tx_queue descriptor ring fill-level must fall below this value 25 * The tx_queue descriptor ring fill-level must fall below this value
27 * before we restart the netif queue 26 * before we restart the netif queue
28 */ 27 */
29#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \ 28#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
30 (_tx_queue->efx->type->txd_ring_mask / 2u)
31 29
32/* We want to be able to nest calls to netif_stop_queue(), since each 30/* We want to be able to nest calls to netif_stop_queue(), since each
33 * channel can have an individual stop on the queue. 31 * channel can have an individual stop on the queue.
@@ -125,6 +123,24 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
125} 123}
126 124
127 125
126static inline unsigned
127efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
128{
129 /* Depending on the NIC revision, we can use descriptor
130 * lengths up to 8K or 8K-1. However, since PCI Express
131 * devices must split read requests at 4K boundaries, there is
132 * little benefit from using descriptors that cross those
133 * boundaries and we keep things simple by not doing so.
134 */
135 unsigned len = (~dma_addr & 0xfff) + 1;
136
137 /* Work around hardware bug for unaligned buffers. */
138 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
139 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
140
141 return len;
142}
143
128/* 144/*
129 * Add a socket buffer to a TX queue 145 * Add a socket buffer to a TX queue
130 * 146 *
@@ -135,11 +151,13 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
135 * If any DMA mapping fails, any mapped fragments will be unmapped, 151 * If any DMA mapping fails, any mapped fragments will be unmapped,
136 * the queue's insert pointer will be restored to its original value. 152 * the queue's insert pointer will be restored to its original value.
137 * 153 *
154 * This function is split out from efx_hard_start_xmit to allow the
155 * loopback test to direct packets via specific TX queues.
156 *
138 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY 157 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
139 * You must hold netif_tx_lock() to call this function. 158 * You must hold netif_tx_lock() to call this function.
140 */ 159 */
141static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, 160netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
142 struct sk_buff *skb)
143{ 161{
144 struct efx_nic *efx = tx_queue->efx; 162 struct efx_nic *efx = tx_queue->efx;
145 struct pci_dev *pci_dev = efx->pci_dev; 163 struct pci_dev *pci_dev = efx->pci_dev;
@@ -147,7 +165,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
147 skb_frag_t *fragment; 165 skb_frag_t *fragment;
148 struct page *page; 166 struct page *page;
149 int page_offset; 167 int page_offset;
150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; 168 unsigned int len, unmap_len = 0, fill_level, insert_ptr;
151 dma_addr_t dma_addr, unmap_addr = 0; 169 dma_addr_t dma_addr, unmap_addr = 0;
152 unsigned int dma_len; 170 unsigned int dma_len;
153 bool unmap_single; 171 bool unmap_single;
@@ -156,7 +174,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
156 174
157 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); 175 EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
158 176
159 if (skb_shinfo((struct sk_buff *)skb)->gso_size) 177 if (skb_shinfo(skb)->gso_size)
160 return efx_enqueue_skb_tso(tx_queue, skb); 178 return efx_enqueue_skb_tso(tx_queue, skb);
161 179
162 /* Get size of the initial fragment */ 180 /* Get size of the initial fragment */
@@ -171,7 +189,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
171 } 189 }
172 190
173 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 191 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
174 q_space = efx->type->txd_ring_mask - 1 - fill_level; 192 q_space = EFX_TXQ_MASK - 1 - fill_level;
175 193
176 /* Map for DMA. Use pci_map_single rather than pci_map_page 194 /* Map for DMA. Use pci_map_single rather than pci_map_page
177 * since this is more efficient on machines with sparse 195 * since this is more efficient on machines with sparse
@@ -208,16 +226,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
208 &tx_queue->read_count; 226 &tx_queue->read_count;
209 fill_level = (tx_queue->insert_count 227 fill_level = (tx_queue->insert_count
210 - tx_queue->old_read_count); 228 - tx_queue->old_read_count);
211 q_space = (efx->type->txd_ring_mask - 1 - 229 q_space = EFX_TXQ_MASK - 1 - fill_level;
212 fill_level);
213 if (unlikely(q_space-- <= 0)) 230 if (unlikely(q_space-- <= 0))
214 goto stop; 231 goto stop;
215 smp_mb(); 232 smp_mb();
216 --tx_queue->stopped; 233 --tx_queue->stopped;
217 } 234 }
218 235
219 insert_ptr = (tx_queue->insert_count & 236 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
220 efx->type->txd_ring_mask);
221 buffer = &tx_queue->buffer[insert_ptr]; 237 buffer = &tx_queue->buffer[insert_ptr];
222 efx_tsoh_free(tx_queue, buffer); 238 efx_tsoh_free(tx_queue, buffer);
223 EFX_BUG_ON_PARANOID(buffer->tsoh); 239 EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -226,14 +242,10 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
226 EFX_BUG_ON_PARANOID(!buffer->continuation); 242 EFX_BUG_ON_PARANOID(!buffer->continuation);
227 EFX_BUG_ON_PARANOID(buffer->unmap_len); 243 EFX_BUG_ON_PARANOID(buffer->unmap_len);
228 244
229 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); 245 dma_len = efx_max_tx_len(efx, dma_addr);
230 if (likely(dma_len > len)) 246 if (likely(dma_len >= len))
231 dma_len = len; 247 dma_len = len;
232 248
233 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
234 if (misalign && dma_len + misalign > 512)
235 dma_len = 512 - misalign;
236
237 /* Fill out per descriptor fields */ 249 /* Fill out per descriptor fields */
238 buffer->len = dma_len; 250 buffer->len = dma_len;
239 buffer->dma_addr = dma_addr; 251 buffer->dma_addr = dma_addr;
@@ -276,7 +288,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
276 skb_shinfo(skb)->nr_frags + 1); 288 skb_shinfo(skb)->nr_frags + 1);
277 289
278 /* Mark the packet as transmitted, and free the SKB ourselves */ 290 /* Mark the packet as transmitted, and free the SKB ourselves */
279 dev_kfree_skb_any((struct sk_buff *)skb); 291 dev_kfree_skb_any(skb);
280 goto unwind; 292 goto unwind;
281 293
282 stop: 294 stop:
@@ -289,7 +301,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
289 /* Work backwards until we hit the original insert pointer value */ 301 /* Work backwards until we hit the original insert pointer value */
290 while (tx_queue->insert_count != tx_queue->write_count) { 302 while (tx_queue->insert_count != tx_queue->write_count) {
291 --tx_queue->insert_count; 303 --tx_queue->insert_count;
292 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 304 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
293 buffer = &tx_queue->buffer[insert_ptr]; 305 buffer = &tx_queue->buffer[insert_ptr];
294 efx_dequeue_buffer(tx_queue, buffer); 306 efx_dequeue_buffer(tx_queue, buffer);
295 buffer->len = 0; 307 buffer->len = 0;
@@ -318,10 +330,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
318{ 330{
319 struct efx_nic *efx = tx_queue->efx; 331 struct efx_nic *efx = tx_queue->efx;
320 unsigned int stop_index, read_ptr; 332 unsigned int stop_index, read_ptr;
321 unsigned int mask = tx_queue->efx->type->txd_ring_mask;
322 333
323 stop_index = (index + 1) & mask; 334 stop_index = (index + 1) & EFX_TXQ_MASK;
324 read_ptr = tx_queue->read_count & mask; 335 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
325 336
326 while (read_ptr != stop_index) { 337 while (read_ptr != stop_index) {
327 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 338 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,28 +349,10 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
338 buffer->len = 0; 349 buffer->len = 0;
339 350
340 ++tx_queue->read_count; 351 ++tx_queue->read_count;
341 read_ptr = tx_queue->read_count & mask; 352 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
342 } 353 }
343} 354}
344 355
345/* Initiate a packet transmission on the specified TX queue.
346 * Note that returning anything other than NETDEV_TX_OK will cause the
347 * OS to free the skb.
348 *
349 * This function is split out from efx_hard_start_xmit to allow the
350 * loopback test to direct packets via specific TX queues. It is
351 * therefore a non-static inline, so as not to penalise performance
352 * for non-loopback transmissions.
353 *
354 * Context: netif_tx_lock held
355 */
356inline netdev_tx_t efx_xmit(struct efx_nic *efx,
357 struct efx_tx_queue *tx_queue, struct sk_buff *skb)
358{
359 /* Map fragments for DMA and add to TX queue */
360 return efx_enqueue_skb(tx_queue, skb);
361}
362
363/* Initiate a packet transmission. We use one channel per CPU 356/* Initiate a packet transmission. We use one channel per CPU
364 * (sharing when we have more CPUs than channels). On Falcon, the TX 357 * (sharing when we have more CPUs than channels). On Falcon, the TX
365 * completion events will be directed back to the CPU that transmitted 358 * completion events will be directed back to the CPU that transmitted
@@ -383,7 +376,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
383 else 376 else
384 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM]; 377 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
385 378
386 return efx_xmit(efx, tx_queue, skb); 379 return efx_enqueue_skb(tx_queue, skb);
387} 380}
388 381
389void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) 382void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
@@ -391,7 +384,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
391 unsigned fill_level; 384 unsigned fill_level;
392 struct efx_nic *efx = tx_queue->efx; 385 struct efx_nic *efx = tx_queue->efx;
393 386
394 EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask); 387 EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
395 388
396 efx_dequeue_buffers(tx_queue, index); 389 efx_dequeue_buffers(tx_queue, index);
397 390
@@ -401,7 +394,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
401 smp_mb(); 394 smp_mb();
402 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { 395 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
403 fill_level = tx_queue->insert_count - tx_queue->read_count; 396 fill_level = tx_queue->insert_count - tx_queue->read_count;
404 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 397 if (fill_level < EFX_TXQ_THRESHOLD) {
405 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 398 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
406 399
407 /* Do this under netif_tx_lock(), to avoid racing 400 /* Do this under netif_tx_lock(), to avoid racing
@@ -425,11 +418,11 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
425 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); 418 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
426 419
427 /* Allocate software ring */ 420 /* Allocate software ring */
428 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); 421 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
429 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 422 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
430 if (!tx_queue->buffer) 423 if (!tx_queue->buffer)
431 return -ENOMEM; 424 return -ENOMEM;
432 for (i = 0; i <= efx->type->txd_ring_mask; ++i) 425 for (i = 0; i <= EFX_TXQ_MASK; ++i)
433 tx_queue->buffer[i].continuation = true; 426 tx_queue->buffer[i].continuation = true;
434 427
435 /* Allocate hardware ring */ 428 /* Allocate hardware ring */
@@ -468,8 +461,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
468 461
469 /* Free any buffers left in the ring */ 462 /* Free any buffers left in the ring */
470 while (tx_queue->read_count != tx_queue->write_count) { 463 while (tx_queue->read_count != tx_queue->write_count) {
471 buffer = &tx_queue->buffer[tx_queue->read_count & 464 buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
472 tx_queue->efx->type->txd_ring_mask];
473 efx_dequeue_buffer(tx_queue, buffer); 465 efx_dequeue_buffer(tx_queue, buffer);
474 buffer->continuation = true; 466 buffer->continuation = true;
475 buffer->len = 0; 467 buffer->len = 0;
@@ -708,14 +700,14 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
708{ 700{
709 struct efx_tx_buffer *buffer; 701 struct efx_tx_buffer *buffer;
710 struct efx_nic *efx = tx_queue->efx; 702 struct efx_nic *efx = tx_queue->efx;
711 unsigned dma_len, fill_level, insert_ptr, misalign; 703 unsigned dma_len, fill_level, insert_ptr;
712 int q_space; 704 int q_space;
713 705
714 EFX_BUG_ON_PARANOID(len <= 0); 706 EFX_BUG_ON_PARANOID(len <= 0);
715 707
716 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 708 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
717 /* -1 as there is no way to represent all descriptors used */ 709 /* -1 as there is no way to represent all descriptors used */
718 q_space = efx->type->txd_ring_mask - 1 - fill_level; 710 q_space = EFX_TXQ_MASK - 1 - fill_level;
719 711
720 while (1) { 712 while (1) {
721 if (unlikely(q_space-- <= 0)) { 713 if (unlikely(q_space-- <= 0)) {
@@ -731,7 +723,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
731 *(volatile unsigned *)&tx_queue->read_count; 723 *(volatile unsigned *)&tx_queue->read_count;
732 fill_level = (tx_queue->insert_count 724 fill_level = (tx_queue->insert_count
733 - tx_queue->old_read_count); 725 - tx_queue->old_read_count);
734 q_space = efx->type->txd_ring_mask - 1 - fill_level; 726 q_space = EFX_TXQ_MASK - 1 - fill_level;
735 if (unlikely(q_space-- <= 0)) { 727 if (unlikely(q_space-- <= 0)) {
736 *final_buffer = NULL; 728 *final_buffer = NULL;
737 return 1; 729 return 1;
@@ -740,13 +732,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
740 --tx_queue->stopped; 732 --tx_queue->stopped;
741 } 733 }
742 734
743 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 735 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
744 buffer = &tx_queue->buffer[insert_ptr]; 736 buffer = &tx_queue->buffer[insert_ptr];
745 ++tx_queue->insert_count; 737 ++tx_queue->insert_count;
746 738
747 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 739 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
748 tx_queue->read_count > 740 tx_queue->read_count >
749 efx->type->txd_ring_mask); 741 EFX_TXQ_MASK);
750 742
751 efx_tsoh_free(tx_queue, buffer); 743 efx_tsoh_free(tx_queue, buffer);
752 EFX_BUG_ON_PARANOID(buffer->len); 744 EFX_BUG_ON_PARANOID(buffer->len);
@@ -757,12 +749,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
757 749
758 buffer->dma_addr = dma_addr; 750 buffer->dma_addr = dma_addr;
759 751
760 /* Ensure we do not cross a boundary unsupported by H/W */ 752 dma_len = efx_max_tx_len(efx, dma_addr);
761 dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
762
763 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
764 if (misalign && dma_len + misalign > 512)
765 dma_len = 512 - misalign;
766 753
767 /* If there is enough space to send then do so */ 754 /* If there is enough space to send then do so */
768 if (dma_len >= len) 755 if (dma_len >= len)
@@ -792,8 +779,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
792{ 779{
793 struct efx_tx_buffer *buffer; 780 struct efx_tx_buffer *buffer;
794 781
795 buffer = &tx_queue->buffer[tx_queue->insert_count & 782 buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
796 tx_queue->efx->type->txd_ring_mask];
797 efx_tsoh_free(tx_queue, buffer); 783 efx_tsoh_free(tx_queue, buffer);
798 EFX_BUG_ON_PARANOID(buffer->len); 784 EFX_BUG_ON_PARANOID(buffer->len);
799 EFX_BUG_ON_PARANOID(buffer->unmap_len); 785 EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +804,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
818 while (tx_queue->insert_count != tx_queue->write_count) { 804 while (tx_queue->insert_count != tx_queue->write_count) {
819 --tx_queue->insert_count; 805 --tx_queue->insert_count;
820 buffer = &tx_queue->buffer[tx_queue->insert_count & 806 buffer = &tx_queue->buffer[tx_queue->insert_count &
821 tx_queue->efx->type->txd_ring_mask]; 807 EFX_TXQ_MASK];
822 efx_tsoh_free(tx_queue, buffer); 808 efx_tsoh_free(tx_queue, buffer);
823 EFX_BUG_ON_PARANOID(buffer->skb); 809 EFX_BUG_ON_PARANOID(buffer->skb);
824 buffer->len = 0; 810 buffer->len = 0;
@@ -1099,7 +1085,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1099 1085
1100 mem_err: 1086 mem_err:
1101 EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n"); 1087 EFX_ERR(efx, "Out of memory for TSO headers, or PCI mapping error\n");
1102 dev_kfree_skb_any((struct sk_buff *)skb); 1088 dev_kfree_skb_any(skb);
1103 goto unwind; 1089 goto unwind;
1104 1090
1105 stop: 1091 stop:
@@ -1135,7 +1121,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1135 unsigned i; 1121 unsigned i;
1136 1122
1137 if (tx_queue->buffer) { 1123 if (tx_queue->buffer) {
1138 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1124 for (i = 0; i <= EFX_TXQ_MASK; ++i)
1139 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1125 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1140 } 1126 }
1141 1127
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
deleted file mode 100644
index e3678962a5b4..000000000000
--- a/drivers/net/sfc/tx.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_TX_H
12#define EFX_TX_H
13
14#include "net_driver.h"
15
16int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
17void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
18void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
19void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
20
21netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
22 struct net_device *net_dev);
23void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
24
25#endif /* EFX_TX_H */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index c821c15445a0..021d0d2d97f3 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -16,7 +16,8 @@
16 */ 16 */
17 17
18#define EFX_WORKAROUND_ALWAYS(efx) 1 18#define EFX_WORKAROUND_ALWAYS(efx) 1
19#define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) 19#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
20#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
20#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx) 21#define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
21#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \ 22#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
22 (efx)->phy_type == PHY_TYPE_SFT9001B) 23 (efx)->phy_type == PHY_TYPE_SFT9001B)
@@ -27,20 +28,18 @@
27#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS 28#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
28/* Bit-bashed I2C reads cause performance drop */ 29/* Bit-bashed I2C reads cause performance drop */
29#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G 30#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
30/* TX pkt parser problem with <= 16 byte TXes */
31#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
32/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor 31/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
33 * or a PCIe error (bug 11028) */ 32 * or a PCIe error (bug 11028) */
34#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS 33#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
35/* Transmit flow control may get disabled */ 34/* Transmit flow control may get disabled */
36#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS 35#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
37/* Flush events can take a very long time to appear */
38#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
39/* Truncated IPv4 packets can confuse the TX packet parser */ 36/* Truncated IPv4 packets can confuse the TX packet parser */
40#define EFX_WORKAROUND_15592 EFX_WORKAROUND_ALWAYS 37#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
41 38
42/* Spurious parity errors in TSORT buffers */ 39/* Spurious parity errors in TSORT buffers */
43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 40#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
41/* Unaligned read request >512 bytes after aligning may break TSORT */
42#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
44/* iSCSI parsing errors */ 43/* iSCSI parsing errors */
45#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A 44#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
46/* RX events go missing */ 45/* RX events go missing */
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index f4dfd1f679a9..6b364a6c6c60 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -365,11 +365,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
365 } 365 }
366 skb_reserve(newskb, 2); 366 skb_reserve(newskb, 2);
367 } else { 367 } else {
368 skb = netdev_alloc_skb(dev, len + 2); 368 skb = netdev_alloc_skb_ip_align(dev, len);
369 if (skb) { 369 if (skb)
370 skb_reserve(skb, 2);
371 skb_copy_to_linear_data(skb, rd->skb->data, len); 370 skb_copy_to_linear_data(skb, rd->skb->data, len);
372 } 371
373 newskb = rd->skb; 372 newskb = rd->skb;
374 } 373 }
375memory_squeeze: 374memory_squeeze:
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 528b912a4b0d..c88bc1013047 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -30,6 +30,7 @@
30#include <linux/phy.h> 30#include <linux/phy.h>
31#include <linux/cache.h> 31#include <linux/cache.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h>
33#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
34 35
35#include "sh_eth.h" 36#include "sh_eth.h"
@@ -299,16 +300,20 @@ static void update_mac_address(struct net_device *ndev)
299 * When you want use this device, you must set MAC address in bootloader. 300 * When you want use this device, you must set MAC address in bootloader.
300 * 301 *
301 */ 302 */
302static void read_mac_address(struct net_device *ndev) 303static void read_mac_address(struct net_device *ndev, unsigned char *mac)
303{ 304{
304 u32 ioaddr = ndev->base_addr; 305 u32 ioaddr = ndev->base_addr;
305 306
306 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24); 307 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
307 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF; 308 memcpy(ndev->dev_addr, mac, 6);
308 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF; 309 } else {
309 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF); 310 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
310 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF; 311 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
311 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF); 312 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
313 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
314 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
315 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
316 }
312} 317}
313 318
314struct bb_info { 319struct bb_info {
@@ -1009,7 +1014,9 @@ static int sh_eth_open(struct net_device *ndev)
1009 int ret = 0; 1014 int ret = 0;
1010 struct sh_eth_private *mdp = netdev_priv(ndev); 1015 struct sh_eth_private *mdp = netdev_priv(ndev);
1011 1016
1012 ret = request_irq(ndev->irq, &sh_eth_interrupt, 1017 pm_runtime_get_sync(&mdp->pdev->dev);
1018
1019 ret = request_irq(ndev->irq, sh_eth_interrupt,
1013#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764) 1020#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
1014 IRQF_SHARED, 1021 IRQF_SHARED,
1015#else 1022#else
@@ -1045,6 +1052,7 @@ static int sh_eth_open(struct net_device *ndev)
1045 1052
1046out_free_irq: 1053out_free_irq:
1047 free_irq(ndev->irq, ndev); 1054 free_irq(ndev->irq, ndev);
1055 pm_runtime_put_sync(&mdp->pdev->dev);
1048 return ret; 1056 return ret;
1049} 1057}
1050 1058
@@ -1176,6 +1184,8 @@ static int sh_eth_close(struct net_device *ndev)
1176 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 1184 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
1177 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma); 1185 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
1178 1186
1187 pm_runtime_put_sync(&mdp->pdev->dev);
1188
1179 return 0; 1189 return 0;
1180} 1190}
1181 1191
@@ -1184,6 +1194,8 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1184 struct sh_eth_private *mdp = netdev_priv(ndev); 1194 struct sh_eth_private *mdp = netdev_priv(ndev);
1185 u32 ioaddr = ndev->base_addr; 1195 u32 ioaddr = ndev->base_addr;
1186 1196
1197 pm_runtime_get_sync(&mdp->pdev->dev);
1198
1187 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR); 1199 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
1188 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */ 1200 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */
1189 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR); 1201 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
@@ -1199,6 +1211,8 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1199 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR); 1211 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
1200 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */ 1212 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
1201#endif 1213#endif
1214 pm_runtime_put_sync(&mdp->pdev->dev);
1215
1202 return &mdp->stats; 1216 return &mdp->stats;
1203} 1217}
1204 1218
@@ -1407,6 +1421,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1407 1421
1408 mdp = netdev_priv(ndev); 1422 mdp = netdev_priv(ndev);
1409 spin_lock_init(&mdp->lock); 1423 spin_lock_init(&mdp->lock);
1424 mdp->pdev = pdev;
1425 pm_runtime_enable(&pdev->dev);
1426 pm_runtime_resume(&pdev->dev);
1410 1427
1411 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); 1428 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1412 /* get PHY ID */ 1429 /* get PHY ID */
@@ -1428,7 +1445,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1428 mdp->post_fw = POST_FW >> (devno << 1); 1445 mdp->post_fw = POST_FW >> (devno << 1);
1429 1446
1430 /* read and set MAC address */ 1447 /* read and set MAC address */
1431 read_mac_address(ndev); 1448 read_mac_address(ndev, pd->mac_addr);
1432 1449
1433 /* First device only init */ 1450 /* First device only init */
1434 if (!devno) { 1451 if (!devno) {
@@ -1482,18 +1499,37 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
1482 sh_mdio_release(ndev); 1499 sh_mdio_release(ndev);
1483 unregister_netdev(ndev); 1500 unregister_netdev(ndev);
1484 flush_scheduled_work(); 1501 flush_scheduled_work();
1485 1502 pm_runtime_disable(&pdev->dev);
1486 free_netdev(ndev); 1503 free_netdev(ndev);
1487 platform_set_drvdata(pdev, NULL); 1504 platform_set_drvdata(pdev, NULL);
1488 1505
1489 return 0; 1506 return 0;
1490} 1507}
1491 1508
1509static int sh_eth_runtime_nop(struct device *dev)
1510{
1511 /*
1512 * Runtime PM callback shared between ->runtime_suspend()
1513 * and ->runtime_resume(). Simply returns success.
1514 *
1515 * This driver re-initializes all registers after
1516 * pm_runtime_get_sync() anyway so there is no need
1517 * to save and restore registers here.
1518 */
1519 return 0;
1520}
1521
1522static struct dev_pm_ops sh_eth_dev_pm_ops = {
1523 .runtime_suspend = sh_eth_runtime_nop,
1524 .runtime_resume = sh_eth_runtime_nop,
1525};
1526
1492static struct platform_driver sh_eth_driver = { 1527static struct platform_driver sh_eth_driver = {
1493 .probe = sh_eth_drv_probe, 1528 .probe = sh_eth_drv_probe,
1494 .remove = sh_eth_drv_remove, 1529 .remove = sh_eth_drv_remove,
1495 .driver = { 1530 .driver = {
1496 .name = CARDNAME, 1531 .name = CARDNAME,
1532 .pm = &sh_eth_dev_pm_ops,
1497 }, 1533 },
1498}; 1534};
1499 1535
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index ba151f86ae7b..8b47763958f2 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -703,6 +703,7 @@ struct sh_eth_cpu_data {
703}; 703};
704 704
705struct sh_eth_private { 705struct sh_eth_private {
706 struct platform_device *pdev;
706 struct sh_eth_cpu_data *cd; 707 struct sh_eth_cpu_data *cd;
707 dma_addr_t rx_desc_dma; 708 dma_addr_t rx_desc_dma;
708 dma_addr_t tx_desc_dma; 709 dma_addr_t tx_desc_dma;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 7cc9898f4e00..31233b4c44a0 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -536,13 +536,12 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
536 if (pkt_size >= rx_copybreak) 536 if (pkt_size >= rx_copybreak)
537 goto out; 537 goto out;
538 538
539 skb = netdev_alloc_skb(tp->dev, pkt_size + 2); 539 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
540 if (!skb) 540 if (!skb)
541 goto out; 541 goto out;
542 542
543 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz, 543 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
544 PCI_DMA_FROMDEVICE); 544 PCI_DMA_FROMDEVICE);
545 skb_reserve(skb, 2);
546 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); 545 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
547 *sk_buff = skb; 546 *sk_buff = skb;
548 done = true; 547 done = true;
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index c072f7f36acf..9a12d88ac2d9 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1016,7 +1016,7 @@ sis900_open(struct net_device *net_dev)
1016 /* Equalizer workaround Rule */ 1016 /* Equalizer workaround Rule */
1017 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1017 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1018 1018
1019 ret = request_irq(net_dev->irq, &sis900_interrupt, IRQF_SHARED, 1019 ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED,
1020 net_dev->name, net_dev); 1020 net_dev->name, net_dev);
1021 if (ret) 1021 if (ret)
1022 return ret; 1022 return ret;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 8f5414348e86..34b4e7d500da 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3071,11 +3071,10 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3071 goto error; 3071 goto error;
3072 3072
3073 if (len < RX_COPY_THRESHOLD) { 3073 if (len < RX_COPY_THRESHOLD) {
3074 skb = netdev_alloc_skb(dev, len + 2); 3074 skb = netdev_alloc_skb_ip_align(dev, len);
3075 if (!skb) 3075 if (!skb)
3076 goto resubmit; 3076 goto resubmit;
3077 3077
3078 skb_reserve(skb, 2);
3079 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3078 pci_dma_sync_single_for_cpu(skge->hw->pdev,
3080 pci_unmap_addr(e, mapaddr), 3079 pci_unmap_addr(e, mapaddr),
3081 len, PCI_DMA_FROMDEVICE); 3080 len, PCI_DMA_FROMDEVICE);
@@ -3086,11 +3085,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3086 skge_rx_reuse(e, skge->rx_buf_size); 3085 skge_rx_reuse(e, skge->rx_buf_size);
3087 } else { 3086 } else {
3088 struct sk_buff *nskb; 3087 struct sk_buff *nskb;
3089 nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN); 3088
3089 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3090 if (!nskb) 3090 if (!nskb)
3091 goto resubmit; 3091 goto resubmit;
3092 3092
3093 skb_reserve(nskb, NET_IP_ALIGN);
3094 pci_unmap_single(skge->hw->pdev, 3093 pci_unmap_single(skge->hw->pdev,
3095 pci_unmap_addr(e, mapaddr), 3094 pci_unmap_addr(e, mapaddr),
3096 pci_unmap_len(e, maplen), 3095 pci_unmap_len(e, maplen),
@@ -3948,7 +3947,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3948 hw->pdev = pdev; 3947 hw->pdev = pdev;
3949 spin_lock_init(&hw->hw_lock); 3948 spin_lock_init(&hw->hw_lock);
3950 spin_lock_init(&hw->phy_lock); 3949 spin_lock_init(&hw->phy_lock);
3951 tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw); 3950 tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
3952 3951
3953 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); 3952 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
3954 if (!hw->regs) { 3953 if (!hw->regs) {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 6a10d7ba5877..a3d99913f184 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.25" 53#define DRV_VERSION "1.26"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -102,6 +102,7 @@ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
102static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = { 102static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ 103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ 104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
105 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ 107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ 108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
@@ -139,6 +140,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
139 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ 140 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
140 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ 141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ 142 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
143 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
142 { 0 } 144 { 0 }
143}; 145};
144 146
@@ -602,6 +604,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
602 /* apply workaround for integrated resistors calibration */ 604 /* apply workaround for integrated resistors calibration */
603 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); 605 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
604 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); 606 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
607 } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
608 /* apply fixes in PHY AFE */
609 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
610
611 /* apply RDAC termination workaround */
612 gm_phy_write(hw, port, 24, 0x2800);
613 gm_phy_write(hw, port, 23, 0x2001);
614
615 /* set page register back to 0 */
616 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
605 } else if (hw->chip_id != CHIP_ID_YUKON_EX && 617 } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
606 hw->chip_id < CHIP_ID_YUKON_SUPR) { 618 hw->chip_id < CHIP_ID_YUKON_SUPR) {
607 /* no effect on Yukon-XL */ 619 /* no effect on Yukon-XL */
@@ -786,8 +798,7 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
786 798
787 if ( (hw->chip_id == CHIP_ID_YUKON_EX && 799 if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
788 hw->chip_rev != CHIP_REV_YU_EX_A0) || 800 hw->chip_rev != CHIP_REV_YU_EX_A0) ||
789 hw->chip_id == CHIP_ID_YUKON_FE_P || 801 hw->chip_id >= CHIP_ID_YUKON_FE_P) {
790 hw->chip_id == CHIP_ID_YUKON_SUPR) {
791 /* Yukon-Extreme B0 and further Extreme devices */ 802 /* Yukon-Extreme B0 and further Extreme devices */
792 /* enable Store & Forward mode for TX */ 803 /* enable Store & Forward mode for TX */
793 804
@@ -925,8 +936,14 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
925 936
926 /* On chips without ram buffer, pause is controled by MAC level */ 937 /* On chips without ram buffer, pause is controled by MAC level */
927 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { 938 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
928 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 939 /* Pause threshold is scaled by 8 in bytes */
929 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 940 if (hw->chip_id == CHIP_ID_YUKON_FE_P
941 && hw->chip_rev == CHIP_REV_YU_FE2_A0)
942 reg = 1568 / 8;
943 else
944 reg = 1024 / 8;
945 sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
946 sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
930 947
931 sky2_set_tx_stfwd(hw, port); 948 sky2_set_tx_stfwd(hw, port);
932 } 949 }
@@ -1397,6 +1414,31 @@ static int sky2_rx_start(struct sky2_port *sky2)
1397 1414
1398 /* Tell chip about available buffers */ 1415 /* Tell chip about available buffers */
1399 sky2_rx_update(sky2, rxq); 1416 sky2_rx_update(sky2, rxq);
1417
1418 if (hw->chip_id == CHIP_ID_YUKON_EX ||
1419 hw->chip_id == CHIP_ID_YUKON_SUPR) {
1420 /*
1421 * Disable flushing of non ASF packets;
1422 * must be done after initializing the BMUs;
1423 * drivers without ASF support should do this too, otherwise
1424 * it may happen that they cannot run on ASF devices;
1425 * remember that the MAC FIFO isn't reset during initialization.
1426 */
1427 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
1428 }
1429
1430 if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
1431 /* Enable RX Home Address & Routing Header checksum fix */
1432 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
1433 RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
1434
1435 /* Enable TX Home Address & Routing Header checksum fix */
1436 sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
1437 TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
1438 }
1439
1440
1441
1400 return 0; 1442 return 0;
1401nomem: 1443nomem:
1402 sky2_rx_clean(sky2); 1444 sky2_rx_clean(sky2);
@@ -2096,6 +2138,25 @@ out:
2096 spin_unlock(&sky2->phy_lock); 2138 spin_unlock(&sky2->phy_lock);
2097} 2139}
2098 2140
2141/* Special quick link interrupt (Yukon-2 Optima only) */
2142static void sky2_qlink_intr(struct sky2_hw *hw)
2143{
2144 struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
2145 u32 imask;
2146 u16 phy;
2147
2148 /* disable irq */
2149 imask = sky2_read32(hw, B0_IMSK);
2150 imask &= ~Y2_IS_PHY_QLNK;
2151 sky2_write32(hw, B0_IMSK, imask);
2152
2153 /* reset PHY Link Detect */
2154 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2155 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2156
2157 sky2_link_up(sky2);
2158}
2159
2099/* Transmit timeout is only called if we are running, carrier is up 2160/* Transmit timeout is only called if we are running, carrier is up
2100 * and tx queue is full (stopped). 2161 * and tx queue is full (stopped).
2101 */ 2162 */
@@ -2191,9 +2252,8 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
2191{ 2252{
2192 struct sk_buff *skb; 2253 struct sk_buff *skb;
2193 2254
2194 skb = netdev_alloc_skb(sky2->netdev, length + 2); 2255 skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
2195 if (likely(skb)) { 2256 if (likely(skb)) {
2196 skb_reserve(skb, 2);
2197 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, 2257 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
2198 length, PCI_DMA_FROMDEVICE); 2258 length, PCI_DMA_FROMDEVICE);
2199 skb_copy_from_linear_data(re->skb, skb->data, length); 2259 skb_copy_from_linear_data(re->skb, skb->data, length);
@@ -2766,6 +2826,9 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
2766 if (status & Y2_IS_IRQ_PHY2) 2826 if (status & Y2_IS_IRQ_PHY2)
2767 sky2_phy_intr(hw, 1); 2827 sky2_phy_intr(hw, 1);
2768 2828
2829 if (status & Y2_IS_PHY_QLNK)
2830 sky2_qlink_intr(hw);
2831
2769 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { 2832 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
2770 work_done += sky2_status_intr(hw, work_limit - work_done, idx); 2833 work_done += sky2_status_intr(hw, work_limit - work_done, idx);
2771 2834
@@ -2815,6 +2878,7 @@ static u32 sky2_mhz(const struct sky2_hw *hw)
2815 case CHIP_ID_YUKON_EX: 2878 case CHIP_ID_YUKON_EX:
2816 case CHIP_ID_YUKON_SUPR: 2879 case CHIP_ID_YUKON_SUPR:
2817 case CHIP_ID_YUKON_UL_2: 2880 case CHIP_ID_YUKON_UL_2:
2881 case CHIP_ID_YUKON_OPT:
2818 return 125; 2882 return 125;
2819 2883
2820 case CHIP_ID_YUKON_FE: 2884 case CHIP_ID_YUKON_FE:
@@ -2904,6 +2968,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2904 break; 2968 break;
2905 2969
2906 case CHIP_ID_YUKON_UL_2: 2970 case CHIP_ID_YUKON_UL_2:
2971 case CHIP_ID_YUKON_OPT:
2907 hw->flags = SKY2_HW_GIGABIT 2972 hw->flags = SKY2_HW_GIGABIT
2908 | SKY2_HW_ADV_POWER_CTL; 2973 | SKY2_HW_ADV_POWER_CTL;
2909 break; 2974 break;
@@ -2986,6 +3051,52 @@ static void sky2_reset(struct sky2_hw *hw)
2986 sky2_write16(hw, SK_REG(i, GMAC_CTRL), 3051 sky2_write16(hw, SK_REG(i, GMAC_CTRL),
2987 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON 3052 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
2988 | GMC_BYP_RETR_ON); 3053 | GMC_BYP_RETR_ON);
3054
3055 }
3056
3057 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
3058 /* enable MACSec clock gating */
3059 sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
3060 }
3061
3062 if (hw->chip_id == CHIP_ID_YUKON_OPT) {
3063 u16 reg;
3064 u32 msk;
3065
3066 if (hw->chip_rev == 0) {
3067 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
3068 sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
3069
3070 /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
3071 reg = 10;
3072 } else {
3073 /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
3074 reg = 3;
3075 }
3076
3077 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3078
3079 /* reset PHY Link Detect */
3080 sky2_pci_write16(hw, PSM_CONFIG_REG4,
3081 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
3082 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
3083
3084
3085 /* enable PHY Quick Link */
3086 msk = sky2_read32(hw, B0_IMSK);
3087 msk |= Y2_IS_PHY_QLNK;
3088 sky2_write32(hw, B0_IMSK, msk);
3089
3090 /* check if PSMv2 was running before */
3091 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3092 if (reg & PCI_EXP_LNKCTL_ASPMC) {
3093 int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3094 /* restore the PCIe Link Control register */
3095 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3096 }
3097
3098 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3099 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
2989 } 3100 }
2990 3101
2991 /* Clear I2C IRQ noise */ 3102 /* Clear I2C IRQ noise */
@@ -4406,9 +4517,11 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
4406 "FE+", /* 0xb8 */ 4517 "FE+", /* 0xb8 */
4407 "Supreme", /* 0xb9 */ 4518 "Supreme", /* 0xb9 */
4408 "UL 2", /* 0xba */ 4519 "UL 2", /* 0xba */
4520 "Unknown", /* 0xbb */
4521 "Optima", /* 0xbc */
4409 }; 4522 };
4410 4523
4411 if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_UL_2) 4524 if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
4412 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); 4525 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
4413 else 4526 else
4414 snprintf(buf, sz, "(chip %#x)", chipid); 4527 snprintf(buf, sz, "(chip %#x)", chipid);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ed54129698b4..365d79c7d834 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -16,6 +16,13 @@ enum {
16 PCI_DEV_REG5 = 0x88, 16 PCI_DEV_REG5 = 0x88,
17 PCI_CFG_REG_0 = 0x90, 17 PCI_CFG_REG_0 = 0x90,
18 PCI_CFG_REG_1 = 0x94, 18 PCI_CFG_REG_1 = 0x94,
19
20 PSM_CONFIG_REG0 = 0x98,
21 PSM_CONFIG_REG1 = 0x9C,
22 PSM_CONFIG_REG2 = 0x160,
23 PSM_CONFIG_REG3 = 0x164,
24 PSM_CONFIG_REG4 = 0x168,
25
19}; 26};
20 27
21/* Yukon-2 */ 28/* Yukon-2 */
@@ -48,6 +55,37 @@ enum pci_dev_reg_2 {
48 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ 55 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
49}; 56};
50 57
58/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */
59enum pci_dev_reg_3 {
60 P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
61 P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */
62 P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
63 P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */
64 P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */
65 P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */
66 P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */
67 P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */
68 P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */
69 P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */
70 P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */
71 P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */
72 P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */
73 P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */
74 P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */
75 P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */
76 P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */
77 P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */
78 P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */
79 P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */
80 PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
81 P_CLK_COR_REGS_D0_DIS |
82 P_CLK_COR_LNK1_D0_DIS |
83 P_CLK_MAC_LNK1_D0_DIS |
84 P_CLK_PCI_MST_ARB_DIS |
85 P_CLK_COR_COMMON_DIS |
86 P_CLK_COR_LNK1_BMU_DIS,
87};
88
51/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ 89/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
52enum pci_dev_reg_4 { 90enum pci_dev_reg_4 {
53 /* (Link Training & Status State Machine) */ 91 /* (Link Training & Status State Machine) */
@@ -114,7 +152,7 @@ enum pci_dev_reg_5 {
114 P_GAT_PCIE_RX_EL_IDLE, 152 P_GAT_PCIE_RX_EL_IDLE,
115}; 153};
116 154
117#/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */ 155/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
118enum pci_cfg_reg1 { 156enum pci_cfg_reg1 {
119 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */ 157 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
120 /* Bit 23..21: Release Clock on Event */ 158 /* Bit 23..21: Release Clock on Event */
@@ -145,6 +183,72 @@ enum pci_cfg_reg1 {
145 P_CF1_ENA_TXBMU_WR_IDLE, 183 P_CF1_ENA_TXBMU_WR_IDLE,
146}; 184};
147 185
186/* Yukon-Optima */
187enum {
188 PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */
189
190 PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */
191 PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */
192
193 PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */
194
195 PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */
196 PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */
197 PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */
198 PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */
199
200 PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */
201
202 PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */
203 PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */
204};
205
206/* Yukon-Supreme */
207enum {
208 PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */
209
210 PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */
211 PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */
212 PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
213 PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */
214 PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */
215 PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */
216 PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */
217 PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
218 PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
219 PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
220 PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
221 PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
222 PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
223 PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
224 PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
225 PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
226 PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
227 PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
228 PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
229
230 PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
231 PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
232 PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
233 PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
234 PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
235 PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
236 PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
237 PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
238 PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
239 PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
240};
241
242/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
243enum {
244 /* PHY Link Detect Timer */
245 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
246 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
247
248 PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
249 PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
250};
251
148 252
149#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 253#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
150 PCI_STATUS_SIG_SYSTEM_ERROR | \ 254 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -197,6 +301,9 @@ enum csr_regs {
197 B2_I2C_IRQ = 0x0168, 301 B2_I2C_IRQ = 0x0168,
198 B2_I2C_SW = 0x016c, 302 B2_I2C_SW = 0x016c,
199 303
304 Y2_PEX_PHY_DATA = 0x0170,
305 Y2_PEX_PHY_ADDR = 0x0172,
306
200 B3_RAM_ADDR = 0x0180, 307 B3_RAM_ADDR = 0x0180,
201 B3_RAM_DATA_LO = 0x0184, 308 B3_RAM_DATA_LO = 0x0184,
202 B3_RAM_DATA_HI = 0x0188, 309 B3_RAM_DATA_HI = 0x0188,
@@ -317,6 +424,10 @@ enum {
317 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */ 424 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
318 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */ 425 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
319 426
427 Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */
428 Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */
429 Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */
430
320 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */ 431 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
321 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */ 432 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
322 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */ 433 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
@@ -435,6 +546,7 @@ enum {
435 CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */ 546 CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
436 CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */ 547 CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
437 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ 548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
438}; 550};
439enum yukon_ec_rev { 551enum yukon_ec_rev {
440 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 552 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
@@ -459,6 +571,8 @@ enum yukon_ex_rev {
459}; 571};
460enum yukon_supr_rev { 572enum yukon_supr_rev {
461 CHIP_REV_YU_SU_A0 = 0, 573 CHIP_REV_YU_SU_A0 = 0,
574 CHIP_REV_YU_SU_B0 = 1,
575 CHIP_REV_YU_SU_B1 = 3,
462}; 576};
463 577
464 578
@@ -513,6 +627,12 @@ enum {
513 TIM_T_STEP = 1<<0, /* Test step */ 627 TIM_T_STEP = 1<<0, /* Test step */
514}; 628};
515 629
630/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */
631enum {
632 PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */
633 PEX_DB_ACCESS = 1<<30, /* Access to debug register */
634};
635
516/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ 636/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
517 /* Bit 31..19: reserved */ 637 /* Bit 31..19: reserved */
518#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */ 638#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
@@ -688,10 +808,11 @@ enum {
688 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */ 808 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
689 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ 809 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
690 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ 810 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
691 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ 811 RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */
812 RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */
692 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */ 813 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
693 RX_GMF_UP_THR = 0x0c58,/* 8 bit Rx Upper Pause Thr (Yukon-EC_U) */ 814 RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */
694 RX_GMF_LP_THR = 0x0c5a,/* 8 bit Rx Lower Pause Thr (Yukon-EC_U) */ 815 RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */
695 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */ 816 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
696 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ 817 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
697 818
@@ -754,6 +875,42 @@ enum {
754 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */ 875 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
755}; 876};
756 877
878/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
879enum {
880 TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
881 TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
882 TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Paddiing FIX1 Enable */
883 TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Paddiing FIX1 Disable */
884 TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
885 TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
886 TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
887 TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
888
889 TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
890 TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
891 TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
892
893 TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
894 TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
895 TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
896
897 TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
898 TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
899 TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
900
901 TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
902 TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
903 TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
904
905 TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
906 TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
907 TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
908
909 TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
910 TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
911 TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
912};
913
757/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ 914/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
758/* PREF_UNIT_CTRL 32 bit Prefetch Control register */ 915/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
759enum { 916enum {
@@ -1674,6 +1831,12 @@ enum {
1674 1831
1675/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ 1832/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1676enum { 1833enum {
1834 RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */
1835 RX_GCLKMAC_OFF = 1<<30,
1836
1837 RX_STFW_DIS = 1<<29, /* RX Store and Forward Enable */
1838 RX_STFW_ENA = 1<<28,
1839
1677 RX_TRUNC_ON = 1<<27, /* enable packet truncation */ 1840 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1678 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */ 1841 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1679 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */ 1842 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
@@ -1711,6 +1874,20 @@ enum {
1711 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, 1874 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
1712}; 1875};
1713 1876
1877/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
1878enum {
1879 RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */
1880 RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */
1881 RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */
1882 RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */
1883 RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */
1884 RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */
1885 RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */
1886 RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */
1887 RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */
1888 RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */
1889};
1890
1714/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ 1891/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
1715enum { 1892enum {
1716 TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ 1893 TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index fe3cebb984de..6640469b5d3b 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -80,6 +80,7 @@
80#include <linux/rtnetlink.h> 80#include <linux/rtnetlink.h>
81#include <linux/if_arp.h> 81#include <linux/if_arp.h>
82#include <linux/if_slip.h> 82#include <linux/if_slip.h>
83#include <linux/compat.h>
83#include <linux/delay.h> 84#include <linux/delay.h>
84#include <linux/init.h> 85#include <linux/init.h>
85#include "slip.h" 86#include "slip.h"
@@ -1169,6 +1170,27 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file,
1169 } 1170 }
1170} 1171}
1171 1172
1173#ifdef CONFIG_COMPAT
1174static long slip_compat_ioctl(struct tty_struct *tty, struct file *file,
1175 unsigned int cmd, unsigned long arg)
1176{
1177 switch (cmd) {
1178 case SIOCGIFNAME:
1179 case SIOCGIFENCAP:
1180 case SIOCSIFENCAP:
1181 case SIOCSIFHWADDR:
1182 case SIOCSKEEPALIVE:
1183 case SIOCGKEEPALIVE:
1184 case SIOCSOUTFILL:
1185 case SIOCGOUTFILL:
1186 return slip_ioctl(tty, file, cmd,
1187 (unsigned long)compat_ptr(arg));
1188 }
1189
1190 return -ENOIOCTLCMD;
1191}
1192#endif
1193
1172/* VSV changes start here */ 1194/* VSV changes start here */
1173#ifdef CONFIG_SLIP_SMART 1195#ifdef CONFIG_SLIP_SMART
1174/* function do_ioctl called from net/core/dev.c 1196/* function do_ioctl called from net/core/dev.c
@@ -1261,6 +1283,9 @@ static struct tty_ldisc_ops sl_ldisc = {
1261 .close = slip_close, 1283 .close = slip_close,
1262 .hangup = slip_hangup, 1284 .hangup = slip_hangup,
1263 .ioctl = slip_ioctl, 1285 .ioctl = slip_ioctl,
1286#ifdef CONFIG_COMPAT
1287 .compat_ioctl = slip_compat_ioctl,
1288#endif
1264 .receive_buf = slip_receive_buf, 1289 .receive_buf = slip_receive_buf,
1265 .write_wakeup = slip_write_wakeup, 1290 .write_wakeup = slip_write_wakeup,
1266}; 1291};
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 2a6b6de95339..44ebbaa7457b 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1984,7 +1984,7 @@ static int __devinit smc911x_probe(struct net_device *dev)
1984#endif 1984#endif
1985 1985
1986 /* Grab the IRQ */ 1986 /* Grab the IRQ */
1987 retval = request_irq(dev->irq, &smc911x_interrupt, 1987 retval = request_irq(dev->irq, smc911x_interrupt,
1988 irq_flags, dev->name, dev); 1988 irq_flags, dev->name, dev);
1989 if (retval) 1989 if (retval)
1990 goto err_out; 1990 goto err_out;
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 934a12012829..8371b82323ac 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -1050,7 +1050,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
1050 memset(netdev_priv(dev), 0, sizeof(struct smc_local)); 1050 memset(netdev_priv(dev), 0, sizeof(struct smc_local));
1051 1051
1052 /* Grab the IRQ */ 1052 /* Grab the IRQ */
1053 retval = request_irq(dev->irq, &smc_interrupt, 0, DRV_NAME, dev); 1053 retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
1054 if (retval) { 1054 if (retval) {
1055 printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME, 1055 printk("%s: unable to get IRQ %d (irqval=%d).\n", DRV_NAME,
1056 dev->irq, retval); 1056 dev->irq, retval);
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 05c91ee6921e..2ab90260d4a6 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2031,7 +2031,7 @@ static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
2031 } 2031 }
2032 2032
2033 /* Grab the IRQ */ 2033 /* Grab the IRQ */
2034 retval = request_irq(dev->irq, &smc_interrupt, irq_flags, dev->name, dev); 2034 retval = request_irq(dev->irq, smc_interrupt, irq_flags, dev->name, dev);
2035 if (retval) 2035 if (retval)
2036 goto err_out; 2036 goto err_out;
2037 2037
@@ -2365,9 +2365,10 @@ static int __devexit smc_drv_remove(struct platform_device *pdev)
2365 return 0; 2365 return 0;
2366} 2366}
2367 2367
2368static int smc_drv_suspend(struct platform_device *dev, pm_message_t state) 2368static int smc_drv_suspend(struct device *dev)
2369{ 2369{
2370 struct net_device *ndev = platform_get_drvdata(dev); 2370 struct platform_device *pdev = to_platform_device(dev);
2371 struct net_device *ndev = platform_get_drvdata(pdev);
2371 2372
2372 if (ndev) { 2373 if (ndev) {
2373 if (netif_running(ndev)) { 2374 if (netif_running(ndev)) {
@@ -2379,9 +2380,10 @@ static int smc_drv_suspend(struct platform_device *dev, pm_message_t state)
2379 return 0; 2380 return 0;
2380} 2381}
2381 2382
2382static int smc_drv_resume(struct platform_device *dev) 2383static int smc_drv_resume(struct device *dev)
2383{ 2384{
2384 struct net_device *ndev = platform_get_drvdata(dev); 2385 struct platform_device *pdev = to_platform_device(dev);
2386 struct net_device *ndev = platform_get_drvdata(pdev);
2385 2387
2386 if (ndev) { 2388 if (ndev) {
2387 struct smc_local *lp = netdev_priv(ndev); 2389 struct smc_local *lp = netdev_priv(ndev);
@@ -2397,14 +2399,18 @@ static int smc_drv_resume(struct platform_device *dev)
2397 return 0; 2399 return 0;
2398} 2400}
2399 2401
2402static struct dev_pm_ops smc_drv_pm_ops = {
2403 .suspend = smc_drv_suspend,
2404 .resume = smc_drv_resume,
2405};
2406
2400static struct platform_driver smc_driver = { 2407static struct platform_driver smc_driver = {
2401 .probe = smc_drv_probe, 2408 .probe = smc_drv_probe,
2402 .remove = __devexit_p(smc_drv_remove), 2409 .remove = __devexit_p(smc_drv_remove),
2403 .suspend = smc_drv_suspend,
2404 .resume = smc_drv_resume,
2405 .driver = { 2410 .driver = {
2406 .name = CARDNAME, 2411 .name = CARDNAME,
2407 .owner = THIS_MODULE, 2412 .owner = THIS_MODULE,
2413 .pm = &smc_drv_pm_ops,
2408 }, 2414 },
2409}; 2415};
2410 2416
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index f9cdcbcb77d4..7f01e60d5172 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2071,6 +2071,9 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2071 if (is_valid_ether_addr(dev->dev_addr)) { 2071 if (is_valid_ether_addr(dev->dev_addr)) {
2072 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 2072 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
2073 SMSC_TRACE(PROBE, "MAC Address is specified by configuration"); 2073 SMSC_TRACE(PROBE, "MAC Address is specified by configuration");
2074 } else if (is_valid_ether_addr(pdata->config.mac)) {
2075 memcpy(dev->dev_addr, pdata->config.mac, 6);
2076 SMSC_TRACE(PROBE, "MAC Address specified by platform data");
2074 } else { 2077 } else {
2075 /* Try reading mac address from device. if EEPROM is present 2078 /* Try reading mac address from device. if EEPROM is present
2076 * it will already have been set */ 2079 * it will already have been set */
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index b4909a2dec66..92e2bbe6b49b 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1161,7 +1161,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
1161 phydev->phy_id); 1161 phydev->phy_id);
1162 1162
1163 phydev = phy_connect(dev, dev_name(&phydev->dev), 1163 phydev = phy_connect(dev, dev_name(&phydev->dev),
1164 &smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1164 smsc9420_phy_adjust_link, 0, PHY_INTERFACE_MODE_MII);
1165 1165
1166 if (IS_ERR(phydev)) { 1166 if (IS_ERR(phydev)) {
1167 pr_err("%s: Could not attach to PHY\n", dev->name); 1167 pr_err("%s: Could not attach to PHY\n", dev->name);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 90e663f4515c..782910cf220f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -57,6 +57,7 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
57MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); 57MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
58MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
59MODULE_VERSION(VERSION); 59MODULE_VERSION(VERSION);
60MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
60 61
61static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; 62static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
62static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; 63static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index a36e2b51e88c..aa10158adb9e 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -928,7 +928,7 @@ static int netdev_open(struct net_device *dev)
928 928
929 /* Do we ever need to reset the chip??? */ 929 /* Do we ever need to reset the chip??? */
930 930
931 retval = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev); 931 retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
932 if (retval) 932 if (retval)
933 return retval; 933 return retval;
934 934
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 9542995ba667..40813f0b5339 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1024,7 +1024,7 @@ static int stmmac_open(struct net_device *dev)
1024 } 1024 }
1025 1025
1026 /* Request the IRQ lines */ 1026 /* Request the IRQ lines */
1027 ret = request_irq(dev->irq, &stmmac_interrupt, 1027 ret = request_irq(dev->irq, stmmac_interrupt,
1028 IRQF_SHARED, dev->name, dev); 1028 IRQF_SHARED, dev->name, dev);
1029 if (unlikely(ret < 0)) { 1029 if (unlikely(ret < 0)) {
1030 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", 1030 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 2f1eaaf7a727..b447a8719427 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -191,7 +191,7 @@ static int sun3_82586_open(struct net_device *dev)
191 startrecv586(dev); 191 startrecv586(dev);
192 sun3_enaint(); 192 sun3_enaint();
193 193
194 ret = request_irq(dev->irq, &sun3_82586_interrupt,0,dev->name,dev); 194 ret = request_irq(dev->irq, sun3_82586_interrupt,0,dev->name,dev);
195 if (ret) 195 if (ret)
196 { 196 {
197 sun3_reset586(); 197 sun3_reset586();
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 536cf7e06bfd..25e81ebd9cd8 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -919,7 +919,7 @@ static int bigmac_open(struct net_device *dev)
919 struct bigmac *bp = netdev_priv(dev); 919 struct bigmac *bp = netdev_priv(dev);
920 int ret; 920 int ret;
921 921
922 ret = request_irq(dev->irq, &bigmac_interrupt, IRQF_SHARED, dev->name, bp); 922 ret = request_irq(dev->irq, bigmac_interrupt, IRQF_SHARED, dev->name, bp);
923 if (ret) { 923 if (ret) {
924 printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq); 924 printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
925 return ret; 925 return ret;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index e13685a570f4..5c396c2e6e76 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -819,7 +819,7 @@ static int netdev_open(struct net_device *dev)
819 819
820 /* Do we need to reset the chip??? */ 820 /* Do we need to reset the chip??? */
821 821
822 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev); 822 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
823 if (i) 823 if (i)
824 return i; 824 return i;
825 825
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 61640b99b705..b571a1babab9 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1034,10 +1034,8 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
1034 (csum_stuff_off << 21)); 1034 (csum_stuff_off << 21));
1035 } 1035 }
1036 1036
1037 local_irq_save(flags); 1037 if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
1038 if (!spin_trylock(&gp->tx_lock)) {
1039 /* Tell upper layer to requeue */ 1038 /* Tell upper layer to requeue */
1040 local_irq_restore(flags);
1041 return NETDEV_TX_LOCKED; 1039 return NETDEV_TX_LOCKED;
1042 } 1040 }
1043 /* We raced with gem_do_stop() */ 1041 /* We raced with gem_do_stop() */
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 37d721bbdb35..1f842a78acd1 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2184,7 +2184,7 @@ static int happy_meal_open(struct net_device *dev)
2184 * into a single source which we register handling at probe time. 2184 * into a single source which we register handling at probe time.
2185 */ 2185 */
2186 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { 2186 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2187 if (request_irq(dev->irq, &happy_meal_interrupt, 2187 if (request_irq(dev->irq, happy_meal_interrupt,
2188 IRQF_SHARED, dev->name, (void *)dev)) { 2188 IRQF_SHARED, dev->name, (void *)dev)) {
2189 HMD(("EAGAIN\n")); 2189 HMD(("EAGAIN\n"));
2190 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", 2190 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 9d6fd4760eab..64e7d08c878f 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -923,7 +923,7 @@ static int lance_open(struct net_device *dev)
923 923
924 STOP_LANCE(lp); 924 STOP_LANCE(lp);
925 925
926 if (request_irq(dev->irq, &lance_interrupt, IRQF_SHARED, 926 if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED,
927 lancestr, (void *) dev)) { 927 lancestr, (void *) dev)) {
928 printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq); 928 printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
929 return -EAGAIN; 929 return -EAGAIN;
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index dcefb608a9f4..45c383f285ee 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -807,7 +807,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child)
807 807
808 qec_init_once(qecp, op); 808 qec_init_once(qecp, op);
809 809
810 if (request_irq(op->irqs[0], &qec_interrupt, 810 if (request_irq(op->irqs[0], qec_interrupt,
811 IRQF_SHARED, "qec", (void *) qecp)) { 811 IRQF_SHARED, "qec", (void *) qecp)) {
812 printk(KERN_ERR "qec: Can't register irq.\n"); 812 printk(KERN_ERR "qec: Can't register irq.\n");
813 goto fail; 813 goto fail;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d1298e5b72c5..75a669d48e5e 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -22,11 +22,7 @@
22 * All Rights Reserved. 22 * All Rights Reserved.
23 */ 23 */
24 24
25#ifdef TC35815_NAPI 25#define DRV_VERSION "1.39"
26#define DRV_VERSION "1.38-NAPI"
27#else
28#define DRV_VERSION "1.38"
29#endif
30static const char *version = "tc35815.c:v" DRV_VERSION "\n"; 26static const char *version = "tc35815.c:v" DRV_VERSION "\n";
31#define MODNAME "tc35815" 27#define MODNAME "tc35815"
32 28
@@ -54,13 +50,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
54#include <asm/io.h> 50#include <asm/io.h>
55#include <asm/byteorder.h> 51#include <asm/byteorder.h>
56 52
57/* First, a few definitions that the brave might change. */
58
59#define GATHER_TXINT /* On-Demand Tx Interrupt */
60#define WORKAROUND_LOSTCAR
61#define WORKAROUND_100HALF_PROMISC
62/* #define TC35815_USE_PACKEDBUFFER */
63
64enum tc35815_chiptype { 53enum tc35815_chiptype {
65 TC35815CF = 0, 54 TC35815CF = 0,
66 TC35815_NWU, 55 TC35815_NWU,
@@ -330,17 +319,10 @@ struct BDesc {
330 319
331 320
332/* Some useful constants. */ 321/* Some useful constants. */
333#undef NO_CHECK_CARRIER /* Does not check No-Carrier with TP */
334 322
335#ifdef NO_CHECK_CARRIER 323#define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \
336#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
337 Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
338 Tx_En) /* maybe 0x7b01 */
339#else
340#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
341 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \ 324 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
342 Tx_En) /* maybe 0x7b01 */ 325 Tx_En) /* maybe 0x7b01 */
343#endif
344/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */ 326/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
345#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \ 327#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
346 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */ 328 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
@@ -361,13 +343,6 @@ struct BDesc {
361#define TX_THRESHOLD_KEEP_LIMIT 10 343#define TX_THRESHOLD_KEEP_LIMIT 10
362 344
363/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ 345/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
364#ifdef TC35815_USE_PACKEDBUFFER
365#define FD_PAGE_NUM 2
366#define RX_BUF_NUM 8 /* >= 2 */
367#define RX_FD_NUM 250 /* >= 32 */
368#define TX_FD_NUM 128
369#define RX_BUF_SIZE PAGE_SIZE
370#else /* TC35815_USE_PACKEDBUFFER */
371#define FD_PAGE_NUM 4 346#define FD_PAGE_NUM 4
372#define RX_BUF_NUM 128 /* < 256 */ 347#define RX_BUF_NUM 128 /* < 256 */
373#define RX_FD_NUM 256 /* >= 32 */ 348#define RX_FD_NUM 256 /* >= 32 */
@@ -381,7 +356,6 @@ struct BDesc {
381#define RX_BUF_SIZE \ 356#define RX_BUF_SIZE \
382 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN) 357 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
383#endif 358#endif
384#endif /* TC35815_USE_PACKEDBUFFER */
385#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ 359#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */
386#define NAPI_WEIGHT 16 360#define NAPI_WEIGHT 16
387 361
@@ -439,11 +413,7 @@ struct tc35815_local {
439 /* 413 /*
440 * Transmitting: Batch Mode. 414 * Transmitting: Batch Mode.
441 * 1 BD in 1 TxFD. 415 * 1 BD in 1 TxFD.
442 * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER) 416 * Receiving: Non-Packing Mode.
443 * 1 circular FD for Free Buffer List.
444 * RX_BUF_NUM BD in Free Buffer FD.
445 * One Free Buffer BD has PAGE_SIZE data buffer.
446 * Or Non-Packing Mode.
447 * 1 circular FD for Free Buffer List. 417 * 1 circular FD for Free Buffer List.
448 * RX_BUF_NUM BD in Free Buffer FD. 418 * RX_BUF_NUM BD in Free Buffer FD.
449 * One Free Buffer BD has ETH_FRAME_LEN data buffer. 419 * One Free Buffer BD has ETH_FRAME_LEN data buffer.
@@ -457,21 +427,11 @@ struct tc35815_local {
457 struct RxFD *rfd_limit; 427 struct RxFD *rfd_limit;
458 struct RxFD *rfd_cur; 428 struct RxFD *rfd_cur;
459 struct FrFD *fbl_ptr; 429 struct FrFD *fbl_ptr;
460#ifdef TC35815_USE_PACKEDBUFFER
461 unsigned char fbl_curid;
462 void *data_buf[RX_BUF_NUM]; /* packing */
463 dma_addr_t data_buf_dma[RX_BUF_NUM];
464 struct {
465 struct sk_buff *skb;
466 dma_addr_t skb_dma;
467 } tx_skbs[TX_FD_NUM];
468#else
469 unsigned int fbl_count; 430 unsigned int fbl_count;
470 struct { 431 struct {
471 struct sk_buff *skb; 432 struct sk_buff *skb;
472 dma_addr_t skb_dma; 433 dma_addr_t skb_dma;
473 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; 434 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
474#endif
475 u32 msg_enable; 435 u32 msg_enable;
476 enum tc35815_chiptype chiptype; 436 enum tc35815_chiptype chiptype;
477}; 437};
@@ -486,51 +446,6 @@ static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
486 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); 446 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
487} 447}
488#endif 448#endif
489#ifdef TC35815_USE_PACKEDBUFFER
490static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
491{
492 int i;
493 for (i = 0; i < RX_BUF_NUM; i++) {
494 if (bus >= lp->data_buf_dma[i] &&
495 bus < lp->data_buf_dma[i] + PAGE_SIZE)
496 return (void *)((u8 *)lp->data_buf[i] +
497 (bus - lp->data_buf_dma[i]));
498 }
499 return NULL;
500}
501
502#define TC35815_DMA_SYNC_ONDEMAND
503static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
504{
505#ifdef TC35815_DMA_SYNC_ONDEMAND
506 void *buf;
507 /* pci_map + pci_dma_sync will be more effective than
508 * pci_alloc_consistent on some archs. */
509 buf = (void *)__get_free_page(GFP_ATOMIC);
510 if (!buf)
511 return NULL;
512 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
513 PCI_DMA_FROMDEVICE);
514 if (pci_dma_mapping_error(hwdev, *dma_handle)) {
515 free_page((unsigned long)buf);
516 return NULL;
517 }
518 return buf;
519#else
520 return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle);
521#endif
522}
523
524static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle)
525{
526#ifdef TC35815_DMA_SYNC_ONDEMAND
527 pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE);
528 free_page((unsigned long)buf);
529#else
530 pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle);
531#endif
532}
533#else /* TC35815_USE_PACKEDBUFFER */
534static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, 449static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
535 struct pci_dev *hwdev, 450 struct pci_dev *hwdev,
536 dma_addr_t *dma_handle) 451 dma_addr_t *dma_handle)
@@ -555,19 +470,14 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_
555 PCI_DMA_FROMDEVICE); 470 PCI_DMA_FROMDEVICE);
556 dev_kfree_skb_any(skb); 471 dev_kfree_skb_any(skb);
557} 472}
558#endif /* TC35815_USE_PACKEDBUFFER */
559 473
560/* Index to functions, as function prototypes. */ 474/* Index to functions, as function prototypes. */
561 475
562static int tc35815_open(struct net_device *dev); 476static int tc35815_open(struct net_device *dev);
563static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); 477static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
564static irqreturn_t tc35815_interrupt(int irq, void *dev_id); 478static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
565#ifdef TC35815_NAPI
566static int tc35815_rx(struct net_device *dev, int limit); 479static int tc35815_rx(struct net_device *dev, int limit);
567static int tc35815_poll(struct napi_struct *napi, int budget); 480static int tc35815_poll(struct napi_struct *napi, int budget);
568#else
569static void tc35815_rx(struct net_device *dev);
570#endif
571static void tc35815_txdone(struct net_device *dev); 481static void tc35815_txdone(struct net_device *dev);
572static int tc35815_close(struct net_device *dev); 482static int tc35815_close(struct net_device *dev);
573static struct net_device_stats *tc35815_get_stats(struct net_device *dev); 483static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
@@ -654,8 +564,6 @@ static void tc_handle_link_change(struct net_device *dev)
654 * TX4939 PCFG.SPEEDn bit will be changed on 564 * TX4939 PCFG.SPEEDn bit will be changed on
655 * NETDEV_CHANGE event. 565 * NETDEV_CHANGE event.
656 */ 566 */
657
658#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
659 /* 567 /*
660 * WORKAROUND: enable LostCrS only if half duplex 568 * WORKAROUND: enable LostCrS only if half duplex
661 * operation. 569 * operation.
@@ -665,7 +573,6 @@ static void tc_handle_link_change(struct net_device *dev)
665 lp->chiptype != TC35815_TX4939) 573 lp->chiptype != TC35815_TX4939)
666 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, 574 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
667 &tr->Tx_Ctl); 575 &tr->Tx_Ctl);
668#endif
669 576
670 lp->speed = phydev->speed; 577 lp->speed = phydev->speed;
671 lp->duplex = phydev->duplex; 578 lp->duplex = phydev->duplex;
@@ -674,11 +581,9 @@ static void tc_handle_link_change(struct net_device *dev)
674 581
675 if (phydev->link != lp->link) { 582 if (phydev->link != lp->link) {
676 if (phydev->link) { 583 if (phydev->link) {
677#ifdef WORKAROUND_100HALF_PROMISC
678 /* delayed promiscuous enabling */ 584 /* delayed promiscuous enabling */
679 if (dev->flags & IFF_PROMISC) 585 if (dev->flags & IFF_PROMISC)
680 tc35815_set_multicast_list(dev); 586 tc35815_set_multicast_list(dev);
681#endif
682 } else { 587 } else {
683 lp->speed = 0; 588 lp->speed = 0;
684 lp->duplex = -1; 589 lp->duplex = -1;
@@ -923,9 +828,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
923 dev->netdev_ops = &tc35815_netdev_ops; 828 dev->netdev_ops = &tc35815_netdev_ops;
924 dev->ethtool_ops = &tc35815_ethtool_ops; 829 dev->ethtool_ops = &tc35815_ethtool_ops;
925 dev->watchdog_timeo = TC35815_TX_TIMEOUT; 830 dev->watchdog_timeo = TC35815_TX_TIMEOUT;
926#ifdef TC35815_NAPI
927 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); 831 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
928#endif
929 832
930 dev->irq = pdev->irq; 833 dev->irq = pdev->irq;
931 dev->base_addr = (unsigned long)ioaddr; 834 dev->base_addr = (unsigned long)ioaddr;
@@ -1007,25 +910,6 @@ tc35815_init_queues(struct net_device *dev)
1007 if (!lp->fd_buf) 910 if (!lp->fd_buf)
1008 return -ENOMEM; 911 return -ENOMEM;
1009 for (i = 0; i < RX_BUF_NUM; i++) { 912 for (i = 0; i < RX_BUF_NUM; i++) {
1010#ifdef TC35815_USE_PACKEDBUFFER
1011 lp->data_buf[i] =
1012 alloc_rxbuf_page(lp->pci_dev,
1013 &lp->data_buf_dma[i]);
1014 if (!lp->data_buf[i]) {
1015 while (--i >= 0) {
1016 free_rxbuf_page(lp->pci_dev,
1017 lp->data_buf[i],
1018 lp->data_buf_dma[i]);
1019 lp->data_buf[i] = NULL;
1020 }
1021 pci_free_consistent(lp->pci_dev,
1022 PAGE_SIZE * FD_PAGE_NUM,
1023 lp->fd_buf,
1024 lp->fd_buf_dma);
1025 lp->fd_buf = NULL;
1026 return -ENOMEM;
1027 }
1028#else
1029 lp->rx_skbs[i].skb = 913 lp->rx_skbs[i].skb =
1030 alloc_rxbuf_skb(dev, lp->pci_dev, 914 alloc_rxbuf_skb(dev, lp->pci_dev,
1031 &lp->rx_skbs[i].skb_dma); 915 &lp->rx_skbs[i].skb_dma);
@@ -1043,15 +927,9 @@ tc35815_init_queues(struct net_device *dev)
1043 lp->fd_buf = NULL; 927 lp->fd_buf = NULL;
1044 return -ENOMEM; 928 return -ENOMEM;
1045 } 929 }
1046#endif
1047 } 930 }
1048 printk(KERN_DEBUG "%s: FD buf %p DataBuf", 931 printk(KERN_DEBUG "%s: FD buf %p DataBuf",
1049 dev->name, lp->fd_buf); 932 dev->name, lp->fd_buf);
1050#ifdef TC35815_USE_PACKEDBUFFER
1051 printk(" DataBuf");
1052 for (i = 0; i < RX_BUF_NUM; i++)
1053 printk(" %p", lp->data_buf[i]);
1054#endif
1055 printk("\n"); 933 printk("\n");
1056 } else { 934 } else {
1057 for (i = 0; i < FD_PAGE_NUM; i++) 935 for (i = 0; i < FD_PAGE_NUM; i++)
@@ -1084,7 +962,6 @@ tc35815_init_queues(struct net_device *dev)
1084 lp->fbl_ptr = (struct FrFD *)fd_addr; 962 lp->fbl_ptr = (struct FrFD *)fd_addr;
1085 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr)); 963 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
1086 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); 964 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
1087#ifndef TC35815_USE_PACKEDBUFFER
1088 /* 965 /*
1089 * move all allocated skbs to head of rx_skbs[] array. 966 * move all allocated skbs to head of rx_skbs[] array.
1090 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in 967 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in
@@ -1102,11 +979,7 @@ tc35815_init_queues(struct net_device *dev)
1102 lp->fbl_count++; 979 lp->fbl_count++;
1103 } 980 }
1104 } 981 }
1105#endif
1106 for (i = 0; i < RX_BUF_NUM; i++) { 982 for (i = 0; i < RX_BUF_NUM; i++) {
1107#ifdef TC35815_USE_PACKEDBUFFER
1108 lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
1109#else
1110 if (i >= lp->fbl_count) { 983 if (i >= lp->fbl_count) {
1111 lp->fbl_ptr->bd[i].BuffData = 0; 984 lp->fbl_ptr->bd[i].BuffData = 0;
1112 lp->fbl_ptr->bd[i].BDCtl = 0; 985 lp->fbl_ptr->bd[i].BDCtl = 0;
@@ -1114,15 +987,11 @@ tc35815_init_queues(struct net_device *dev)
1114 } 987 }
1115 lp->fbl_ptr->bd[i].BuffData = 988 lp->fbl_ptr->bd[i].BuffData =
1116 cpu_to_le32(lp->rx_skbs[i].skb_dma); 989 cpu_to_le32(lp->rx_skbs[i].skb_dma);
1117#endif
1118 /* BDID is index of FrFD.bd[] */ 990 /* BDID is index of FrFD.bd[] */
1119 lp->fbl_ptr->bd[i].BDCtl = 991 lp->fbl_ptr->bd[i].BDCtl =
1120 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | 992 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
1121 RX_BUF_SIZE); 993 RX_BUF_SIZE);
1122 } 994 }
1123#ifdef TC35815_USE_PACKEDBUFFER
1124 lp->fbl_curid = 0;
1125#endif
1126 995
1127 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n", 996 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
1128 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); 997 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
@@ -1196,19 +1065,11 @@ tc35815_free_queues(struct net_device *dev)
1196 lp->fbl_ptr = NULL; 1065 lp->fbl_ptr = NULL;
1197 1066
1198 for (i = 0; i < RX_BUF_NUM; i++) { 1067 for (i = 0; i < RX_BUF_NUM; i++) {
1199#ifdef TC35815_USE_PACKEDBUFFER
1200 if (lp->data_buf[i]) {
1201 free_rxbuf_page(lp->pci_dev,
1202 lp->data_buf[i], lp->data_buf_dma[i]);
1203 lp->data_buf[i] = NULL;
1204 }
1205#else
1206 if (lp->rx_skbs[i].skb) { 1068 if (lp->rx_skbs[i].skb) {
1207 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, 1069 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
1208 lp->rx_skbs[i].skb_dma); 1070 lp->rx_skbs[i].skb_dma);
1209 lp->rx_skbs[i].skb = NULL; 1071 lp->rx_skbs[i].skb = NULL;
1210 } 1072 }
1211#endif
1212 } 1073 }
1213 if (lp->fd_buf) { 1074 if (lp->fd_buf) {
1214 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, 1075 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
@@ -1254,7 +1115,7 @@ dump_rxfd(struct RxFD *fd)
1254 return bd_count; 1115 return bd_count;
1255} 1116}
1256 1117
1257#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER) 1118#ifdef DEBUG
1258static void 1119static void
1259dump_frfd(struct FrFD *fd) 1120dump_frfd(struct FrFD *fd)
1260{ 1121{
@@ -1271,9 +1132,7 @@ dump_frfd(struct FrFD *fd)
1271 le32_to_cpu(fd->bd[i].BDCtl)); 1132 le32_to_cpu(fd->bd[i].BDCtl));
1272 printk("\n"); 1133 printk("\n");
1273} 1134}
1274#endif
1275 1135
1276#ifdef DEBUG
1277static void 1136static void
1278panic_queues(struct net_device *dev) 1137panic_queues(struct net_device *dev)
1279{ 1138{
@@ -1389,7 +1248,7 @@ tc35815_open(struct net_device *dev)
1389 * This is used if the interrupt line can turned off (shared). 1248 * This is used if the interrupt line can turned off (shared).
1390 * See 3c503.c for an example of selecting the IRQ at config-time. 1249 * See 3c503.c for an example of selecting the IRQ at config-time.
1391 */ 1250 */
1392 if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, 1251 if (request_irq(dev->irq, tc35815_interrupt, IRQF_SHARED,
1393 dev->name, dev)) 1252 dev->name, dev))
1394 return -EAGAIN; 1253 return -EAGAIN;
1395 1254
@@ -1400,9 +1259,7 @@ tc35815_open(struct net_device *dev)
1400 return -EAGAIN; 1259 return -EAGAIN;
1401 } 1260 }
1402 1261
1403#ifdef TC35815_NAPI
1404 napi_enable(&lp->napi); 1262 napi_enable(&lp->napi);
1405#endif
1406 1263
1407 /* Reset the hardware here. Don't forget to set the station address. */ 1264 /* Reset the hardware here. Don't forget to set the station address. */
1408 spin_lock_irq(&lp->lock); 1265 spin_lock_irq(&lp->lock);
@@ -1478,9 +1335,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1478 (struct tc35815_regs __iomem *)dev->base_addr; 1335 (struct tc35815_regs __iomem *)dev->base_addr;
1479 /* Start DMA Transmitter. */ 1336 /* Start DMA Transmitter. */
1480 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1337 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1481#ifdef GATHER_TXINT
1482 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1338 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1483#endif
1484 if (netif_msg_tx_queued(lp)) { 1339 if (netif_msg_tx_queued(lp)) {
1485 printk("%s: starting TxFD.\n", dev->name); 1340 printk("%s: starting TxFD.\n", dev->name);
1486 dump_txfd(txfd); 1341 dump_txfd(txfd);
@@ -1536,11 +1391,7 @@ static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1536 tc35815_schedule_restart(dev); 1391 tc35815_schedule_restart(dev);
1537} 1392}
1538 1393
1539#ifdef TC35815_NAPI
1540static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) 1394static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1541#else
1542static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1543#endif
1544{ 1395{
1545 struct tc35815_local *lp = netdev_priv(dev); 1396 struct tc35815_local *lp = netdev_priv(dev);
1546 int ret = -1; 1397 int ret = -1;
@@ -1579,12 +1430,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1579 /* normal notification */ 1430 /* normal notification */
1580 if (status & Int_IntMacRx) { 1431 if (status & Int_IntMacRx) {
1581 /* Got a packet(s). */ 1432 /* Got a packet(s). */
1582#ifdef TC35815_NAPI
1583 ret = tc35815_rx(dev, limit); 1433 ret = tc35815_rx(dev, limit);
1584#else
1585 tc35815_rx(dev);
1586 ret = 0;
1587#endif
1588 lp->lstats.rx_ints++; 1434 lp->lstats.rx_ints++;
1589 } 1435 }
1590 if (status & Int_IntMacTx) { 1436 if (status & Int_IntMacTx) {
@@ -1592,7 +1438,8 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1592 lp->lstats.tx_ints++; 1438 lp->lstats.tx_ints++;
1593 tc35815_txdone(dev); 1439 tc35815_txdone(dev);
1594 netif_wake_queue(dev); 1440 netif_wake_queue(dev);
1595 ret = 0; 1441 if (ret < 0)
1442 ret = 0;
1596 } 1443 }
1597 return ret; 1444 return ret;
1598} 1445}
@@ -1607,7 +1454,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1607 struct tc35815_local *lp = netdev_priv(dev); 1454 struct tc35815_local *lp = netdev_priv(dev);
1608 struct tc35815_regs __iomem *tr = 1455 struct tc35815_regs __iomem *tr =
1609 (struct tc35815_regs __iomem *)dev->base_addr; 1456 (struct tc35815_regs __iomem *)dev->base_addr;
1610#ifdef TC35815_NAPI
1611 u32 dmactl = tc_readl(&tr->DMA_Ctl); 1457 u32 dmactl = tc_readl(&tr->DMA_Ctl);
1612 1458
1613 if (!(dmactl & DMA_IntMask)) { 1459 if (!(dmactl & DMA_IntMask)) {
@@ -1624,22 +1470,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1624 return IRQ_HANDLED; 1470 return IRQ_HANDLED;
1625 } 1471 }
1626 return IRQ_NONE; 1472 return IRQ_NONE;
1627#else
1628 int handled;
1629 u32 status;
1630
1631 spin_lock(&lp->lock);
1632 status = tc_readl(&tr->Int_Src);
1633 /* BLEx, FDAEx will be cleared later */
1634 tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1635 &tr->Int_Src); /* write to clear */
1636 handled = tc35815_do_interrupt(dev, status);
1637 if (status & (Int_BLEx | Int_FDAEx))
1638 tc_writel(status & (Int_BLEx | Int_FDAEx), &tr->Int_Src);
1639 (void)tc_readl(&tr->Int_Src); /* flush */
1640 spin_unlock(&lp->lock);
1641 return IRQ_RETVAL(handled >= 0);
1642#endif /* TC35815_NAPI */
1643} 1473}
1644 1474
1645#ifdef CONFIG_NET_POLL_CONTROLLER 1475#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1652,20 +1482,13 @@ static void tc35815_poll_controller(struct net_device *dev)
1652#endif 1482#endif
1653 1483
1654/* We have a good packet(s), get it/them out of the buffers. */ 1484/* We have a good packet(s), get it/them out of the buffers. */
1655#ifdef TC35815_NAPI
1656static int 1485static int
1657tc35815_rx(struct net_device *dev, int limit) 1486tc35815_rx(struct net_device *dev, int limit)
1658#else
1659static void
1660tc35815_rx(struct net_device *dev)
1661#endif
1662{ 1487{
1663 struct tc35815_local *lp = netdev_priv(dev); 1488 struct tc35815_local *lp = netdev_priv(dev);
1664 unsigned int fdctl; 1489 unsigned int fdctl;
1665 int i; 1490 int i;
1666#ifdef TC35815_NAPI
1667 int received = 0; 1491 int received = 0;
1668#endif
1669 1492
1670 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { 1493 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1671 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); 1494 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
@@ -1684,52 +1507,9 @@ tc35815_rx(struct net_device *dev)
1684 struct sk_buff *skb; 1507 struct sk_buff *skb;
1685 unsigned char *data; 1508 unsigned char *data;
1686 int cur_bd; 1509 int cur_bd;
1687#ifdef TC35815_USE_PACKEDBUFFER
1688 int offset;
1689#endif
1690 1510
1691#ifdef TC35815_NAPI
1692 if (--limit < 0) 1511 if (--limit < 0)
1693 break; 1512 break;
1694#endif
1695#ifdef TC35815_USE_PACKEDBUFFER
1696 BUG_ON(bd_count > 2);
1697 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1698 if (skb == NULL) {
1699 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
1700 dev->name);
1701 dev->stats.rx_dropped++;
1702 break;
1703 }
1704 skb_reserve(skb, NET_IP_ALIGN);
1705
1706 data = skb_put(skb, pkt_len);
1707
1708 /* copy from receive buffer */
1709 cur_bd = 0;
1710 offset = 0;
1711 while (offset < pkt_len && cur_bd < bd_count) {
1712 int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
1713 BD_BuffLength_MASK;
1714 dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
1715 void *rxbuf = rxbuf_bus_to_virt(lp, dma);
1716 if (offset + len > pkt_len)
1717 len = pkt_len - offset;
1718#ifdef TC35815_DMA_SYNC_ONDEMAND
1719 pci_dma_sync_single_for_cpu(lp->pci_dev,
1720 dma, len,
1721 PCI_DMA_FROMDEVICE);
1722#endif
1723 memcpy(data + offset, rxbuf, len);
1724#ifdef TC35815_DMA_SYNC_ONDEMAND
1725 pci_dma_sync_single_for_device(lp->pci_dev,
1726 dma, len,
1727 PCI_DMA_FROMDEVICE);
1728#endif
1729 offset += len;
1730 cur_bd++;
1731 }
1732#else /* TC35815_USE_PACKEDBUFFER */
1733 BUG_ON(bd_count > 1); 1513 BUG_ON(bd_count > 1);
1734 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) 1514 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1735 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; 1515 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
@@ -1757,16 +1537,11 @@ tc35815_rx(struct net_device *dev)
1757 memmove(skb->data, skb->data - NET_IP_ALIGN, 1537 memmove(skb->data, skb->data - NET_IP_ALIGN,
1758 pkt_len); 1538 pkt_len);
1759 data = skb_put(skb, pkt_len); 1539 data = skb_put(skb, pkt_len);
1760#endif /* TC35815_USE_PACKEDBUFFER */
1761 if (netif_msg_pktdata(lp)) 1540 if (netif_msg_pktdata(lp))
1762 print_eth(data); 1541 print_eth(data);
1763 skb->protocol = eth_type_trans(skb, dev); 1542 skb->protocol = eth_type_trans(skb, dev);
1764#ifdef TC35815_NAPI
1765 netif_receive_skb(skb); 1543 netif_receive_skb(skb);
1766 received++; 1544 received++;
1767#else
1768 netif_rx(skb);
1769#endif
1770 dev->stats.rx_packets++; 1545 dev->stats.rx_packets++;
1771 dev->stats.rx_bytes += pkt_len; 1546 dev->stats.rx_bytes += pkt_len;
1772 } else { 1547 } else {
@@ -1803,19 +1578,11 @@ tc35815_rx(struct net_device *dev)
1803 BUG_ON(id >= RX_BUF_NUM); 1578 BUG_ON(id >= RX_BUF_NUM);
1804#endif 1579#endif
1805 /* free old buffers */ 1580 /* free old buffers */
1806#ifdef TC35815_USE_PACKEDBUFFER
1807 while (lp->fbl_curid != id)
1808#else
1809 lp->fbl_count--; 1581 lp->fbl_count--;
1810 while (lp->fbl_count < RX_BUF_NUM) 1582 while (lp->fbl_count < RX_BUF_NUM)
1811#endif
1812 { 1583 {
1813#ifdef TC35815_USE_PACKEDBUFFER
1814 unsigned char curid = lp->fbl_curid;
1815#else
1816 unsigned char curid = 1584 unsigned char curid =
1817 (id + 1 + lp->fbl_count) % RX_BUF_NUM; 1585 (id + 1 + lp->fbl_count) % RX_BUF_NUM;
1818#endif
1819 struct BDesc *bd = &lp->fbl_ptr->bd[curid]; 1586 struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1820#ifdef DEBUG 1587#ifdef DEBUG
1821 bdctl = le32_to_cpu(bd->BDCtl); 1588 bdctl = le32_to_cpu(bd->BDCtl);
@@ -1826,7 +1593,6 @@ tc35815_rx(struct net_device *dev)
1826 } 1593 }
1827#endif 1594#endif
1828 /* pass BD to controller */ 1595 /* pass BD to controller */
1829#ifndef TC35815_USE_PACKEDBUFFER
1830 if (!lp->rx_skbs[curid].skb) { 1596 if (!lp->rx_skbs[curid].skb) {
1831 lp->rx_skbs[curid].skb = 1597 lp->rx_skbs[curid].skb =
1832 alloc_rxbuf_skb(dev, 1598 alloc_rxbuf_skb(dev,
@@ -1836,21 +1602,11 @@ tc35815_rx(struct net_device *dev)
1836 break; /* try on next reception */ 1602 break; /* try on next reception */
1837 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); 1603 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1838 } 1604 }
1839#endif /* TC35815_USE_PACKEDBUFFER */
1840 /* Note: BDLength was modified by chip. */ 1605 /* Note: BDLength was modified by chip. */
1841 bd->BDCtl = cpu_to_le32(BD_CownsBD | 1606 bd->BDCtl = cpu_to_le32(BD_CownsBD |
1842 (curid << BD_RxBDID_SHIFT) | 1607 (curid << BD_RxBDID_SHIFT) |
1843 RX_BUF_SIZE); 1608 RX_BUF_SIZE);
1844#ifdef TC35815_USE_PACKEDBUFFER
1845 lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
1846 if (netif_msg_rx_status(lp)) {
1847 printk("%s: Entering new FBD %d\n",
1848 dev->name, lp->fbl_curid);
1849 dump_frfd(lp->fbl_ptr);
1850 }
1851#else
1852 lp->fbl_count++; 1609 lp->fbl_count++;
1853#endif
1854 } 1610 }
1855 } 1611 }
1856 1612
@@ -1882,12 +1638,9 @@ tc35815_rx(struct net_device *dev)
1882#endif 1638#endif
1883 } 1639 }
1884 1640
1885#ifdef TC35815_NAPI
1886 return received; 1641 return received;
1887#endif
1888} 1642}
1889 1643
1890#ifdef TC35815_NAPI
1891static int tc35815_poll(struct napi_struct *napi, int budget) 1644static int tc35815_poll(struct napi_struct *napi, int budget)
1892{ 1645{
1893 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); 1646 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
@@ -1924,13 +1677,8 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1924 } 1677 }
1925 return received; 1678 return received;
1926} 1679}
1927#endif
1928 1680
1929#ifdef NO_CHECK_CARRIER
1930#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1931#else
1932#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr) 1681#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1933#endif
1934 1682
1935static void 1683static void
1936tc35815_check_tx_stat(struct net_device *dev, int status) 1684tc35815_check_tx_stat(struct net_device *dev, int status)
@@ -1944,16 +1692,12 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1944 if (status & Tx_TxColl_MASK) 1692 if (status & Tx_TxColl_MASK)
1945 dev->stats.collisions += status & Tx_TxColl_MASK; 1693 dev->stats.collisions += status & Tx_TxColl_MASK;
1946 1694
1947#ifndef NO_CHECK_CARRIER
1948 /* TX4939 does not have NCarr */ 1695 /* TX4939 does not have NCarr */
1949 if (lp->chiptype == TC35815_TX4939) 1696 if (lp->chiptype == TC35815_TX4939)
1950 status &= ~Tx_NCarr; 1697 status &= ~Tx_NCarr;
1951#ifdef WORKAROUND_LOSTCAR
1952 /* WORKAROUND: ignore LostCrS in full duplex operation */ 1698 /* WORKAROUND: ignore LostCrS in full duplex operation */
1953 if (!lp->link || lp->duplex == DUPLEX_FULL) 1699 if (!lp->link || lp->duplex == DUPLEX_FULL)
1954 status &= ~Tx_NCarr; 1700 status &= ~Tx_NCarr;
1955#endif
1956#endif
1957 1701
1958 if (!(status & TX_STA_ERR)) { 1702 if (!(status & TX_STA_ERR)) {
1959 /* no error. */ 1703 /* no error. */
@@ -1983,12 +1727,10 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1983 dev->stats.tx_fifo_errors++; 1727 dev->stats.tx_fifo_errors++;
1984 msg = "Excessive Deferral."; 1728 msg = "Excessive Deferral.";
1985 } 1729 }
1986#ifndef NO_CHECK_CARRIER
1987 if (status & Tx_NCarr) { 1730 if (status & Tx_NCarr) {
1988 dev->stats.tx_carrier_errors++; 1731 dev->stats.tx_carrier_errors++;
1989 msg = "Lost Carrier Sense."; 1732 msg = "Lost Carrier Sense.";
1990 } 1733 }
1991#endif
1992 if (status & Tx_LateColl) { 1734 if (status & Tx_LateColl) {
1993 dev->stats.tx_aborted_errors++; 1735 dev->stats.tx_aborted_errors++;
1994 msg = "Late Collision."; 1736 msg = "Late Collision.";
@@ -2044,11 +1786,7 @@ tc35815_txdone(struct net_device *dev)
2044 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); 1786 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
2045 lp->tx_skbs[lp->tfd_end].skb = NULL; 1787 lp->tx_skbs[lp->tfd_end].skb = NULL;
2046 lp->tx_skbs[lp->tfd_end].skb_dma = 0; 1788 lp->tx_skbs[lp->tfd_end].skb_dma = 0;
2047#ifdef TC35815_NAPI
2048 dev_kfree_skb_any(skb); 1789 dev_kfree_skb_any(skb);
2049#else
2050 dev_kfree_skb_irq(skb);
2051#endif
2052 } 1790 }
2053 txfd->fd.FDSystem = cpu_to_le32(0xffffffff); 1791 txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
2054 1792
@@ -2083,9 +1821,7 @@ tc35815_txdone(struct net_device *dev)
2083 1821
2084 /* start DMA Transmitter again */ 1822 /* start DMA Transmitter again */
2085 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1823 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
2086#ifdef GATHER_TXINT
2087 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1824 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
2088#endif
2089 if (netif_msg_tx_queued(lp)) { 1825 if (netif_msg_tx_queued(lp)) {
2090 printk("%s: start TxFD on queue.\n", 1826 printk("%s: start TxFD on queue.\n",
2091 dev->name); 1827 dev->name);
@@ -2112,9 +1848,7 @@ tc35815_close(struct net_device *dev)
2112 struct tc35815_local *lp = netdev_priv(dev); 1848 struct tc35815_local *lp = netdev_priv(dev);
2113 1849
2114 netif_stop_queue(dev); 1850 netif_stop_queue(dev);
2115#ifdef TC35815_NAPI
2116 napi_disable(&lp->napi); 1851 napi_disable(&lp->napi);
2117#endif
2118 if (lp->phy_dev) 1852 if (lp->phy_dev)
2119 phy_stop(lp->phy_dev); 1853 phy_stop(lp->phy_dev);
2120 cancel_work_sync(&lp->restart_work); 1854 cancel_work_sync(&lp->restart_work);
@@ -2198,14 +1932,12 @@ tc35815_set_multicast_list(struct net_device *dev)
2198 (struct tc35815_regs __iomem *)dev->base_addr; 1932 (struct tc35815_regs __iomem *)dev->base_addr;
2199 1933
2200 if (dev->flags & IFF_PROMISC) { 1934 if (dev->flags & IFF_PROMISC) {
2201#ifdef WORKAROUND_100HALF_PROMISC
2202 /* With some (all?) 100MHalf HUB, controller will hang 1935 /* With some (all?) 100MHalf HUB, controller will hang
2203 * if we enabled promiscuous mode before linkup... */ 1936 * if we enabled promiscuous mode before linkup... */
2204 struct tc35815_local *lp = netdev_priv(dev); 1937 struct tc35815_local *lp = netdev_priv(dev);
2205 1938
2206 if (!lp->link) 1939 if (!lp->link)
2207 return; 1940 return;
2208#endif
2209 /* Enable promiscuous mode */ 1941 /* Enable promiscuous mode */
2210 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 1942 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
2211 } else if ((dev->flags & IFF_ALLMULTI) || 1943 } else if ((dev->flags & IFF_ALLMULTI) ||
@@ -2392,9 +2124,6 @@ static void tc35815_chip_init(struct net_device *dev)
2392 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); 2124 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
2393 else 2125 else
2394 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); 2126 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
2395#ifdef TC35815_USE_PACKEDBUFFER
2396 tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize); /* Packing */
2397#endif
2398 tc_writel(0, &tr->TxPollCtr); /* Batch mode */ 2127 tc_writel(0, &tr->TxPollCtr); /* Batch mode */
2399 tc_writel(TX_THRESHOLD, &tr->TxThrsh); 2128 tc_writel(TX_THRESHOLD, &tr->TxThrsh);
2400 tc_writel(INT_EN_CMD, &tr->Int_En); 2129 tc_writel(INT_EN_CMD, &tr->Int_En);
@@ -2412,19 +2141,12 @@ static void tc35815_chip_init(struct net_device *dev)
2412 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ 2141 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */
2413 2142
2414 /* start MAC transmitter */ 2143 /* start MAC transmitter */
2415#ifndef NO_CHECK_CARRIER
2416 /* TX4939 does not have EnLCarr */ 2144 /* TX4939 does not have EnLCarr */
2417 if (lp->chiptype == TC35815_TX4939) 2145 if (lp->chiptype == TC35815_TX4939)
2418 txctl &= ~Tx_EnLCarr; 2146 txctl &= ~Tx_EnLCarr;
2419#ifdef WORKAROUND_LOSTCAR
2420 /* WORKAROUND: ignore LostCrS in full duplex operation */ 2147 /* WORKAROUND: ignore LostCrS in full duplex operation */
2421 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) 2148 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
2422 txctl &= ~Tx_EnLCarr; 2149 txctl &= ~Tx_EnLCarr;
2423#endif
2424#endif /* !NO_CHECK_CARRIER */
2425#ifdef GATHER_TXINT
2426 txctl &= ~Tx_EnComp; /* disable global tx completion int. */
2427#endif
2428 tc_writel(txctl, &tr->Tx_Ctl); 2150 tc_writel(txctl, &tr->Tx_Ctl);
2429} 2151}
2430 2152
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index ec9dfb251f30..2fbac31767fa 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -420,7 +420,7 @@ static int bdx_hw_start(struct bdx_priv *priv)
420 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB); 420 GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
421 421
422#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED) 422#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED)
423 if ((rc = request_irq(priv->pdev->irq, &bdx_isr_napi, BDX_IRQ_TYPE, 423 if ((rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
424 ndev->name, ndev))) 424 ndev->name, ndev)))
425 goto err_irq; 425 goto err_irq;
426 bdx_enable_interrupts(priv); 426 bdx_enable_interrupts(priv);
@@ -1878,7 +1878,7 @@ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1878 udelay(50); /* give hw a chance to clean fifo */ 1878 udelay(50); /* give hw a chance to clean fifo */
1879 continue; 1879 continue;
1880 } 1880 }
1881 avail = MIN(avail, size); 1881 avail = min(avail, size);
1882 DBG("about to push %d bytes starting %p size %d\n", avail, 1882 DBG("about to push %d bytes starting %p size %d\n", avail,
1883 data, size); 1883 data, size);
1884 bdx_tx_push_desc(priv, data, avail); 1884 bdx_tx_push_desc(priv, data, avail);
@@ -2105,12 +2105,6 @@ err_pci:
2105} 2105}
2106 2106
2107/****************** Ethtool interface *********************/ 2107/****************** Ethtool interface *********************/
2108/* get strings for tests */
2109static const char
2110 bdx_test_names[][ETH_GSTRING_LEN] = {
2111 "No tests defined"
2112};
2113
2114/* get strings for statistics counters */ 2108/* get strings for statistics counters */
2115static const char 2109static const char
2116 bdx_stat_names[][ETH_GSTRING_LEN] = { 2110 bdx_stat_names[][ETH_GSTRING_LEN] = {
@@ -2380,9 +2374,6 @@ bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2380static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 2374static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2381{ 2375{
2382 switch (stringset) { 2376 switch (stringset) {
2383 case ETH_SS_TEST:
2384 memcpy(data, *bdx_test_names, sizeof(bdx_test_names));
2385 break;
2386 case ETH_SS_STATS: 2377 case ETH_SS_STATS:
2387 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names)); 2378 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2388 break; 2379 break;
@@ -2390,15 +2381,21 @@ static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2390} 2381}
2391 2382
2392/* 2383/*
2393 * bdx_get_stats_count - return number of 64bit statistics counters 2384 * bdx_get_sset_count - return number of statistics or tests
2394 * @netdev 2385 * @netdev
2395 */ 2386 */
2396static int bdx_get_stats_count(struct net_device *netdev) 2387static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2397{ 2388{
2398 struct bdx_priv *priv = netdev_priv(netdev); 2389 struct bdx_priv *priv = netdev_priv(netdev);
2399 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names) 2390
2400 != sizeof(struct bdx_stats) / sizeof(u64)); 2391 switch (stringset) {
2401 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0); 2392 case ETH_SS_STATS:
2393 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2394 != sizeof(struct bdx_stats) / sizeof(u64));
2395 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
2396 default:
2397 return -EINVAL;
2398 }
2402} 2399}
2403 2400
2404/* 2401/*
@@ -2441,7 +2438,7 @@ static void bdx_ethtool_ops(struct net_device *netdev)
2441 .get_sg = ethtool_op_get_sg, 2438 .get_sg = ethtool_op_get_sg,
2442 .get_tso = ethtool_op_get_tso, 2439 .get_tso = ethtool_op_get_tso,
2443 .get_strings = bdx_get_strings, 2440 .get_strings = bdx_get_strings,
2444 .get_stats_count = bdx_get_stats_count, 2441 .get_sset_count = bdx_get_sset_count,
2445 .get_ethtool_stats = bdx_get_ethtool_stats, 2442 .get_ethtool_stats = bdx_get_ethtool_stats,
2446 }; 2443 };
2447 2444
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 4fc875e5dcdd..124141909e42 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -76,8 +76,6 @@
76#define FIFO_SIZE 4096 76#define FIFO_SIZE 4096
77#define FIFO_EXTRA_SPACE 1024 77#define FIFO_EXTRA_SPACE 1024
78 78
79#define MIN(x, y) ((x) < (y) ? (x) : (y))
80
81#if BITS_PER_LONG == 64 79#if BITS_PER_LONG == 64
82# define H32_64(x) (u32) ((u64)(x) >> 32) 80# define H32_64(x) (u32) ((u64)(x) >> 32)
83# define L32_64(x) (u32) ((u64)(x) & 0xffffffff) 81# define L32_64(x) (u32) ((u64)(x) & 0xffffffff)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe753b6..6e6db955b4a9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.102" 71#define DRV_MODULE_VERSION "3.104"
72#define DRV_MODULE_RELDATE "September 1, 2009" 72#define DRV_MODULE_RELDATE "November 13, 2009"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -137,6 +137,12 @@
137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) 137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) 138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
139 139
140#define TG3_RX_STD_BUFF_RING_SIZE \
141 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
142
143#define TG3_RX_JMB_BUFF_RING_SIZE \
144 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
145
140/* minimum number of free TX descriptors required to wake up TX process */ 146/* minimum number of free TX descriptors required to wake up TX process */
141#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) 147#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
142 148
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, 241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, 242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, 243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
238 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
239 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
240 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 TG3_64BIT_REG_LOW, val); 405 TG3_64BIT_REG_LOW, val);
397 return; 406 return;
398 } 407 }
399 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { 408 if (off == TG3_RX_STD_PROD_IDX_REG) {
400 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + 409 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
401 TG3_64BIT_REG_LOW, val); 410 TG3_64BIT_REG_LOW, val);
402 return; 411 return;
@@ -937,9 +946,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
937 u32 val; 946 u32 val;
938 struct phy_device *phydev; 947 struct phy_device *phydev;
939 948
940 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 949 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
941 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 950 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
942 case TG3_PHY_ID_BCM50610: 951 case TG3_PHY_ID_BCM50610:
952 case TG3_PHY_ID_BCM50610M:
943 val = MAC_PHYCFG2_50610_LED_MODES; 953 val = MAC_PHYCFG2_50610_LED_MODES;
944 break; 954 break;
945 case TG3_PHY_ID_BCMAC131: 955 case TG3_PHY_ID_BCMAC131:
@@ -1031,7 +1041,7 @@ static void tg3_mdio_start(struct tg3 *tp)
1031 if (is_serdes) 1041 if (is_serdes)
1032 tp->phy_addr += 7; 1042 tp->phy_addr += 7;
1033 } else 1043 } else
1034 tp->phy_addr = PHY_ADDR; 1044 tp->phy_addr = TG3_PHY_MII_ADDR;
1035 1045
1036 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && 1046 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 1047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -1062,7 +1072,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1062 tp->mdio_bus->read = &tg3_mdio_read; 1072 tp->mdio_bus->read = &tg3_mdio_read;
1063 tp->mdio_bus->write = &tg3_mdio_write; 1073 tp->mdio_bus->write = &tg3_mdio_write;
1064 tp->mdio_bus->reset = &tg3_mdio_reset; 1074 tp->mdio_bus->reset = &tg3_mdio_reset;
1065 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR); 1075 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1066 tp->mdio_bus->irq = &tp->mdio_irq[0]; 1076 tp->mdio_bus->irq = &tp->mdio_irq[0];
1067 1077
1068 for (i = 0; i < PHY_MAX_ADDR; i++) 1078 for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1084,7 +1094,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1084 return i; 1094 return i;
1085 } 1095 }
1086 1096
1087 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1097 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1088 1098
1089 if (!phydev || !phydev->drv) { 1099 if (!phydev || !phydev->drv) {
1090 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); 1100 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
@@ -1096,8 +1106,14 @@ static int tg3_mdio_init(struct tg3 *tp)
1096 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 1106 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1097 case TG3_PHY_ID_BCM57780: 1107 case TG3_PHY_ID_BCM57780:
1098 phydev->interface = PHY_INTERFACE_MODE_GMII; 1108 phydev->interface = PHY_INTERFACE_MODE_GMII;
1109 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1099 break; 1110 break;
1100 case TG3_PHY_ID_BCM50610: 1111 case TG3_PHY_ID_BCM50610:
1112 case TG3_PHY_ID_BCM50610M:
1113 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1114 PHY_BRCM_RX_REFCLK_UNUSED |
1115 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1116 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1101 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) 1117 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1102 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; 1118 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1103 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1119 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
@@ -1111,6 +1127,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1111 case TG3_PHY_ID_RTL8201E: 1127 case TG3_PHY_ID_RTL8201E:
1112 case TG3_PHY_ID_BCMAC131: 1128 case TG3_PHY_ID_BCMAC131:
1113 phydev->interface = PHY_INTERFACE_MODE_MII; 1129 phydev->interface = PHY_INTERFACE_MODE_MII;
1130 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1114 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 1131 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1115 break; 1132 break;
1116 } 1133 }
@@ -1311,7 +1328,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1311 u32 old_tx_mode = tp->tx_mode; 1328 u32 old_tx_mode = tp->tx_mode;
1312 1329
1313 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 1330 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1314 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg; 1331 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1315 else 1332 else
1316 autoneg = tp->link_config.autoneg; 1333 autoneg = tp->link_config.autoneg;
1317 1334
@@ -1348,7 +1365,7 @@ static void tg3_adjust_link(struct net_device *dev)
1348 u8 oldflowctrl, linkmesg = 0; 1365 u8 oldflowctrl, linkmesg = 0;
1349 u32 mac_mode, lcl_adv, rmt_adv; 1366 u32 mac_mode, lcl_adv, rmt_adv;
1350 struct tg3 *tp = netdev_priv(dev); 1367 struct tg3 *tp = netdev_priv(dev);
1351 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1368 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1352 1369
1353 spin_lock_bh(&tp->lock); 1370 spin_lock_bh(&tp->lock);
1354 1371
@@ -1363,8 +1380,11 @@ static void tg3_adjust_link(struct net_device *dev)
1363 1380
1364 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 1381 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1365 mac_mode |= MAC_MODE_PORT_MODE_MII; 1382 mac_mode |= MAC_MODE_PORT_MODE_MII;
1366 else 1383 else if (phydev->speed == SPEED_1000 ||
1384 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1367 mac_mode |= MAC_MODE_PORT_MODE_GMII; 1385 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1386 else
1387 mac_mode |= MAC_MODE_PORT_MODE_MII;
1368 1388
1369 if (phydev->duplex == DUPLEX_HALF) 1389 if (phydev->duplex == DUPLEX_HALF)
1370 mac_mode |= MAC_MODE_HALF_DUPLEX; 1390 mac_mode |= MAC_MODE_HALF_DUPLEX;
@@ -1434,7 +1454,7 @@ static int tg3_phy_init(struct tg3 *tp)
1434 /* Bring the PHY back to a known state. */ 1454 /* Bring the PHY back to a known state. */
1435 tg3_bmcr_reset(tp); 1455 tg3_bmcr_reset(tp);
1436 1456
1437 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1457 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1438 1458
1439 /* Attach the MAC to the PHY. */ 1459 /* Attach the MAC to the PHY. */
1440 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, 1460 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
@@ -1461,7 +1481,7 @@ static int tg3_phy_init(struct tg3 *tp)
1461 SUPPORTED_Asym_Pause); 1481 SUPPORTED_Asym_Pause);
1462 break; 1482 break;
1463 default: 1483 default:
1464 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); 1484 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1465 return -EINVAL; 1485 return -EINVAL;
1466 } 1486 }
1467 1487
@@ -1479,7 +1499,7 @@ static void tg3_phy_start(struct tg3 *tp)
1479 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1499 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1480 return; 1500 return;
1481 1501
1482 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1502 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1483 1503
1484 if (tp->link_config.phy_is_low_power) { 1504 if (tp->link_config.phy_is_low_power) {
1485 tp->link_config.phy_is_low_power = 0; 1505 tp->link_config.phy_is_low_power = 0;
@@ -1499,13 +1519,13 @@ static void tg3_phy_stop(struct tg3 *tp)
1499 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1519 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1500 return; 1520 return;
1501 1521
1502 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]); 1522 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1503} 1523}
1504 1524
1505static void tg3_phy_fini(struct tg3 *tp) 1525static void tg3_phy_fini(struct tg3 *tp)
1506{ 1526{
1507 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 1527 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1508 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); 1528 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1509 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; 1529 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1510 } 1530 }
1511} 1531}
@@ -2149,6 +2169,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2149 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 2169 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2150 udelay(40); 2170 udelay(40);
2151 return; 2171 return;
2172 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2173 u32 phytest;
2174 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2175 u32 phy;
2176
2177 tg3_writephy(tp, MII_ADVERTISE, 0);
2178 tg3_writephy(tp, MII_BMCR,
2179 BMCR_ANENABLE | BMCR_ANRESTART);
2180
2181 tg3_writephy(tp, MII_TG3_FET_TEST,
2182 phytest | MII_TG3_FET_SHADOW_EN);
2183 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2184 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2185 tg3_writephy(tp,
2186 MII_TG3_FET_SHDW_AUXMODE4,
2187 phy);
2188 }
2189 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2190 }
2191 return;
2152 } else if (do_low_power) { 2192 } else if (do_low_power) {
2153 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2193 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2154 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 2194 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
@@ -2218,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
2218static void tg3_enable_nvram_access(struct tg3 *tp) 2258static void tg3_enable_nvram_access(struct tg3 *tp)
2219{ 2259{
2220 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2260 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2221 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2261 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2222 u32 nvaccess = tr32(NVRAM_ACCESS); 2262 u32 nvaccess = tr32(NVRAM_ACCESS);
2223 2263
2224 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 2264 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2229,7 +2269,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
2229static void tg3_disable_nvram_access(struct tg3 *tp) 2269static void tg3_disable_nvram_access(struct tg3 *tp)
2230{ 2270{
2231 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2271 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2232 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2272 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2233 u32 nvaccess = tr32(NVRAM_ACCESS); 2273 u32 nvaccess = tr32(NVRAM_ACCESS);
2234 2274
2235 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 2275 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -2474,7 +2514,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2474 struct phy_device *phydev; 2514 struct phy_device *phydev;
2475 u32 phyid, advertising; 2515 u32 phyid, advertising;
2476 2516
2477 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 2517 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2478 2518
2479 tp->link_config.phy_is_low_power = 1; 2519 tp->link_config.phy_is_low_power = 1;
2480 2520
@@ -3243,15 +3283,6 @@ relink:
3243 pci_write_config_word(tp->pdev, 3283 pci_write_config_word(tp->pdev,
3244 tp->pcie_cap + PCI_EXP_LNKCTL, 3284 tp->pcie_cap + PCI_EXP_LNKCTL,
3245 newlnkctl); 3285 newlnkctl);
3246 } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3247 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3248 if (tp->link_config.active_speed == SPEED_100 ||
3249 tp->link_config.active_speed == SPEED_10)
3250 newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3251 else
3252 newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3253 if (newreg != oldreg)
3254 tw32(TG3_PCIE_LNKCTL, newreg);
3255 } 3286 }
3256 3287
3257 if (current_link_up != netif_carrier_ok(tp->dev)) { 3288 if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4375,6 +4406,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
4375 } 4406 }
4376} 4407}
4377 4408
4409static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4410{
4411 if (!ri->skb)
4412 return;
4413
4414 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4415 map_sz, PCI_DMA_FROMDEVICE);
4416 dev_kfree_skb_any(ri->skb);
4417 ri->skb = NULL;
4418}
4419
4378/* Returns size of skb allocated or < 0 on error. 4420/* Returns size of skb allocated or < 0 on error.
4379 * 4421 *
4380 * We only need to fill in the address because the other members 4422 * We only need to fill in the address because the other members
@@ -4386,16 +4428,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
4386 * buffers the cpu only reads the last cacheline of the RX descriptor 4428 * buffers the cpu only reads the last cacheline of the RX descriptor
4387 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 4429 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4388 */ 4430 */
4389static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, 4431static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4390 int src_idx, u32 dest_idx_unmasked) 4432 u32 opaque_key, u32 dest_idx_unmasked)
4391{ 4433{
4392 struct tg3 *tp = tnapi->tp;
4393 struct tg3_rx_buffer_desc *desc; 4434 struct tg3_rx_buffer_desc *desc;
4394 struct ring_info *map, *src_map; 4435 struct ring_info *map, *src_map;
4395 struct sk_buff *skb; 4436 struct sk_buff *skb;
4396 dma_addr_t mapping; 4437 dma_addr_t mapping;
4397 int skb_size, dest_idx; 4438 int skb_size, dest_idx;
4398 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4399 4439
4400 src_map = NULL; 4440 src_map = NULL;
4401 switch (opaque_key) { 4441 switch (opaque_key) {
@@ -4403,8 +4443,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4403 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4443 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4404 desc = &tpr->rx_std[dest_idx]; 4444 desc = &tpr->rx_std[dest_idx];
4405 map = &tpr->rx_std_buffers[dest_idx]; 4445 map = &tpr->rx_std_buffers[dest_idx];
4406 if (src_idx >= 0)
4407 src_map = &tpr->rx_std_buffers[src_idx];
4408 skb_size = tp->rx_pkt_map_sz; 4446 skb_size = tp->rx_pkt_map_sz;
4409 break; 4447 break;
4410 4448
@@ -4412,8 +4450,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4412 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4450 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4413 desc = &tpr->rx_jmb[dest_idx].std; 4451 desc = &tpr->rx_jmb[dest_idx].std;
4414 map = &tpr->rx_jmb_buffers[dest_idx]; 4452 map = &tpr->rx_jmb_buffers[dest_idx];
4415 if (src_idx >= 0)
4416 src_map = &tpr->rx_jmb_buffers[src_idx];
4417 skb_size = TG3_RX_JMB_MAP_SZ; 4453 skb_size = TG3_RX_JMB_MAP_SZ;
4418 break; 4454 break;
4419 4455
@@ -4435,13 +4471,14 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4435 4471
4436 mapping = pci_map_single(tp->pdev, skb->data, skb_size, 4472 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4437 PCI_DMA_FROMDEVICE); 4473 PCI_DMA_FROMDEVICE);
4474 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4475 dev_kfree_skb(skb);
4476 return -EIO;
4477 }
4438 4478
4439 map->skb = skb; 4479 map->skb = skb;
4440 pci_unmap_addr_set(map, mapping, mapping); 4480 pci_unmap_addr_set(map, mapping, mapping);
4441 4481
4442 if (src_map != NULL)
4443 src_map->skb = NULL;
4444
4445 desc->addr_hi = ((u64)mapping >> 32); 4482 desc->addr_hi = ((u64)mapping >> 32);
4446 desc->addr_lo = ((u64)mapping & 0xffffffff); 4483 desc->addr_lo = ((u64)mapping & 0xffffffff);
4447 4484
@@ -4452,30 +4489,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4452 * members of the RX descriptor are invariant. See notes above 4489 * members of the RX descriptor are invariant. See notes above
4453 * tg3_alloc_rx_skb for full details. 4490 * tg3_alloc_rx_skb for full details.
4454 */ 4491 */
4455static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, 4492static void tg3_recycle_rx(struct tg3_napi *tnapi,
4456 int src_idx, u32 dest_idx_unmasked) 4493 struct tg3_rx_prodring_set *dpr,
4494 u32 opaque_key, int src_idx,
4495 u32 dest_idx_unmasked)
4457{ 4496{
4458 struct tg3 *tp = tnapi->tp; 4497 struct tg3 *tp = tnapi->tp;
4459 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4498 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4460 struct ring_info *src_map, *dest_map; 4499 struct ring_info *src_map, *dest_map;
4461 int dest_idx; 4500 int dest_idx;
4462 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4501 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4463 4502
4464 switch (opaque_key) { 4503 switch (opaque_key) {
4465 case RXD_OPAQUE_RING_STD: 4504 case RXD_OPAQUE_RING_STD:
4466 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4505 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4467 dest_desc = &tpr->rx_std[dest_idx]; 4506 dest_desc = &dpr->rx_std[dest_idx];
4468 dest_map = &tpr->rx_std_buffers[dest_idx]; 4507 dest_map = &dpr->rx_std_buffers[dest_idx];
4469 src_desc = &tpr->rx_std[src_idx]; 4508 src_desc = &spr->rx_std[src_idx];
4470 src_map = &tpr->rx_std_buffers[src_idx]; 4509 src_map = &spr->rx_std_buffers[src_idx];
4471 break; 4510 break;
4472 4511
4473 case RXD_OPAQUE_RING_JUMBO: 4512 case RXD_OPAQUE_RING_JUMBO:
4474 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4513 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4475 dest_desc = &tpr->rx_jmb[dest_idx].std; 4514 dest_desc = &dpr->rx_jmb[dest_idx].std;
4476 dest_map = &tpr->rx_jmb_buffers[dest_idx]; 4515 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4477 src_desc = &tpr->rx_jmb[src_idx].std; 4516 src_desc = &spr->rx_jmb[src_idx].std;
4478 src_map = &tpr->rx_jmb_buffers[src_idx]; 4517 src_map = &spr->rx_jmb_buffers[src_idx];
4479 break; 4518 break;
4480 4519
4481 default: 4520 default:
@@ -4487,7 +4526,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4487 pci_unmap_addr(src_map, mapping)); 4526 pci_unmap_addr(src_map, mapping));
4488 dest_desc->addr_hi = src_desc->addr_hi; 4527 dest_desc->addr_hi = src_desc->addr_hi;
4489 dest_desc->addr_lo = src_desc->addr_lo; 4528 dest_desc->addr_lo = src_desc->addr_lo;
4490
4491 src_map->skb = NULL; 4529 src_map->skb = NULL;
4492} 4530}
4493 4531
@@ -4519,10 +4557,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4519{ 4557{
4520 struct tg3 *tp = tnapi->tp; 4558 struct tg3 *tp = tnapi->tp;
4521 u32 work_mask, rx_std_posted = 0; 4559 u32 work_mask, rx_std_posted = 0;
4560 u32 std_prod_idx, jmb_prod_idx;
4522 u32 sw_idx = tnapi->rx_rcb_ptr; 4561 u32 sw_idx = tnapi->rx_rcb_ptr;
4523 u16 hw_idx; 4562 u16 hw_idx;
4524 int received; 4563 int received;
4525 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4564 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4526 4565
4527 hw_idx = *(tnapi->rx_rcb_prod_idx); 4566 hw_idx = *(tnapi->rx_rcb_prod_idx);
4528 /* 4567 /*
@@ -4532,7 +4571,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4532 rmb(); 4571 rmb();
4533 work_mask = 0; 4572 work_mask = 0;
4534 received = 0; 4573 received = 0;
4574 std_prod_idx = tpr->rx_std_prod_idx;
4575 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4535 while (sw_idx != hw_idx && budget > 0) { 4576 while (sw_idx != hw_idx && budget > 0) {
4577 struct ring_info *ri;
4536 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 4578 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4537 unsigned int len; 4579 unsigned int len;
4538 struct sk_buff *skb; 4580 struct sk_buff *skb;
@@ -4542,16 +4584,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4542 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4584 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4543 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4585 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4544 if (opaque_key == RXD_OPAQUE_RING_STD) { 4586 if (opaque_key == RXD_OPAQUE_RING_STD) {
4545 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; 4587 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4546 dma_addr = pci_unmap_addr(ri, mapping); 4588 dma_addr = pci_unmap_addr(ri, mapping);
4547 skb = ri->skb; 4589 skb = ri->skb;
4548 post_ptr = &tpr->rx_std_ptr; 4590 post_ptr = &std_prod_idx;
4549 rx_std_posted++; 4591 rx_std_posted++;
4550 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4592 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4551 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; 4593 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4552 dma_addr = pci_unmap_addr(ri, mapping); 4594 dma_addr = pci_unmap_addr(ri, mapping);
4553 skb = ri->skb; 4595 skb = ri->skb;
4554 post_ptr = &tpr->rx_jmb_ptr; 4596 post_ptr = &jmb_prod_idx;
4555 } else 4597 } else
4556 goto next_pkt_nopost; 4598 goto next_pkt_nopost;
4557 4599
@@ -4560,7 +4602,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4560 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 4602 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4561 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { 4603 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4562 drop_it: 4604 drop_it:
4563 tg3_recycle_rx(tnapi, opaque_key, 4605 tg3_recycle_rx(tnapi, tpr, opaque_key,
4564 desc_idx, *post_ptr); 4606 desc_idx, *post_ptr);
4565 drop_it_no_recycle: 4607 drop_it_no_recycle:
4566 /* Other statistics kept track of by card. */ 4608 /* Other statistics kept track of by card. */
@@ -4580,11 +4622,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4580 ) { 4622 ) {
4581 int skb_size; 4623 int skb_size;
4582 4624
4583 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, 4625 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4584 desc_idx, *post_ptr); 4626 *post_ptr);
4585 if (skb_size < 0) 4627 if (skb_size < 0)
4586 goto drop_it; 4628 goto drop_it;
4587 4629
4630 ri->skb = NULL;
4631
4588 pci_unmap_single(tp->pdev, dma_addr, skb_size, 4632 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4589 PCI_DMA_FROMDEVICE); 4633 PCI_DMA_FROMDEVICE);
4590 4634
@@ -4592,7 +4636,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4592 } else { 4636 } else {
4593 struct sk_buff *copy_skb; 4637 struct sk_buff *copy_skb;
4594 4638
4595 tg3_recycle_rx(tnapi, opaque_key, 4639 tg3_recycle_rx(tnapi, tpr, opaque_key,
4596 desc_idx, *post_ptr); 4640 desc_idx, *post_ptr);
4597 4641
4598 copy_skb = netdev_alloc_skb(tp->dev, 4642 copy_skb = netdev_alloc_skb(tp->dev,
@@ -4643,9 +4687,7 @@ next_pkt:
4643 4687
4644 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 4688 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4645 u32 idx = *post_ptr % TG3_RX_RING_SIZE; 4689 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4646 4690 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
4647 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4648 TG3_64BIT_REG_LOW, idx);
4649 work_mask &= ~RXD_OPAQUE_RING_STD; 4691 work_mask &= ~RXD_OPAQUE_RING_STD;
4650 rx_std_posted = 0; 4692 rx_std_posted = 0;
4651 } 4693 }
@@ -4665,33 +4707,45 @@ next_pkt_nopost:
4665 tw32_rx_mbox(tnapi->consmbox, sw_idx); 4707 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4666 4708
4667 /* Refill RX ring(s). */ 4709 /* Refill RX ring(s). */
4668 if (work_mask & RXD_OPAQUE_RING_STD) { 4710 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
4669 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; 4711 if (work_mask & RXD_OPAQUE_RING_STD) {
4670 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 4712 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4671 sw_idx); 4713 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4672 } 4714 tpr->rx_std_prod_idx);
4673 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 4715 }
4674 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; 4716 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4675 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 4717 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4676 sw_idx); 4718 TG3_RX_JUMBO_RING_SIZE;
4719 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4720 tpr->rx_jmb_prod_idx);
4721 }
4722 mmiowb();
4723 } else if (work_mask) {
4724 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4725 * updated before the producer indices can be updated.
4726 */
4727 smp_wmb();
4728
4729 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4730 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4731
4732 napi_schedule(&tp->napi[1].napi);
4677 } 4733 }
4678 mmiowb();
4679 4734
4680 return received; 4735 return received;
4681} 4736}
4682 4737
4683static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 4738static void tg3_poll_link(struct tg3 *tp)
4684{ 4739{
4685 struct tg3 *tp = tnapi->tp;
4686 struct tg3_hw_status *sblk = tnapi->hw_status;
4687
4688 /* handle link change and other phy events */ 4740 /* handle link change and other phy events */
4689 if (!(tp->tg3_flags & 4741 if (!(tp->tg3_flags &
4690 (TG3_FLAG_USE_LINKCHG_REG | 4742 (TG3_FLAG_USE_LINKCHG_REG |
4691 TG3_FLAG_POLL_SERDES))) { 4743 TG3_FLAG_POLL_SERDES))) {
4744 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4745
4692 if (sblk->status & SD_STATUS_LINK_CHG) { 4746 if (sblk->status & SD_STATUS_LINK_CHG) {
4693 sblk->status = SD_STATUS_UPDATED | 4747 sblk->status = SD_STATUS_UPDATED |
4694 (sblk->status & ~SD_STATUS_LINK_CHG); 4748 (sblk->status & ~SD_STATUS_LINK_CHG);
4695 spin_lock(&tp->lock); 4749 spin_lock(&tp->lock);
4696 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 4750 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4697 tw32_f(MAC_STATUS, 4751 tw32_f(MAC_STATUS,
@@ -4705,6 +4759,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4705 spin_unlock(&tp->lock); 4759 spin_unlock(&tp->lock);
4706 } 4760 }
4707 } 4761 }
4762}
4763
4764static void tg3_rx_prodring_xfer(struct tg3 *tp,
4765 struct tg3_rx_prodring_set *dpr,
4766 struct tg3_rx_prodring_set *spr)
4767{
4768 u32 si, di, cpycnt, src_prod_idx;
4769 int i;
4770
4771 while (1) {
4772 src_prod_idx = spr->rx_std_prod_idx;
4773
4774 /* Make sure updates to the rx_std_buffers[] entries and the
4775 * standard producer index are seen in the correct order.
4776 */
4777 smp_rmb();
4778
4779 if (spr->rx_std_cons_idx == src_prod_idx)
4780 break;
4781
4782 if (spr->rx_std_cons_idx < src_prod_idx)
4783 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4784 else
4785 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4786
4787 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4788
4789 si = spr->rx_std_cons_idx;
4790 di = dpr->rx_std_prod_idx;
4791
4792 memcpy(&dpr->rx_std_buffers[di],
4793 &spr->rx_std_buffers[si],
4794 cpycnt * sizeof(struct ring_info));
4795
4796 for (i = 0; i < cpycnt; i++, di++, si++) {
4797 struct tg3_rx_buffer_desc *sbd, *dbd;
4798 sbd = &spr->rx_std[si];
4799 dbd = &dpr->rx_std[di];
4800 dbd->addr_hi = sbd->addr_hi;
4801 dbd->addr_lo = sbd->addr_lo;
4802 }
4803
4804 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4805 TG3_RX_RING_SIZE;
4806 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4807 TG3_RX_RING_SIZE;
4808 }
4809
4810 while (1) {
4811 src_prod_idx = spr->rx_jmb_prod_idx;
4812
4813 /* Make sure updates to the rx_jmb_buffers[] entries and
4814 * the jumbo producer index are seen in the correct order.
4815 */
4816 smp_rmb();
4817
4818 if (spr->rx_jmb_cons_idx == src_prod_idx)
4819 break;
4820
4821 if (spr->rx_jmb_cons_idx < src_prod_idx)
4822 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4823 else
4824 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4825
4826 cpycnt = min(cpycnt,
4827 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4828
4829 si = spr->rx_jmb_cons_idx;
4830 di = dpr->rx_jmb_prod_idx;
4831
4832 memcpy(&dpr->rx_jmb_buffers[di],
4833 &spr->rx_jmb_buffers[si],
4834 cpycnt * sizeof(struct ring_info));
4835
4836 for (i = 0; i < cpycnt; i++, di++, si++) {
4837 struct tg3_rx_buffer_desc *sbd, *dbd;
4838 sbd = &spr->rx_jmb[si].std;
4839 dbd = &dpr->rx_jmb[di].std;
4840 dbd->addr_hi = sbd->addr_hi;
4841 dbd->addr_lo = sbd->addr_lo;
4842 }
4843
4844 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4845 TG3_RX_JUMBO_RING_SIZE;
4846 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4847 TG3_RX_JUMBO_RING_SIZE;
4848 }
4849}
4850
4851static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4852{
4853 struct tg3 *tp = tnapi->tp;
4708 4854
4709 /* run TX completion thread */ 4855 /* run TX completion thread */
4710 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 4856 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
@@ -4720,6 +4866,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4720 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 4866 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4721 work_done += tg3_rx(tnapi, budget - work_done); 4867 work_done += tg3_rx(tnapi, budget - work_done);
4722 4868
4869 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4870 int i;
4871 u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
4872 u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
4873
4874 for (i = 2; i < tp->irq_cnt; i++)
4875 tg3_rx_prodring_xfer(tp, tnapi->prodring,
4876 tp->napi[i].prodring);
4877
4878 wmb();
4879
4880 if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
4881 u32 mbox = TG3_RX_STD_PROD_IDX_REG;
4882 tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
4883 }
4884
4885 if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
4886 u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
4887 tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
4888 }
4889
4890 mmiowb();
4891 }
4892
4893 return work_done;
4894}
4895
4896static int tg3_poll_msix(struct napi_struct *napi, int budget)
4897{
4898 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4899 struct tg3 *tp = tnapi->tp;
4900 int work_done = 0;
4901 struct tg3_hw_status *sblk = tnapi->hw_status;
4902
4903 while (1) {
4904 work_done = tg3_poll_work(tnapi, work_done, budget);
4905
4906 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4907 goto tx_recovery;
4908
4909 if (unlikely(work_done >= budget))
4910 break;
4911
4912 /* tp->last_tag is used in tg3_restart_ints() below
4913 * to tell the hw how much work has been processed,
4914 * so we must read it before checking for more work.
4915 */
4916 tnapi->last_tag = sblk->status_tag;
4917 tnapi->last_irq_tag = tnapi->last_tag;
4918 rmb();
4919
4920 /* check for RX/TX work to do */
4921 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4922 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4923 napi_complete(napi);
4924 /* Reenable interrupts. */
4925 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4926 mmiowb();
4927 break;
4928 }
4929 }
4930
4931 return work_done;
4932
4933tx_recovery:
4934 /* work_done is guaranteed to be less than budget. */
4935 napi_complete(napi);
4936 schedule_work(&tp->reset_task);
4723 return work_done; 4937 return work_done;
4724} 4938}
4725 4939
@@ -4731,6 +4945,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4731 struct tg3_hw_status *sblk = tnapi->hw_status; 4945 struct tg3_hw_status *sblk = tnapi->hw_status;
4732 4946
4733 while (1) { 4947 while (1) {
4948 tg3_poll_link(tp);
4949
4734 work_done = tg3_poll_work(tnapi, work_done, budget); 4950 work_done = tg3_poll_work(tnapi, work_done, budget);
4735 4951
4736 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 4952 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5093,11 +5309,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5093static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); 5309static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5094 5310
5095/* Workaround 4GB and 40-bit hardware DMA bugs. */ 5311/* Workaround 4GB and 40-bit hardware DMA bugs. */
5096static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, 5312static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5097 u32 last_plus_one, u32 *start, 5313 struct sk_buff *skb, u32 last_plus_one,
5098 u32 base_flags, u32 mss) 5314 u32 *start, u32 base_flags, u32 mss)
5099{ 5315{
5100 struct tg3_napi *tnapi = &tp->napi[0]; 5316 struct tg3 *tp = tnapi->tp;
5101 struct sk_buff *new_skb; 5317 struct sk_buff *new_skb;
5102 dma_addr_t new_addr = 0; 5318 dma_addr_t new_addr = 0;
5103 u32 entry = *start; 5319 u32 entry = *start;
@@ -5124,7 +5340,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5124 /* Make sure new skb does not cross any 4G boundaries. 5340 /* Make sure new skb does not cross any 4G boundaries.
5125 * Drop the packet if it does. 5341 * Drop the packet if it does.
5126 */ 5342 */
5127 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) { 5343 if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5344 tg3_4g_overflow_test(new_addr, new_skb->len))) {
5128 if (!ret) 5345 if (!ret)
5129 skb_dma_unmap(&tp->pdev->dev, new_skb, 5346 skb_dma_unmap(&tp->pdev->dev, new_skb,
5130 DMA_TO_DEVICE); 5347 DMA_TO_DEVICE);
@@ -5179,7 +5396,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5179} 5396}
5180 5397
5181/* hard_start_xmit for devices that don't have any bugs and 5398/* hard_start_xmit for devices that don't have any bugs and
5182 * support TG3_FLG2_HW_TSO_2 only. 5399 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5183 */ 5400 */
5184static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, 5401static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5185 struct net_device *dev) 5402 struct net_device *dev)
@@ -5238,7 +5455,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5238 hdrlen = ip_tcp_len + tcp_opt_len; 5455 hdrlen = ip_tcp_len + tcp_opt_len;
5239 } 5456 }
5240 5457
5241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 5458 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5242 mss |= (hdrlen & 0xc) << 12; 5459 mss |= (hdrlen & 0xc) << 12;
5243 if (hdrlen & 0x10) 5460 if (hdrlen & 0x10)
5244 base_flags |= 0x00000010; 5461 base_flags |= 0x00000010;
@@ -5365,9 +5582,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5365 struct skb_shared_info *sp; 5582 struct skb_shared_info *sp;
5366 int would_hit_hwbug; 5583 int would_hit_hwbug;
5367 dma_addr_t mapping; 5584 dma_addr_t mapping;
5368 struct tg3_napi *tnapi = &tp->napi[0]; 5585 struct tg3_napi *tnapi;
5586 struct netdev_queue *txq;
5369 5587
5370 len = skb_headlen(skb); 5588 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5589 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5590 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5591 tnapi++;
5371 5592
5372 /* We are running in BH disabled context with netif_tx_lock 5593 /* We are running in BH disabled context with netif_tx_lock
5373 * and TX reclaim runs via tp->napi.poll inside of a software 5594 * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5375,8 +5596,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5375 * no IRQ context deadlocks to worry about either. Rejoice! 5596 * no IRQ context deadlocks to worry about either. Rejoice!
5376 */ 5597 */
5377 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { 5598 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5378 if (!netif_queue_stopped(dev)) { 5599 if (!netif_tx_queue_stopped(txq)) {
5379 netif_stop_queue(dev); 5600 netif_tx_stop_queue(txq);
5380 5601
5381 /* This is a hard error, log it. */ 5602 /* This is a hard error, log it. */
5382 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 5603 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5389,10 +5610,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5389 base_flags = 0; 5610 base_flags = 0;
5390 if (skb->ip_summed == CHECKSUM_PARTIAL) 5611 if (skb->ip_summed == CHECKSUM_PARTIAL)
5391 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5612 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5392 mss = 0; 5613
5393 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5614 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5394 struct iphdr *iph; 5615 struct iphdr *iph;
5395 int tcp_opt_len, ip_tcp_len, hdr_len; 5616 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5396 5617
5397 if (skb_header_cloned(skb) && 5618 if (skb_header_cloned(skb) &&
5398 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 5619 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5644,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5423 IPPROTO_TCP, 5644 IPPROTO_TCP,
5424 0); 5645 0);
5425 5646
5426 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || 5647 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5427 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { 5648 mss |= (hdr_len & 0xc) << 12;
5649 if (hdr_len & 0x10)
5650 base_flags |= 0x00000010;
5651 base_flags |= (hdr_len & 0x3e0) << 5;
5652 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5653 mss |= hdr_len << 9;
5654 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5428 if (tcp_opt_len || iph->ihl > 5) { 5656 if (tcp_opt_len || iph->ihl > 5) {
5429 int tsflags; 5657 int tsflags;
5430 5658
@@ -5446,6 +5674,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5446 (vlan_tx_tag_get(skb) << 16)); 5674 (vlan_tx_tag_get(skb) << 16));
5447#endif 5675#endif
5448 5676
5677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5678 !mss && skb->len > ETH_DATA_LEN)
5679 base_flags |= TXD_FLAG_JMB_PKT;
5680
5449 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { 5681 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5450 dev_kfree_skb(skb); 5682 dev_kfree_skb(skb);
5451 goto out_unlock; 5683 goto out_unlock;
@@ -5459,9 +5691,20 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5459 5691
5460 would_hit_hwbug = 0; 5692 would_hit_hwbug = 0;
5461 5693
5462 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) 5694 len = skb_headlen(skb);
5695
5696 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5463 would_hit_hwbug = 1; 5697 would_hit_hwbug = 1;
5464 else if (tg3_4g_overflow_test(mapping, len)) 5698
5699 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5700 tg3_4g_overflow_test(mapping, len))
5701 would_hit_hwbug = 1;
5702
5703 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5704 tg3_40bit_overflow_test(tp, mapping, len))
5705 would_hit_hwbug = 1;
5706
5707 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5465 would_hit_hwbug = 1; 5708 would_hit_hwbug = 1;
5466 5709
5467 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5710 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5482,10 +5725,16 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5482 5725
5483 tnapi->tx_buffers[entry].skb = NULL; 5726 tnapi->tx_buffers[entry].skb = NULL;
5484 5727
5485 if (tg3_4g_overflow_test(mapping, len)) 5728 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5729 len <= 8)
5486 would_hit_hwbug = 1; 5730 would_hit_hwbug = 1;
5487 5731
5488 if (tg3_40bit_overflow_test(tp, mapping, len)) 5732 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5733 tg3_4g_overflow_test(mapping, len))
5734 would_hit_hwbug = 1;
5735
5736 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5737 tg3_40bit_overflow_test(tp, mapping, len))
5489 would_hit_hwbug = 1; 5738 would_hit_hwbug = 1;
5490 5739
5491 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 5740 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
@@ -5509,7 +5758,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5509 /* If the workaround fails due to memory/mapping 5758 /* If the workaround fails due to memory/mapping
5510 * failure, silently drop this packet. 5759 * failure, silently drop this packet.
5511 */ 5760 */
5512 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, 5761 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5513 &start, base_flags, mss)) 5762 &start, base_flags, mss))
5514 goto out_unlock; 5763 goto out_unlock;
5515 5764
@@ -5517,13 +5766,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5517 } 5766 }
5518 5767
5519 /* Packets are ready, update Tx producer idx local and on card. */ 5768 /* Packets are ready, update Tx producer idx local and on card. */
5520 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); 5769 tw32_tx_mbox(tnapi->prodmbox, entry);
5521 5770
5522 tnapi->tx_prod = entry; 5771 tnapi->tx_prod = entry;
5523 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 5772 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5524 netif_stop_queue(dev); 5773 netif_tx_stop_queue(txq);
5525 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 5774 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5526 netif_wake_queue(tp->dev); 5775 netif_tx_wake_queue(txq);
5527 } 5776 }
5528 5777
5529out_unlock: 5778out_unlock:
@@ -5594,36 +5843,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
5594 struct tg3_rx_prodring_set *tpr) 5843 struct tg3_rx_prodring_set *tpr)
5595{ 5844{
5596 int i; 5845 int i;
5597 struct ring_info *rxp;
5598
5599 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5600 rxp = &tpr->rx_std_buffers[i];
5601 5846
5602 if (rxp->skb == NULL) 5847 if (tpr != &tp->prodring[0]) {
5603 continue; 5848 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5849 i = (i + 1) % TG3_RX_RING_SIZE)
5850 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5851 tp->rx_pkt_map_sz);
5852
5853 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5854 for (i = tpr->rx_jmb_cons_idx;
5855 i != tpr->rx_jmb_prod_idx;
5856 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5857 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5858 TG3_RX_JMB_MAP_SZ);
5859 }
5860 }
5604 5861
5605 pci_unmap_single(tp->pdev, 5862 return;
5606 pci_unmap_addr(rxp, mapping),
5607 tp->rx_pkt_map_sz,
5608 PCI_DMA_FROMDEVICE);
5609 dev_kfree_skb_any(rxp->skb);
5610 rxp->skb = NULL;
5611 } 5863 }
5612 5864
5613 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 5865 for (i = 0; i < TG3_RX_RING_SIZE; i++)
5614 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 5866 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5615 rxp = &tpr->rx_jmb_buffers[i]; 5867 tp->rx_pkt_map_sz);
5616 5868
5617 if (rxp->skb == NULL) 5869 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5618 continue; 5870 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
5619 5871 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5620 pci_unmap_single(tp->pdev, 5872 TG3_RX_JMB_MAP_SZ);
5621 pci_unmap_addr(rxp, mapping),
5622 TG3_RX_JMB_MAP_SZ,
5623 PCI_DMA_FROMDEVICE);
5624 dev_kfree_skb_any(rxp->skb);
5625 rxp->skb = NULL;
5626 }
5627 } 5873 }
5628} 5874}
5629 5875
@@ -5638,7 +5884,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5638 struct tg3_rx_prodring_set *tpr) 5884 struct tg3_rx_prodring_set *tpr)
5639{ 5885{
5640 u32 i, rx_pkt_dma_sz; 5886 u32 i, rx_pkt_dma_sz;
5641 struct tg3_napi *tnapi = &tp->napi[0]; 5887
5888 tpr->rx_std_cons_idx = 0;
5889 tpr->rx_std_prod_idx = 0;
5890 tpr->rx_jmb_cons_idx = 0;
5891 tpr->rx_jmb_prod_idx = 0;
5892
5893 if (tpr != &tp->prodring[0]) {
5894 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
5895 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
5896 memset(&tpr->rx_jmb_buffers[0], 0,
5897 TG3_RX_JMB_BUFF_RING_SIZE);
5898 goto done;
5899 }
5642 5900
5643 /* Zero out all descriptors. */ 5901 /* Zero out all descriptors. */
5644 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); 5902 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5665,7 +5923,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5665 5923
5666 /* Now allocate fresh SKBs for each rx ring. */ 5924 /* Now allocate fresh SKBs for each rx ring. */
5667 for (i = 0; i < tp->rx_pending; i++) { 5925 for (i = 0; i < tp->rx_pending; i++) {
5668 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { 5926 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
5669 printk(KERN_WARNING PFX 5927 printk(KERN_WARNING PFX
5670 "%s: Using a smaller RX standard ring, " 5928 "%s: Using a smaller RX standard ring, "
5671 "only %d out of %d buffers were allocated " 5929 "only %d out of %d buffers were allocated "
@@ -5696,8 +5954,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5696 } 5954 }
5697 5955
5698 for (i = 0; i < tp->rx_jumbo_pending; i++) { 5956 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5699 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, 5957 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
5700 -1, i) < 0) { 5958 i) < 0) {
5701 printk(KERN_WARNING PFX 5959 printk(KERN_WARNING PFX
5702 "%s: Using a smaller RX jumbo ring, " 5960 "%s: Using a smaller RX jumbo ring, "
5703 "only %d out of %d buffers were " 5961 "only %d out of %d buffers were "
@@ -5741,8 +5999,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
5741static int tg3_rx_prodring_init(struct tg3 *tp, 5999static int tg3_rx_prodring_init(struct tg3 *tp,
5742 struct tg3_rx_prodring_set *tpr) 6000 struct tg3_rx_prodring_set *tpr)
5743{ 6001{
5744 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * 6002 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
5745 TG3_RX_RING_SIZE, GFP_KERNEL);
5746 if (!tpr->rx_std_buffers) 6003 if (!tpr->rx_std_buffers)
5747 return -ENOMEM; 6004 return -ENOMEM;
5748 6005
@@ -5752,8 +6009,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
5752 goto err_out; 6009 goto err_out;
5753 6010
5754 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6011 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5755 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * 6012 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
5756 TG3_RX_JUMBO_RING_SIZE,
5757 GFP_KERNEL); 6013 GFP_KERNEL);
5758 if (!tpr->rx_jmb_buffers) 6014 if (!tpr->rx_jmb_buffers)
5759 goto err_out; 6015 goto err_out;
@@ -5809,9 +6065,10 @@ static void tg3_free_rings(struct tg3 *tp)
5809 6065
5810 dev_kfree_skb_any(skb); 6066 dev_kfree_skb_any(skb);
5811 } 6067 }
5812 }
5813 6068
5814 tg3_rx_prodring_free(tp, &tp->prodring[0]); 6069 if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
6070 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6071 }
5815} 6072}
5816 6073
5817/* Initialize tx/rx rings for packet processing. 6074/* Initialize tx/rx rings for packet processing.
@@ -5845,9 +6102,13 @@ static int tg3_init_rings(struct tg3 *tp)
5845 tnapi->rx_rcb_ptr = 0; 6102 tnapi->rx_rcb_ptr = 0;
5846 if (tnapi->rx_rcb) 6103 if (tnapi->rx_rcb)
5847 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6104 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6105
6106 if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
6107 tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
6108 return -ENOMEM;
5848 } 6109 }
5849 6110
5850 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); 6111 return 0;
5851} 6112}
5852 6113
5853/* 6114/*
@@ -5891,7 +6152,8 @@ static void tg3_free_consistent(struct tg3 *tp)
5891 tp->hw_stats = NULL; 6152 tp->hw_stats = NULL;
5892 } 6153 }
5893 6154
5894 tg3_rx_prodring_fini(tp, &tp->prodring[0]); 6155 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
6156 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
5895} 6157}
5896 6158
5897/* 6159/*
@@ -5902,8 +6164,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5902{ 6164{
5903 int i; 6165 int i;
5904 6166
5905 if (tg3_rx_prodring_init(tp, &tp->prodring[0])) 6167 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
5906 return -ENOMEM; 6168 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6169 goto err_out;
6170 }
5907 6171
5908 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6172 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5909 sizeof(struct tg3_hw_stats), 6173 sizeof(struct tg3_hw_stats),
@@ -5947,6 +6211,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5947 break; 6211 break;
5948 } 6212 }
5949 6213
6214 if (tp->irq_cnt == 1)
6215 tnapi->prodring = &tp->prodring[0];
6216 else if (i)
6217 tnapi->prodring = &tp->prodring[i - 1];
6218
5950 /* 6219 /*
5951 * If multivector RSS is enabled, vector 0 does not handle 6220 * If multivector RSS is enabled, vector 0 does not handle
5952 * rx or tx interrupts. Don't allocate any resources for it. 6221 * rx or tx interrupts. Don't allocate any resources for it.
@@ -6580,6 +6849,30 @@ static int tg3_chip_reset(struct tg3 *tp)
6580 6849
6581 tg3_mdio_start(tp); 6850 tg3_mdio_start(tp);
6582 6851
6852 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6853 u8 phy_addr;
6854
6855 phy_addr = tp->phy_addr;
6856 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6857
6858 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6859 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6860 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6861 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6862 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6863 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6864 udelay(10);
6865
6866 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6867 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6868 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6869 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6870 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6871 udelay(10);
6872
6873 tp->phy_addr = phy_addr;
6874 }
6875
6583 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 6876 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6584 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 6877 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6585 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 6878 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
@@ -7162,15 +7455,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7162 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 7455 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7163 7456
7164 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 7457 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7165 }
7166 7458
7167 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) { 7459 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7168 val = tr32(TG3_PCIE_LNKCTL); 7460 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7169 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
7170 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7171 else
7172 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7173 tw32(TG3_PCIE_LNKCTL, val);
7174 } 7461 }
7175 7462
7176 /* This works around an issue with Athlon chipsets on 7463 /* This works around an issue with Athlon chipsets on
@@ -7217,9 +7504,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7217 if (err) 7504 if (err)
7218 return err; 7505 return err;
7219 7506
7220 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7221 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && 7508 val = tr32(TG3PCI_DMA_RW_CTRL) &
7222 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { 7509 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7510 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7511 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7512 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7223 /* This value is determined during the probe time DMA 7513 /* This value is determined during the probe time DMA
7224 * engine test, tg3_test_dma. 7514 * engine test, tg3_test_dma.
7225 */ 7515 */
@@ -7342,8 +7632,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7342 ((u64) tpr->rx_std_mapping >> 32)); 7632 ((u64) tpr->rx_std_mapping >> 32));
7343 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7633 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7344 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7634 ((u64) tpr->rx_std_mapping & 0xffffffff));
7345 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7635 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7346 NIC_SRAM_RX_BUFFER_DESC); 7636 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7637 NIC_SRAM_RX_BUFFER_DESC);
7347 7638
7348 /* Disable the mini ring */ 7639 /* Disable the mini ring */
7349 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7640 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
@@ -7366,8 +7657,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7366 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7657 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7367 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7658 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7368 BDINFO_FLAGS_USE_EXT_RECV); 7659 BDINFO_FLAGS_USE_EXT_RECV);
7369 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7660 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7370 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7661 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7662 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7371 } else { 7663 } else {
7372 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7664 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7373 BDINFO_FLAGS_DISABLED); 7665 BDINFO_FLAGS_DISABLED);
@@ -7383,14 +7675,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7383 7675
7384 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 7676 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7385 7677
7386 tpr->rx_std_ptr = tp->rx_pending; 7678 tpr->rx_std_prod_idx = tp->rx_pending;
7387 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 7679 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7388 tpr->rx_std_ptr);
7389 7680
7390 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 7681 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7391 tp->rx_jumbo_pending : 0; 7682 tp->rx_jumbo_pending : 0;
7392 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 7683 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7393 tpr->rx_jmb_ptr);
7394 7684
7395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 7685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7396 tw32(STD_REPLENISH_LWM, 32); 7686 tw32(STD_REPLENISH_LWM, 32);
@@ -7453,7 +7743,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7453 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7743 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7454 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 7744 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7455 7745
7456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 7746 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 7748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7458 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 7749 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7459 7750
@@ -7602,6 +7893,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7602 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 7893 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7603 val |= WDMAC_MODE_STATUS_TAG_FIX; 7894 val |= WDMAC_MODE_STATUS_TAG_FIX;
7604 7895
7896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7897 val |= WDMAC_MODE_BURST_ALL_DATA;
7898
7605 tw32_f(WDMAC_MODE, val); 7899 tw32_f(WDMAC_MODE, val);
7606 udelay(40); 7900 udelay(40);
7607 7901
@@ -9240,9 +9534,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9240 struct tg3 *tp = netdev_priv(dev); 9534 struct tg3 *tp = netdev_priv(dev);
9241 9535
9242 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9536 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9537 struct phy_device *phydev;
9243 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9538 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9244 return -EAGAIN; 9539 return -EAGAIN;
9245 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9540 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9541 return phy_ethtool_gset(phydev, cmd);
9246 } 9542 }
9247 9543
9248 cmd->supported = (SUPPORTED_Autoneg); 9544 cmd->supported = (SUPPORTED_Autoneg);
@@ -9281,9 +9577,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9281 struct tg3 *tp = netdev_priv(dev); 9577 struct tg3 *tp = netdev_priv(dev);
9282 9578
9283 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9579 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9580 struct phy_device *phydev;
9284 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9581 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9285 return -EAGAIN; 9582 return -EAGAIN;
9286 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9583 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9584 return phy_ethtool_sset(phydev, cmd);
9287 } 9585 }
9288 9586
9289 if (cmd->autoneg != AUTONEG_ENABLE && 9587 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -9436,15 +9734,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
9436 return 0; 9734 return 0;
9437 } 9735 }
9438 if ((dev->features & NETIF_F_IPV6_CSUM) && 9736 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9439 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { 9737 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9738 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9440 if (value) { 9739 if (value) {
9441 dev->features |= NETIF_F_TSO6; 9740 dev->features |= NETIF_F_TSO6;
9442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 9741 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9443 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 9743 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9444 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 9744 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 9745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 9746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9448 dev->features |= NETIF_F_TSO_ECN; 9747 dev->features |= NETIF_F_TSO_ECN;
9449 } else 9748 } else
9450 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9749 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -9466,7 +9765,7 @@ static int tg3_nway_reset(struct net_device *dev)
9466 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9765 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9467 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9766 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9468 return -EAGAIN; 9767 return -EAGAIN;
9469 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]); 9768 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9470 } else { 9769 } else {
9471 u32 bmcr; 9770 u32 bmcr;
9472 9771
@@ -9585,7 +9884,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9585 u32 newadv; 9884 u32 newadv;
9586 struct phy_device *phydev; 9885 struct phy_device *phydev;
9587 9886
9588 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 9887 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9589 9888
9590 if (epause->rx_pause) { 9889 if (epause->rx_pause) {
9591 if (epause->tx_pause) 9890 if (epause->tx_pause)
@@ -10338,7 +10637,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10338 for (i = 14; i < tx_len; i++) 10637 for (i = 14; i < tx_len; i++)
10339 tx_data[i] = (u8) (i & 0xff); 10638 tx_data[i] = (u8) (i & 0xff);
10340 10639
10341 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 10640 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10641 dev_kfree_skb(skb);
10642 return -EIO;
10643 }
10342 10644
10343 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10645 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10344 rnapi->coal_now); 10646 rnapi->coal_now);
@@ -10349,7 +10651,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10349 10651
10350 num_pkts = 0; 10652 num_pkts = 0;
10351 10653
10352 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); 10654 tg3_set_txd(tnapi, tnapi->tx_prod,
10655 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10353 10656
10354 tnapi->tx_prod++; 10657 tnapi->tx_prod++;
10355 num_pkts++; 10658 num_pkts++;
@@ -10359,8 +10662,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10359 10662
10360 udelay(10); 10663 udelay(10);
10361 10664
10362 /* 250 usec to allow enough time on some 10/100 Mbps devices. */ 10665 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10363 for (i = 0; i < 25; i++) { 10666 for (i = 0; i < 35; i++) {
10364 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10667 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10365 coal_now); 10668 coal_now);
10366 10669
@@ -10373,7 +10676,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10373 break; 10676 break;
10374 } 10677 }
10375 10678
10376 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 10679 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10377 dev_kfree_skb(skb); 10680 dev_kfree_skb(skb);
10378 10681
10379 if (tx_idx != tnapi->tx_prod) 10682 if (tx_idx != tnapi->tx_prod)
@@ -10565,9 +10868,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10565 int err; 10868 int err;
10566 10869
10567 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10870 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10871 struct phy_device *phydev;
10568 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 10872 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10569 return -EAGAIN; 10873 return -EAGAIN;
10570 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd); 10874 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10875 return phy_mii_ioctl(phydev, data, cmd);
10571 } 10876 }
10572 10877
10573 switch(cmd) { 10878 switch(cmd) {
@@ -10887,7 +11192,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10887 11192
10888 /* NVRAM protection for TPM */ 11193 /* NVRAM protection for TPM */
10889 if (nvcfg1 & (1 << 27)) 11194 if (nvcfg1 & (1 << 27))
10890 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11195 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10891 11196
10892 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11197 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10893 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 11198 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
@@ -10928,7 +11233,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10928 11233
10929 /* NVRAM protection for TPM */ 11234 /* NVRAM protection for TPM */
10930 if (nvcfg1 & (1 << 27)) { 11235 if (nvcfg1 & (1 << 27)) {
10931 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11236 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10932 protect = 1; 11237 protect = 1;
10933 } 11238 }
10934 11239
@@ -11022,7 +11327,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11022 11327
11023 /* NVRAM protection for TPM */ 11328 /* NVRAM protection for TPM */
11024 if (nvcfg1 & (1 << 27)) { 11329 if (nvcfg1 & (1 << 27)) {
11025 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11330 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11026 protect = 1; 11331 protect = 1;
11027 } 11332 }
11028 11333
@@ -11524,7 +11829,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11524 11829
11525 tg3_enable_nvram_access(tp); 11830 tg3_enable_nvram_access(tp);
11526 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 11831 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11527 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) 11832 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11528 tw32(NVRAM_WRITE1, 0x406); 11833 tw32(NVRAM_WRITE1, 0x406);
11529 11834
11530 grc_mode = tr32(GRC_MODE); 11835 grc_mode = tr32(GRC_MODE);
@@ -12400,10 +12705,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { 12705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12401 u32 prod_id_asic_rev; 12706 u32 prod_id_asic_rev;
12402 12707
12403 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || 12708 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12404 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || 12709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || 12710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12407 pci_read_config_dword(tp->pdev, 12711 pci_read_config_dword(tp->pdev,
12408 TG3PCI_GEN2_PRODID_ASICREV, 12712 TG3PCI_GEN2_PRODID_ASICREV,
12409 &prod_id_asic_rev); 12713 &prod_id_asic_rev);
@@ -12586,6 +12890,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12586 tp->dev->features |= NETIF_F_IPV6_CSUM; 12890 tp->dev->features |= NETIF_F_IPV6_CSUM;
12587 } 12891 }
12588 12892
12893 /* Determine TSO capabilities */
12894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12895 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12896 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12898 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12899 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12900 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12902 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12903 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12904 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12906 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12907 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12909 tp->fw_needed = FIRMWARE_TG3TSO5;
12910 else
12911 tp->fw_needed = FIRMWARE_TG3TSO;
12912 }
12913
12914 tp->irq_max = 1;
12915
12589 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 12916 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12590 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 12917 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12591 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 12918 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -12597,25 +12924,22 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12597 12924
12598 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 12925 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12600 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12601 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12927 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12602 } else {
12603 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12604 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12605 ASIC_REV_5750 &&
12606 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12607 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12608 } 12928 }
12609 }
12610 12929
12611 tp->irq_max = 1; 12930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12931 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12932 tp->irq_max = TG3_IRQ_MAX_VECS;
12933 }
12934 }
12612 12935
12613#ifdef TG3_NAPI 12936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12614 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 12937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12615 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 12938 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12616 tp->irq_max = TG3_IRQ_MAX_VECS; 12939 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12940 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12941 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12617 } 12942 }
12618#endif
12619 12943
12620 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 12944 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12621 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 12945 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
@@ -12926,11 +13250,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12927 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 13251 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12928 13252
12929 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12930 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12931 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12932 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12933
12934 err = tg3_mdio_init(tp); 13253 err = tg3_mdio_init(tp);
12935 if (err) 13254 if (err)
12936 return err; 13255 return err;
@@ -13220,6 +13539,11 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13220#endif 13539#endif
13221#endif 13540#endif
13222 13541
13542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13543 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13544 goto out;
13545 }
13546
13223 if (!goal) 13547 if (!goal)
13224 goto out; 13548 goto out;
13225 13549
@@ -13414,7 +13738,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13414{ 13738{
13415 dma_addr_t buf_dma; 13739 dma_addr_t buf_dma;
13416 u32 *buf, saved_dma_rwctrl; 13740 u32 *buf, saved_dma_rwctrl;
13417 int ret; 13741 int ret = 0;
13418 13742
13419 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 13743 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13420 if (!buf) { 13744 if (!buf) {
@@ -13427,6 +13751,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13427 13751
13428 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13752 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13429 13753
13754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13755 goto out;
13756
13430 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13757 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13431 /* DMA read watermark not used on PCIE */ 13758 /* DMA read watermark not used on PCIE */
13432 tp->dma_rwctrl |= 0x00180000; 13759 tp->dma_rwctrl |= 0x00180000;
@@ -13499,7 +13826,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13499 tg3_switch_clocks(tp); 13826 tg3_switch_clocks(tp);
13500#endif 13827#endif
13501 13828
13502 ret = 0;
13503 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13829 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13504 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 13830 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13505 goto out; 13831 goto out;
@@ -13678,6 +14004,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
13678 case PHY_ID_BCM5756: return "5722/5756"; 14004 case PHY_ID_BCM5756: return "5722/5756";
13679 case PHY_ID_BCM5906: return "5906"; 14005 case PHY_ID_BCM5906: return "5906";
13680 case PHY_ID_BCM5761: return "5761"; 14006 case PHY_ID_BCM5761: return "5761";
14007 case PHY_ID_BCM5717: return "5717";
13681 case PHY_ID_BCM8002: return "8002/serdes"; 14008 case PHY_ID_BCM8002: return "8002/serdes";
13682 case 0: return "serdes"; 14009 case 0: return "serdes";
13683 default: return "unknown"; 14010 default: return "unknown";
@@ -13919,51 +14246,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13919 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14246 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13920 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14247 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13921 14248
13922 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
13923 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
13924 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
13925 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
13926 struct tg3_napi *tnapi = &tp->napi[i];
13927
13928 tnapi->tp = tp;
13929 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
13930
13931 tnapi->int_mbox = intmbx;
13932 if (i < 4)
13933 intmbx += 0x8;
13934 else
13935 intmbx += 0x4;
13936
13937 tnapi->consmbox = rcvmbx;
13938 tnapi->prodmbox = sndmbx;
13939
13940 if (i)
13941 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
13942 else
13943 tnapi->coal_now = HOSTCC_MODE_NOW;
13944
13945 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
13946 break;
13947
13948 /*
13949 * If we support MSIX, we'll be using RSS. If we're using
13950 * RSS, the first vector only handles link interrupts and the
13951 * remaining vectors handle rx and tx interrupts. Reuse the
13952 * mailbox values for the next iteration. The values we setup
13953 * above are still useful for the single vectored mode.
13954 */
13955 if (!i)
13956 continue;
13957
13958 rcvmbx += 0x8;
13959
13960 if (sndmbx & 0x4)
13961 sndmbx -= 0x4;
13962 else
13963 sndmbx += 0xc;
13964 }
13965
13966 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
13967 dev->ethtool_ops = &tg3_ethtool_ops; 14249 dev->ethtool_ops = &tg3_ethtool_ops;
13968 dev->watchdog_timeo = TG3_TX_TIMEOUT; 14250 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13969 dev->irq = pdev->irq; 14251 dev->irq = pdev->irq;
@@ -13975,8 +14257,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13975 goto err_out_iounmap; 14257 goto err_out_iounmap;
13976 } 14258 }
13977 14259
13978 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 14260 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
13979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 14261 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13980 dev->netdev_ops = &tg3_netdev_ops; 14262 dev->netdev_ops = &tg3_netdev_ops;
13981 else 14263 else
13982 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14264 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14023,46 +14305,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14023 14305
14024 tg3_init_bufmgr_config(tp); 14306 tg3_init_bufmgr_config(tp);
14025 14307
14026 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 14308 /* Selectively allow TSO based on operating conditions */
14027 tp->fw_needed = FIRMWARE_TG3; 14309 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14028 14310 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14029 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
14030 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 14311 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14031 } 14312 else {
14032 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 14313 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 14314 tp->fw_needed = NULL;
14034 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
14035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14036 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
14037 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
14038 } else {
14039 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
14040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14041 tp->fw_needed = FIRMWARE_TG3TSO5;
14042 else
14043 tp->fw_needed = FIRMWARE_TG3TSO;
14044 } 14315 }
14045 14316
14317 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14318 tp->fw_needed = FIRMWARE_TG3;
14319
14046 /* TSO is on by default on chips that support hardware TSO. 14320 /* TSO is on by default on chips that support hardware TSO.
14047 * Firmware TSO on older chips gives lower performance, so it 14321 * Firmware TSO on older chips gives lower performance, so it
14048 * is off by default, but can be enabled using ethtool. 14322 * is off by default, but can be enabled using ethtool.
14049 */ 14323 */
14050 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 14324 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14051 if (dev->features & NETIF_F_IP_CSUM) 14325 (dev->features & NETIF_F_IP_CSUM))
14052 dev->features |= NETIF_F_TSO; 14326 dev->features |= NETIF_F_TSO;
14053 if ((dev->features & NETIF_F_IPV6_CSUM) && 14327
14054 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) 14328 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14329 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14330 if (dev->features & NETIF_F_IPV6_CSUM)
14055 dev->features |= NETIF_F_TSO6; 14331 dev->features |= NETIF_F_TSO6;
14056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14332 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14057 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14334 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14058 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14335 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 14337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
14062 dev->features |= NETIF_F_TSO_ECN; 14338 dev->features |= NETIF_F_TSO_ECN;
14063 } 14339 }
14064 14340
14065
14066 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14341 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14067 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 14342 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14068 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 14343 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -14113,6 +14388,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14113 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 14388 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14114 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 14389 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14115 14390
14391 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14392 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14393 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14394 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14395 struct tg3_napi *tnapi = &tp->napi[i];
14396
14397 tnapi->tp = tp;
14398 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14399
14400 tnapi->int_mbox = intmbx;
14401 if (i < 4)
14402 intmbx += 0x8;
14403 else
14404 intmbx += 0x4;
14405
14406 tnapi->consmbox = rcvmbx;
14407 tnapi->prodmbox = sndmbx;
14408
14409 if (i) {
14410 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14411 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14412 } else {
14413 tnapi->coal_now = HOSTCC_MODE_NOW;
14414 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14415 }
14416
14417 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14418 break;
14419
14420 /*
14421 * If we support MSIX, we'll be using RSS. If we're using
14422 * RSS, the first vector only handles link interrupts and the
14423 * remaining vectors handle rx and tx interrupts. Reuse the
14424 * mailbox values for the next iteration. The values we setup
14425 * above are still useful for the single vectored mode.
14426 */
14427 if (!i)
14428 continue;
14429
14430 rcvmbx += 0x8;
14431
14432 if (sndmbx & 0x4)
14433 sndmbx -= 0x4;
14434 else
14435 sndmbx += 0xc;
14436 }
14437
14116 tg3_init_coal(tp); 14438 tg3_init_coal(tp);
14117 14439
14118 pci_set_drvdata(pdev, dev); 14440 pci_set_drvdata(pdev, dev);
@@ -14131,13 +14453,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14131 tg3_bus_string(tp, str), 14453 tg3_bus_string(tp, str),
14132 dev->dev_addr); 14454 dev->dev_addr);
14133 14455
14134 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) 14456 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14457 struct phy_device *phydev;
14458 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14135 printk(KERN_INFO 14459 printk(KERN_INFO
14136 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14460 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14137 tp->dev->name, 14461 tp->dev->name, phydev->drv->name,
14138 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name, 14462 dev_name(&phydev->dev));
14139 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev)); 14463 } else
14140 else
14141 printk(KERN_INFO 14464 printk(KERN_INFO
14142 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", 14465 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14143 tp->dev->name, tg3_phy_string(tp), 14466 tp->dev->name, tg3_phy_string(tp),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bab7940158e6..453a34fb72b9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -46,10 +46,9 @@
46#define TG3PCI_DEVICE_TIGON3_57788 0x1691 46#define TG3PCI_DEVICE_TIGON3_57788 0x1691
47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ 47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ 48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
49#define TG3PCI_DEVICE_TIGON3_5717C 0x1655 49#define TG3PCI_DEVICE_TIGON3_5717 0x1655
50#define TG3PCI_DEVICE_TIGON3_5717S 0x1656 50#define TG3PCI_DEVICE_TIGON3_5718 0x1656
51#define TG3PCI_DEVICE_TIGON3_5718C 0x1665 51#define TG3PCI_DEVICE_TIGON3_5724 0x165c
52#define TG3PCI_DEVICE_TIGON3_5718S 0x1666
53/* 0x04 --> 0x64 unused */ 52/* 0x04 --> 0x64 unused */
54#define TG3PCI_MSI_DATA 0x00000064 53#define TG3PCI_MSI_DATA 0x00000064
55/* 0x66 --> 0x68 unused */ 54/* 0x66 --> 0x68 unused */
@@ -103,6 +102,7 @@
103#define CHIPREV_ID_5906_A1 0xc001 102#define CHIPREV_ID_5906_A1 0xc001
104#define CHIPREV_ID_57780_A0 0x57780000 103#define CHIPREV_ID_57780_A0 0x57780000
105#define CHIPREV_ID_57780_A1 0x57780001 104#define CHIPREV_ID_57780_A1 0x57780001
105#define CHIPREV_ID_5717_A0 0x05717000
106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
107#define ASIC_REV_5700 0x07 107#define ASIC_REV_5700 0x07
108#define ASIC_REV_5701 0x00 108#define ASIC_REV_5701 0x00
@@ -141,8 +141,7 @@
141#define METAL_REV_B1 0x01 141#define METAL_REV_B1 0x01
142#define METAL_REV_B2 0x02 142#define METAL_REV_B2 0x02
143#define TG3PCI_DMA_RW_CTRL 0x0000006c 143#define TG3PCI_DMA_RW_CTRL 0x0000006c
144#define DMA_RWCTRL_MIN_DMA 0x000000ff 144#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
145#define DMA_RWCTRL_MIN_DMA_SHIFT 0
146#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 145#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
147#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 146#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
148#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 147#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -242,7 +241,11 @@
242#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ 241#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */
243#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ 242#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */
244#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ 243#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */
244#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \
245 TG3_64BIT_REG_LOW)
245#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ 246#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */
247#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \
248 TG3_64BIT_REG_LOW)
246#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ 249#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */
247#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ 250#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */
248#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ 251#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */
@@ -1264,8 +1267,9 @@
1264#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080 1267#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080
1265#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 1268#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
1266#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 1269#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
1267#define WDMAC_MODE_RX_ACCEL 0x00000400 1270#define WDMAC_MODE_RX_ACCEL 0x00000400
1268#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000 1271#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
1272#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000
1269#define WDMAC_STATUS 0x00004c04 1273#define WDMAC_STATUS 0x00004c04
1270#define WDMAC_STATUS_TGTABORT 0x00000004 1274#define WDMAC_STATUS_TGTABORT 0x00000004
1271#define WDMAC_STATUS_MSTABORT 0x00000008 1275#define WDMAC_STATUS_MSTABORT 0x00000008
@@ -1953,10 +1957,34 @@
1953#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 1957#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
1954#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 1958#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
1955 1959
1960
1956/* Currently this is fixed. */ 1961/* Currently this is fixed. */
1957#define PHY_ADDR 0x01 1962#define TG3_PHY_PCIE_ADDR 0x00
1963#define TG3_PHY_MII_ADDR 0x01
1964
1965
1966/*** Tigon3 specific PHY PCIE registers. ***/
1967
1968#define TG3_PCIEPHY_BLOCK_ADDR 0x1f
1969#define TG3_PCIEPHY_XGXS_BLK1 0x0801
1970#define TG3_PCIEPHY_TXB_BLK 0x0861
1971#define TG3_PCIEPHY_BLOCK_SHIFT 4
1958 1972
1959/* Tigon3 specific PHY MII registers. */ 1973/* TG3_PCIEPHY_TXB_BLK */
1974#define TG3_PCIEPHY_TX0CTRL1 0x15
1975#define TG3_PCIEPHY_TX0CTRL1_TXOCM 0x0003
1976#define TG3_PCIEPHY_TX0CTRL1_RDCTL 0x0008
1977#define TG3_PCIEPHY_TX0CTRL1_TXCMV 0x0030
1978#define TG3_PCIEPHY_TX0CTRL1_TKSEL 0x0040
1979#define TG3_PCIEPHY_TX0CTRL1_NB_EN 0x0400
1980
1981/* TG3_PCIEPHY_XGXS_BLK1 */
1982#define TG3_PCIEPHY_PWRMGMT4 0x1a
1983#define TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN 0x0038
1984#define TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN 0x4000
1985
1986
1987/*** Tigon3 specific PHY MII registers. ***/
1960#define TG3_BMCR_SPEED1000 0x0040 1988#define TG3_BMCR_SPEED1000 0x0040
1961 1989
1962#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */ 1990#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */
@@ -2055,6 +2083,9 @@
2055#define MII_TG3_FET_SHDW_MISCCTRL 0x10 2083#define MII_TG3_FET_SHDW_MISCCTRL 0x10
2056#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 2084#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000
2057 2085
2086#define MII_TG3_FET_SHDW_AUXMODE4 0x1a
2087#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008
2088
2058#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b 2089#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
2059#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020 2090#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
2060 2091
@@ -2542,8 +2573,10 @@ struct tg3_ethtool_stats {
2542}; 2573};
2543 2574
2544struct tg3_rx_prodring_set { 2575struct tg3_rx_prodring_set {
2545 u32 rx_std_ptr; 2576 u32 rx_std_prod_idx;
2546 u32 rx_jmb_ptr; 2577 u32 rx_std_cons_idx;
2578 u32 rx_jmb_prod_idx;
2579 u32 rx_jmb_cons_idx;
2547 struct tg3_rx_buffer_desc *rx_std; 2580 struct tg3_rx_buffer_desc *rx_std;
2548 struct tg3_ext_rx_buffer_desc *rx_jmb; 2581 struct tg3_ext_rx_buffer_desc *rx_jmb;
2549 struct ring_info *rx_std_buffers; 2582 struct ring_info *rx_std_buffers;
@@ -2571,6 +2604,7 @@ struct tg3_napi {
2571 u32 consmbox; 2604 u32 consmbox;
2572 u32 rx_rcb_ptr; 2605 u32 rx_rcb_ptr;
2573 u16 *rx_rcb_prod_idx; 2606 u16 *rx_rcb_prod_idx;
2607 struct tg3_rx_prodring_set *prodring;
2574 2608
2575 struct tg3_rx_buffer_desc *rx_rcb; 2609 struct tg3_rx_buffer_desc *rx_rcb;
2576 struct tg3_tx_buffer_desc *tx_ring; 2610 struct tg3_tx_buffer_desc *tx_ring;
@@ -2654,7 +2688,7 @@ struct tg3 {
2654 struct vlan_group *vlgrp; 2688 struct vlan_group *vlgrp;
2655#endif 2689#endif
2656 2690
2657 struct tg3_rx_prodring_set prodring[1]; 2691 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
2658 2692
2659 2693
2660 /* begin "everything else" cacheline(s) section */ 2694 /* begin "everything else" cacheline(s) section */
@@ -2725,7 +2759,7 @@ struct tg3 {
2725#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 2759#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
2726#define TG3_FLG2_5705_PLUS 0x00040000 2760#define TG3_FLG2_5705_PLUS 0x00040000
2727#define TG3_FLG2_5750_PLUS 0x00080000 2761#define TG3_FLG2_5750_PLUS 0x00080000
2728#define TG3_FLG2_PROTECTED_NVRAM 0x00100000 2762#define TG3_FLG2_HW_TSO_3 0x00100000
2729#define TG3_FLG2_USING_MSI 0x00200000 2763#define TG3_FLG2_USING_MSI 0x00200000
2730#define TG3_FLG2_USING_MSIX 0x00400000 2764#define TG3_FLG2_USING_MSIX 0x00400000
2731#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ 2765#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
@@ -2737,7 +2771,9 @@ struct tg3 {
2737#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2771#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2738#define TG3_FLG2_5780_CLASS 0x04000000 2772#define TG3_FLG2_5780_CLASS 0x04000000
2739#define TG3_FLG2_HW_TSO_2 0x08000000 2773#define TG3_FLG2_HW_TSO_2 0x08000000
2740#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2774#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \
2775 TG3_FLG2_HW_TSO_2 | \
2776 TG3_FLG2_HW_TSO_3)
2741#define TG3_FLG2_1SHOT_MSI 0x10000000 2777#define TG3_FLG2_1SHOT_MSI 0x10000000
2742#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2778#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2743#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 2779#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
@@ -2745,6 +2781,7 @@ struct tg3 {
2745 u32 tg3_flags3; 2781 u32 tg3_flags3;
2746#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 2782#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
2747#define TG3_FLG3_ENABLE_APE 0x00000002 2783#define TG3_FLG3_ENABLE_APE 0x00000002
2784#define TG3_FLG3_PROTECTED_NVRAM 0x00000004
2748#define TG3_FLG3_5701_DMA_BUG 0x00000008 2785#define TG3_FLG3_5701_DMA_BUG 0x00000008
2749#define TG3_FLG3_USE_PHYLIB 0x00000010 2786#define TG3_FLG3_USE_PHYLIB 0x00000010
2750#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2787#define TG3_FLG3_MDIOBUS_INITED 0x00000020
@@ -2756,9 +2793,11 @@ struct tg3 {
2756#define TG3_FLG3_PHY_ENABLE_APD 0x00001000 2793#define TG3_FLG3_PHY_ENABLE_APD 0x00001000
2757#define TG3_FLG3_5755_PLUS 0x00002000 2794#define TG3_FLG3_5755_PLUS 0x00002000
2758#define TG3_FLG3_NO_NVRAM 0x00004000 2795#define TG3_FLG3_NO_NVRAM 0x00004000
2759#define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
2760#define TG3_FLG3_PHY_IS_FET 0x00010000 2796#define TG3_FLG3_PHY_IS_FET 0x00010000
2761#define TG3_FLG3_ENABLE_RSS 0x00020000 2797#define TG3_FLG3_ENABLE_RSS 0x00020000
2798#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
2799#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
2800#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2762 2801
2763 struct timer_list timer; 2802 struct timer_list timer;
2764 u16 timer_counter; 2803 u16 timer_counter;
@@ -2825,6 +2864,7 @@ struct tg3 {
2825#define PHY_ID_BCM5756 0xbc050ed0 2864#define PHY_ID_BCM5756 0xbc050ed0
2826#define PHY_ID_BCM5784 0xbc050fa0 2865#define PHY_ID_BCM5784 0xbc050fa0
2827#define PHY_ID_BCM5761 0xbc050fd0 2866#define PHY_ID_BCM5761 0xbc050fd0
2867#define PHY_ID_BCM5717 0x5c0d8a00
2828#define PHY_ID_BCM5906 0xdc00ac40 2868#define PHY_ID_BCM5906 0xdc00ac40
2829#define PHY_ID_BCM8002 0x60010140 2869#define PHY_ID_BCM8002 0x60010140
2830#define PHY_ID_INVALID 0xffffffff 2870#define PHY_ID_INVALID 0xffffffff
@@ -2834,6 +2874,7 @@ struct tg3 {
2834#define PHY_REV_BCM5401_C0 0x6 2874#define PHY_REV_BCM5401_C0 0x6
2835#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2875#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2836#define TG3_PHY_ID_BCM50610 0x143bd60 2876#define TG3_PHY_ID_BCM50610 0x143bd60
2877#define TG3_PHY_ID_BCM50610M 0x143bd70
2837#define TG3_PHY_ID_BCMAC131 0x143bc70 2878#define TG3_PHY_ID_BCMAC131 0x143bc70
2838#define TG3_PHY_ID_RTL8211C 0x001cc910 2879#define TG3_PHY_ID_RTL8211C 0x001cc910
2839#define TG3_PHY_ID_RTL8201E 0x00008200 2880#define TG3_PHY_ID_RTL8201E 0x00008200
@@ -2865,7 +2906,7 @@ struct tg3 {
2865 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ 2906 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2866 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ 2907 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
2867 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ 2908 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
2868 (X) == PHY_ID_BCM8002) 2909 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
2869 2910
2870 struct tg3_hw_stats *hw_stats; 2911 struct tg3_hw_stats *hw_stats;
2871 dma_addr_t stats_mapping; 2912 dma_addr_t stats_mapping;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 3d31b47332bb..16f23f84920b 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1549,7 +1549,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1549 if (tmpCStat & TLAN_CSTAT_EOC) 1549 if (tmpCStat & TLAN_CSTAT_EOC)
1550 eoc = 1; 1550 eoc = 1;
1551 1551
1552 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); 1552 new_skb = netdev_alloc_skb_ip_align(dev,
1553 TLAN_MAX_FRAME_SIZE + 5);
1553 if ( !new_skb ) 1554 if ( !new_skb )
1554 goto drop_and_reuse; 1555 goto drop_and_reuse;
1555 1556
@@ -1563,7 +1564,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1563 skb->protocol = eth_type_trans( skb, dev ); 1564 skb->protocol = eth_type_trans( skb, dev );
1564 netif_rx( skb ); 1565 netif_rx( skb );
1565 1566
1566 skb_reserve( new_skb, NET_IP_ALIGN );
1567 head_list->buffer[0].address = pci_map_single(priv->pciDev, 1567 head_list->buffer[0].address = pci_map_single(priv->pciDev,
1568 new_skb->data, 1568 new_skb->data,
1569 TLAN_MAX_FRAME_SIZE, 1569 TLAN_MAX_FRAME_SIZE,
@@ -1967,13 +1967,12 @@ static void TLan_ResetLists( struct net_device *dev )
1967 list->cStat = TLAN_CSTAT_READY; 1967 list->cStat = TLAN_CSTAT_READY;
1968 list->frameSize = TLAN_MAX_FRAME_SIZE; 1968 list->frameSize = TLAN_MAX_FRAME_SIZE;
1969 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1969 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
1970 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); 1970 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
1971 if ( !skb ) { 1971 if ( !skb ) {
1972 pr_err("TLAN: out of memory for received data.\n" ); 1972 pr_err("TLAN: out of memory for received data.\n" );
1973 break; 1973 break;
1974 } 1974 }
1975 1975
1976 skb_reserve( skb, NET_IP_ALIGN );
1977 list->buffer[0].address = pci_map_single(priv->pciDev, 1976 list->buffer[0].address = pci_map_single(priv->pciDev,
1978 skb->data, 1977 skb->data,
1979 TLAN_MAX_FRAME_SIZE, 1978 TLAN_MAX_FRAME_SIZE,
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 724158966ec1..cf552d1d9629 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -610,9 +610,8 @@ static int xl_open(struct net_device *dev)
610 610
611 u16 switchsettings, switchsettings_eeprom ; 611 u16 switchsettings, switchsettings_eeprom ;
612 612
613 if(request_irq(dev->irq, &xl_interrupt, IRQF_SHARED , "3c359", dev)) { 613 if (request_irq(dev->irq, xl_interrupt, IRQF_SHARED , "3c359", dev))
614 return -EAGAIN; 614 return -EAGAIN;
615 }
616 615
617 /* 616 /*
618 * Read the information from the EEPROM that we need. 617 * Read the information from the EEPROM that we need.
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 75fa32e34fd0..5db0270957ac 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -680,7 +680,7 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
680 680
681 /* The PCMCIA has already got the interrupt line and the io port, 681 /* The PCMCIA has already got the interrupt line and the io port,
682 so no chance of anybody else getting it - MLP */ 682 so no chance of anybody else getting it - MLP */
683 if (request_irq(dev->irq = irq, &tok_interrupt, 0, "ibmtr", dev) != 0) { 683 if (request_irq(dev->irq = irq, tok_interrupt, 0, "ibmtr", dev) != 0) {
684 DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n", 684 DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
685 irq); 685 irq);
686 iounmap(t_mmio); 686 iounmap(t_mmio);
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 26dca2b2bdbd..7b1fe9412b6f 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -596,7 +596,7 @@ static int streamer_open(struct net_device *dev)
596 rc=streamer_reset(dev); 596 rc=streamer_reset(dev);
597 } 597 }
598 598
599 if (request_irq(dev->irq, &streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) { 599 if (request_irq(dev->irq, streamer_interrupt, IRQF_SHARED, "lanstreamer", dev)) {
600 return -EAGAIN; 600 return -EAGAIN;
601 } 601 }
602#if STREAMER_DEBUG 602#if STREAMER_DEBUG
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index d9ec7f0bbd0a..df32025c5132 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -445,9 +445,9 @@ static int olympic_open(struct net_device *dev)
445 445
446 olympic_init(dev); 446 olympic_init(dev);
447 447
448 if(request_irq(dev->irq, &olympic_interrupt, IRQF_SHARED , "olympic", dev)) { 448 if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED , "olympic",
449 dev))
449 return -EAGAIN; 450 return -EAGAIN;
450 }
451 451
452#if OLYMPIC_DEBUG 452#if OLYMPIC_DEBUG
453 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM)); 453 printk("BMCTL: %x\n",readl(olympic_mmio+BMCTL_SUM));
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index a7b6888829b5..fa152144aacf 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1364,6 +1364,8 @@ static int tms380tr_reset_adapter(struct net_device *dev)
1364 return (-1); 1364 return (-1);
1365} 1365}
1366 1366
1367MODULE_FIRMWARE("tms380tr.bin");
1368
1367/* 1369/*
1368 * Starts bring up diagnostics of token ring adapter and evaluates 1370 * Starts bring up diagnostics of token ring adapter and evaluates
1369 * diagnostic results. 1371 * diagnostic results.
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 7030bd5e9848..a69c4a48bab9 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -802,13 +802,11 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
802 int rx = data->rxhead; 802 int rx = data->rxhead;
803 struct sk_buff *skb; 803 struct sk_buff *skb;
804 804
805 data->rxskbs[rx] = skb = netdev_alloc_skb(dev, 805 skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
806 TSI108_RXBUF_SIZE + 2); 806 data->rxskbs[rx] = skb;
807 if (!skb) 807 if (!skb)
808 break; 808 break;
809 809
810 skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
811
812 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, 810 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
813 TSI108_RX_SKB_SIZE, 811 TSI108_RX_SKB_SIZE,
814 DMA_FROM_DEVICE); 812 DMA_FROM_DEVICE);
@@ -1356,7 +1354,7 @@ static int tsi108_open(struct net_device *dev)
1356 for (i = 0; i < TSI108_RXRING_LEN; i++) { 1354 for (i = 0; i < TSI108_RXRING_LEN; i++) {
1357 struct sk_buff *skb; 1355 struct sk_buff *skb;
1358 1356
1359 skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN); 1357 skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
1360 if (!skb) { 1358 if (!skb) {
1361 /* Bah. No memory for now, but maybe we'll get 1359 /* Bah. No memory for now, but maybe we'll get
1362 * some more later. 1360 * some more later.
@@ -1370,8 +1368,6 @@ static int tsi108_open(struct net_device *dev)
1370 } 1368 }
1371 1369
1372 data->rxskbs[i] = skb; 1370 data->rxskbs[i] = skb;
1373 /* Align the payload on a 4-byte boundary */
1374 skb_reserve(skb, 2);
1375 data->rxskbs[i] = skb; 1371 data->rxskbs[i] = skb;
1376 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); 1372 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1377 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; 1373 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a45ded0538b8..ad63621913c3 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -543,7 +543,7 @@ static int dmfe_open(struct DEVICE *dev)
543 543
544 DMFE_DBUG(0, "dmfe_open", 0); 544 DMFE_DBUG(0, "dmfe_open", 0);
545 545
546 ret = request_irq(dev->irq, &dmfe_interrupt, 546 ret = request_irq(dev->irq, dmfe_interrupt,
547 IRQF_SHARED, dev->name, dev); 547 IRQF_SHARED, dev->name, dev);
548 if (ret) 548 if (ret)
549 return ret; 549 return ret;
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 6b2330e4206e..0df983bc03a6 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -506,7 +506,7 @@ tulip_open(struct net_device *dev)
506 506
507 tulip_init_ring (dev); 507 tulip_init_ring (dev);
508 508
509 retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev); 509 retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
510 if (retval) 510 if (retval)
511 goto free_ring; 511 goto free_ring;
512 512
@@ -1782,7 +1782,7 @@ static int tulip_resume(struct pci_dev *pdev)
1782 return retval; 1782 return retval;
1783 } 1783 }
1784 1784
1785 if ((retval = request_irq(dev->irq, &tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1785 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1786 printk (KERN_ERR "tulip: request_irq failed in resume\n"); 1786 printk (KERN_ERR "tulip: request_irq failed in resume\n");
1787 return retval; 1787 return retval;
1788 } 1788 }
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index c457a0ca55ad..fa019cabc355 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -461,7 +461,7 @@ static int uli526x_open(struct net_device *dev)
461 /* Initialize ULI526X board */ 461 /* Initialize ULI526X board */
462 uli526x_init(dev); 462 uli526x_init(dev);
463 463
464 ret = request_irq(dev->irq, &uli526x_interrupt, IRQF_SHARED, dev->name, dev); 464 ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
465 if (ret) 465 if (ret)
466 return ret; 466 return ret;
467 467
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index b38d3b7f6e35..1a52729c9466 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -639,7 +639,7 @@ static int netdev_open(struct net_device *dev)
639 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ 639 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
640 640
641 netif_device_detach(dev); 641 netif_device_detach(dev);
642 i = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev); 642 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
643 if (i) 643 if (i)
644 goto out_err; 644 goto out_err;
645 645
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 0f2ca5980c3c..9924c4c7e2d6 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -458,7 +458,7 @@ static int xircom_open(struct net_device *dev)
458 int retval; 458 int retval;
459 enter("xircom_open"); 459 enter("xircom_open");
460 printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq); 460 printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
461 retval = request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev); 461 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
462 if (retval) { 462 if (retval) {
463 leave("xircom_open - No IRQ"); 463 leave("xircom_open - No IRQ");
464 return retval; 464 return retval;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4fdfa2ae5418..01e99f22210e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -44,7 +44,6 @@
44#include <linux/kernel.h> 44#include <linux/kernel.h>
45#include <linux/major.h> 45#include <linux/major.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/smp_lock.h>
48#include <linux/poll.h> 47#include <linux/poll.h>
49#include <linux/fcntl.h> 48#include <linux/fcntl.h>
50#include <linux/init.h> 49#include <linux/init.h>
@@ -54,6 +53,7 @@
54#include <linux/miscdevice.h> 53#include <linux/miscdevice.h>
55#include <linux/ethtool.h> 54#include <linux/ethtool.h>
56#include <linux/rtnetlink.h> 55#include <linux/rtnetlink.h>
56#include <linux/compat.h>
57#include <linux/if.h> 57#include <linux/if.h>
58#include <linux/if_arp.h> 58#include <linux/if_arp.h>
59#include <linux/if_ether.h> 59#include <linux/if_ether.h>
@@ -1110,8 +1110,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
1110 return 0; 1110 return 0;
1111} 1111}
1112 1112
1113static long tun_chr_ioctl(struct file *file, unsigned int cmd, 1113static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1114 unsigned long arg) 1114 unsigned long arg, int ifreq_len)
1115{ 1115{
1116 struct tun_file *tfile = file->private_data; 1116 struct tun_file *tfile = file->private_data;
1117 struct tun_struct *tun; 1117 struct tun_struct *tun;
@@ -1121,7 +1121,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1121 int ret; 1121 int ret;
1122 1122
1123 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) 1123 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
1124 if (copy_from_user(&ifr, argp, sizeof ifr)) 1124 if (copy_from_user(&ifr, argp, ifreq_len))
1125 return -EFAULT; 1125 return -EFAULT;
1126 1126
1127 if (cmd == TUNGETFEATURES) { 1127 if (cmd == TUNGETFEATURES) {
@@ -1144,7 +1144,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1144 if (ret) 1144 if (ret)
1145 goto unlock; 1145 goto unlock;
1146 1146
1147 if (copy_to_user(argp, &ifr, sizeof(ifr))) 1147 if (copy_to_user(argp, &ifr, ifreq_len))
1148 ret = -EFAULT; 1148 ret = -EFAULT;
1149 goto unlock; 1149 goto unlock;
1150 } 1150 }
@@ -1162,7 +1162,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1162 if (ret) 1162 if (ret)
1163 break; 1163 break;
1164 1164
1165 if (copy_to_user(argp, &ifr, sizeof(ifr))) 1165 if (copy_to_user(argp, &ifr, ifreq_len))
1166 ret = -EFAULT; 1166 ret = -EFAULT;
1167 break; 1167 break;
1168 1168
@@ -1236,7 +1236,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1236 /* Get hw addres */ 1236 /* Get hw addres */
1237 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 1237 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1238 ifr.ifr_hwaddr.sa_family = tun->dev->type; 1238 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1239 if (copy_to_user(argp, &ifr, sizeof ifr)) 1239 if (copy_to_user(argp, &ifr, ifreq_len))
1240 ret = -EFAULT; 1240 ret = -EFAULT;
1241 break; 1241 break;
1242 1242
@@ -1275,6 +1275,41 @@ unlock:
1275 return ret; 1275 return ret;
1276} 1276}
1277 1277
1278static long tun_chr_ioctl(struct file *file,
1279 unsigned int cmd, unsigned long arg)
1280{
1281 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
1282}
1283
1284#ifdef CONFIG_COMPAT
1285static long tun_chr_compat_ioctl(struct file *file,
1286 unsigned int cmd, unsigned long arg)
1287{
1288 switch (cmd) {
1289 case TUNSETIFF:
1290 case TUNGETIFF:
1291 case TUNSETTXFILTER:
1292 case TUNGETSNDBUF:
1293 case TUNSETSNDBUF:
1294 case SIOCGIFHWADDR:
1295 case SIOCSIFHWADDR:
1296 arg = (unsigned long)compat_ptr(arg);
1297 break;
1298 default:
1299 arg = (compat_ulong_t)arg;
1300 break;
1301 }
1302
1303 /*
1304 * compat_ifreq is shorter than ifreq, so we must not access beyond
1305 * the end of that structure. All fields that are used in this
1306 * driver are compatible though, we don't need to convert the
1307 * contents.
1308 */
1309 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
1310}
1311#endif /* CONFIG_COMPAT */
1312
1278static int tun_chr_fasync(int fd, struct file *file, int on) 1313static int tun_chr_fasync(int fd, struct file *file, int on)
1279{ 1314{
1280 struct tun_struct *tun = tun_get(file); 1315 struct tun_struct *tun = tun_get(file);
@@ -1285,7 +1320,6 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1285 1320
1286 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); 1321 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
1287 1322
1288 lock_kernel();
1289 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) 1323 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1290 goto out; 1324 goto out;
1291 1325
@@ -1298,7 +1332,6 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1298 tun->flags &= ~TUN_FASYNC; 1332 tun->flags &= ~TUN_FASYNC;
1299 ret = 0; 1333 ret = 0;
1300out: 1334out:
1301 unlock_kernel();
1302 tun_put(tun); 1335 tun_put(tun);
1303 return ret; 1336 return ret;
1304} 1337}
@@ -1306,7 +1339,7 @@ out:
1306static int tun_chr_open(struct inode *inode, struct file * file) 1339static int tun_chr_open(struct inode *inode, struct file * file)
1307{ 1340{
1308 struct tun_file *tfile; 1341 struct tun_file *tfile;
1309 cycle_kernel_lock(); 1342
1310 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1343 DBG1(KERN_INFO "tunX: tun_chr_open\n");
1311 1344
1312 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 1345 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
@@ -1359,7 +1392,10 @@ static const struct file_operations tun_fops = {
1359 .write = do_sync_write, 1392 .write = do_sync_write,
1360 .aio_write = tun_chr_aio_write, 1393 .aio_write = tun_chr_aio_write,
1361 .poll = tun_chr_poll, 1394 .poll = tun_chr_poll,
1362 .unlocked_ioctl = tun_chr_ioctl, 1395 .unlocked_ioctl = tun_chr_ioctl,
1396#ifdef CONFIG_COMPAT
1397 .compat_ioctl = tun_chr_compat_ioctl,
1398#endif
1363 .open = tun_chr_open, 1399 .open = tun_chr_open,
1364 .release = tun_chr_close, 1400 .release = tun_chr_close,
1365 .fasync = tun_chr_fasync 1401 .fasync = tun_chr_fasync
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 5921f5bdd764..079a97000e5b 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -2151,7 +2151,7 @@ typhoon_open(struct net_device *dev)
2151 goto out_sleep; 2151 goto out_sleep;
2152 } 2152 }
2153 2153
2154 err = request_irq(dev->irq, &typhoon_interrupt, IRQF_SHARED, 2154 err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
2155 dev->name, dev); 2155 dev->name, dev);
2156 if(err < 0) 2156 if(err < 0)
2157 goto out_sleep; 2157 goto out_sleep;
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6ce7f775bb74..1bef39a60a62 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1327,7 +1327,7 @@ static const struct driver_info ax8817x_info = {
1327 .status = asix_status, 1327 .status = asix_status,
1328 .link_reset = ax88172_link_reset, 1328 .link_reset = ax88172_link_reset,
1329 .reset = ax88172_link_reset, 1329 .reset = ax88172_link_reset,
1330 .flags = FLAG_ETHER, 1330 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1331 .data = 0x00130103, 1331 .data = 0x00130103,
1332}; 1332};
1333 1333
@@ -1337,7 +1337,7 @@ static const struct driver_info dlink_dub_e100_info = {
1337 .status = asix_status, 1337 .status = asix_status,
1338 .link_reset = ax88172_link_reset, 1338 .link_reset = ax88172_link_reset,
1339 .reset = ax88172_link_reset, 1339 .reset = ax88172_link_reset,
1340 .flags = FLAG_ETHER, 1340 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1341 .data = 0x009f9d9f, 1341 .data = 0x009f9d9f,
1342}; 1342};
1343 1343
@@ -1347,7 +1347,7 @@ static const struct driver_info netgear_fa120_info = {
1347 .status = asix_status, 1347 .status = asix_status,
1348 .link_reset = ax88172_link_reset, 1348 .link_reset = ax88172_link_reset,
1349 .reset = ax88172_link_reset, 1349 .reset = ax88172_link_reset,
1350 .flags = FLAG_ETHER, 1350 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1351 .data = 0x00130103, 1351 .data = 0x00130103,
1352}; 1352};
1353 1353
@@ -1357,7 +1357,7 @@ static const struct driver_info hawking_uf200_info = {
1357 .status = asix_status, 1357 .status = asix_status,
1358 .link_reset = ax88172_link_reset, 1358 .link_reset = ax88172_link_reset,
1359 .reset = ax88172_link_reset, 1359 .reset = ax88172_link_reset,
1360 .flags = FLAG_ETHER, 1360 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1361 .data = 0x001f1d1f, 1361 .data = 0x001f1d1f,
1362}; 1362};
1363 1363
@@ -1367,7 +1367,7 @@ static const struct driver_info ax88772_info = {
1367 .status = asix_status, 1367 .status = asix_status,
1368 .link_reset = ax88772_link_reset, 1368 .link_reset = ax88772_link_reset,
1369 .reset = ax88772_link_reset, 1369 .reset = ax88772_link_reset,
1370 .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1370 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
1371 .rx_fixup = asix_rx_fixup, 1371 .rx_fixup = asix_rx_fixup,
1372 .tx_fixup = asix_tx_fixup, 1372 .tx_fixup = asix_tx_fixup,
1373}; 1373};
@@ -1378,7 +1378,7 @@ static const struct driver_info ax88178_info = {
1378 .status = asix_status, 1378 .status = asix_status,
1379 .link_reset = ax88178_link_reset, 1379 .link_reset = ax88178_link_reset,
1380 .reset = ax88178_link_reset, 1380 .reset = ax88178_link_reset,
1381 .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1381 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
1382 .rx_fixup = asix_rx_fixup, 1382 .rx_fixup = asix_rx_fixup,
1383 .tx_fixup = asix_tx_fixup, 1383 .tx_fixup = asix_tx_fixup,
1384}; 1384};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e1ba160008..7ec24c9b2535 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -413,13 +413,21 @@ static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
413 413
414static const struct driver_info cdc_info = { 414static const struct driver_info cdc_info = {
415 .description = "CDC Ethernet Device", 415 .description = "CDC Ethernet Device",
416 .flags = FLAG_ETHER, 416 .flags = FLAG_ETHER | FLAG_LINK_INTR,
417 // .check_connect = cdc_check_connect, 417 // .check_connect = cdc_check_connect,
418 .bind = cdc_bind, 418 .bind = cdc_bind,
419 .unbind = usbnet_cdc_unbind, 419 .unbind = usbnet_cdc_unbind,
420 .status = cdc_status, 420 .status = cdc_status,
421}; 421};
422 422
423static const struct driver_info mbm_info = {
424 .description = "Mobile Broadband Network Device",
425 .flags = FLAG_WWAN,
426 .bind = cdc_bind,
427 .unbind = usbnet_cdc_unbind,
428 .status = cdc_status,
429};
430
423/*-------------------------------------------------------------------------*/ 431/*-------------------------------------------------------------------------*/
424 432
425 433
@@ -532,72 +540,72 @@ static const struct usb_device_id products [] = {
532 /* Ericsson F3507g */ 540 /* Ericsson F3507g */
533 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, 541 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
534 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 542 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
535 .driver_info = (unsigned long) &cdc_info, 543 .driver_info = (unsigned long) &mbm_info,
536}, { 544}, {
537 /* Ericsson F3507g ver. 2 */ 545 /* Ericsson F3507g ver. 2 */
538 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM, 546 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
539 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 547 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
540 .driver_info = (unsigned long) &cdc_info, 548 .driver_info = (unsigned long) &mbm_info,
541}, { 549}, {
542 /* Ericsson F3607gw */ 550 /* Ericsson F3607gw */
543 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM, 551 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
544 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 552 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
545 .driver_info = (unsigned long) &cdc_info, 553 .driver_info = (unsigned long) &mbm_info,
546}, { 554}, {
547 /* Ericsson F3607gw ver 2 */ 555 /* Ericsson F3607gw ver 2 */
548 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM, 556 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
549 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 557 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
550 .driver_info = (unsigned long) &cdc_info, 558 .driver_info = (unsigned long) &mbm_info,
551}, { 559}, {
552 /* Ericsson F3607gw ver 3 */ 560 /* Ericsson F3607gw ver 3 */
553 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM, 561 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
554 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 562 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
555 .driver_info = (unsigned long) &cdc_info, 563 .driver_info = (unsigned long) &mbm_info,
556}, { 564}, {
557 /* Ericsson F3307 */ 565 /* Ericsson F3307 */
558 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM, 566 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
559 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 567 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
560 .driver_info = (unsigned long) &cdc_info, 568 .driver_info = (unsigned long) &mbm_info,
561}, { 569}, {
562 /* Ericsson F3307 ver 2 */ 570 /* Ericsson F3307 ver 2 */
563 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM, 571 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
564 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 572 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
565 .driver_info = (unsigned long) &cdc_info, 573 .driver_info = (unsigned long) &mbm_info,
566}, { 574}, {
567 /* Ericsson C3607w */ 575 /* Ericsson C3607w */
568 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM, 576 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
569 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 577 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
570 .driver_info = (unsigned long) &cdc_info, 578 .driver_info = (unsigned long) &mbm_info,
571}, { 579}, {
572 /* Toshiba F3507g */ 580 /* Toshiba F3507g */
573 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 581 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
574 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 582 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
575 .driver_info = (unsigned long) &cdc_info, 583 .driver_info = (unsigned long) &mbm_info,
576}, { 584}, {
577 /* Toshiba F3607gw */ 585 /* Toshiba F3607gw */
578 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM, 586 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
579 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 587 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
580 .driver_info = (unsigned long) &cdc_info, 588 .driver_info = (unsigned long) &mbm_info,
581}, { 589}, {
582 /* Toshiba F3607gw ver 2 */ 590 /* Toshiba F3607gw ver 2 */
583 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM, 591 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 592 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
585 .driver_info = (unsigned long) &cdc_info, 593 .driver_info = (unsigned long) &mbm_info,
586}, { 594}, {
587 /* Dell F3507g */ 595 /* Dell F3507g */
588 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM, 596 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 597 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
590 .driver_info = (unsigned long) &cdc_info, 598 .driver_info = (unsigned long) &mbm_info,
591}, { 599}, {
592 /* Dell F3607gw */ 600 /* Dell F3607gw */
593 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM, 601 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
594 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 602 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
595 .driver_info = (unsigned long) &cdc_info, 603 .driver_info = (unsigned long) &mbm_info,
596}, { 604}, {
597 /* Dell F3607gw ver 2 */ 605 /* Dell F3607gw ver 2 */
598 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM, 606 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
599 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 607 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
600 .driver_info = (unsigned long) &cdc_info, 608 .driver_info = (unsigned long) &mbm_info,
601}, 609},
602 { }, // END 610 { }, // END
603}; 611};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index a2b30a10064f..3d406f9b2f29 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -611,7 +611,7 @@ static int dm9601_link_reset(struct usbnet *dev)
611 611
612static const struct driver_info dm9601_info = { 612static const struct driver_info dm9601_info = {
613 .description = "Davicom DM9601 USB Ethernet", 613 .description = "Davicom DM9601 USB Ethernet",
614 .flags = FLAG_ETHER, 614 .flags = FLAG_ETHER | FLAG_LINK_INTR,
615 .bind = dm9601_bind, 615 .bind = dm9601_bind,
616 .rx_fixup = dm9601_rx_fixup, 616 .rx_fixup = dm9601_rx_fixup,
617 .tx_fixup = dm9601_tx_fixup, 617 .tx_fixup = dm9601_tx_fixup,
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e391ef969c28..3b80e8d2d621 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -471,16 +471,7 @@ static int kaweth_reset(struct kaweth_device *kaweth)
471 int result; 471 int result;
472 472
473 dbg("kaweth_reset(%p)", kaweth); 473 dbg("kaweth_reset(%p)", kaweth);
474 result = kaweth_control(kaweth, 474 result = usb_reset_configuration(kaweth->dev);
475 usb_sndctrlpipe(kaweth->dev, 0),
476 USB_REQ_SET_CONFIGURATION,
477 0,
478 kaweth->dev->config[0].desc.bConfigurationValue,
479 0,
480 NULL,
481 0,
482 KAWETH_CONTROL_TIMEOUT);
483
484 mdelay(10); 475 mdelay(10);
485 476
486 dbg("kaweth_reset() returns %d.",result); 477 dbg("kaweth_reset() returns %d.",result);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca5ca5ae061d..04f3f289e87c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1210,6 +1210,14 @@ static const struct net_device_ops usbnet_netdev_ops = {
1210 1210
1211// precondition: never called in_interrupt 1211// precondition: never called in_interrupt
1212 1212
1213static struct device_type wlan_type = {
1214 .name = "wlan",
1215};
1216
1217static struct device_type wwan_type = {
1218 .name = "wwan",
1219};
1220
1213int 1221int
1214usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) 1222usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1215{ 1223{
@@ -1295,6 +1303,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1295 /* WLAN devices should always be named "wlan%d" */ 1303 /* WLAN devices should always be named "wlan%d" */
1296 if ((dev->driver_info->flags & FLAG_WLAN) != 0) 1304 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1297 strcpy(net->name, "wlan%d"); 1305 strcpy(net->name, "wlan%d");
1306 /* WWAN devices should always be named "wwan%d" */
1307 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1308 strcpy(net->name, "wwan%d");
1298 1309
1299 /* maybe the remote can't receive an Ethernet MTU */ 1310 /* maybe the remote can't receive an Ethernet MTU */
1300 if (net->mtu > (dev->hard_mtu - net->hard_header_len)) 1311 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
@@ -1322,6 +1333,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1322 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); 1333 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1323 1334
1324 SET_NETDEV_DEV(net, &udev->dev); 1335 SET_NETDEV_DEV(net, &udev->dev);
1336
1337 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1338 SET_NETDEV_DEVTYPE(net, &wlan_type);
1339 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1340 SET_NETDEV_DEVTYPE(net, &wwan_type);
1341
1325 status = register_netdev (net); 1342 status = register_netdev (net);
1326 if (status) 1343 if (status)
1327 goto out3; 1344 goto out3;
@@ -1335,9 +1352,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1335 // ok, it's ready to go. 1352 // ok, it's ready to go.
1336 usb_set_intfdata (udev, dev); 1353 usb_set_intfdata (udev, dev);
1337 1354
1338 // start as if the link is up
1339 netif_device_attach (net); 1355 netif_device_attach (net);
1340 1356
1357 if (dev->driver_info->flags & FLAG_LINK_INTR)
1358 netif_carrier_off(net);
1359
1341 return 0; 1360 return 0;
1342 1361
1343out3: 1362out3:
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 52af5017c46b..63099c58a6dd 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -155,8 +155,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
155 struct veth_net_stats *stats, *rcv_stats; 155 struct veth_net_stats *stats, *rcv_stats;
156 int length, cpu; 156 int length, cpu;
157 157
158 skb_orphan(skb);
159
160 priv = netdev_priv(dev); 158 priv = netdev_priv(dev);
161 rcv = priv->peer; 159 rcv = priv->peer;
162 rcv_priv = netdev_priv(rcv); 160 rcv_priv = netdev_priv(rcv);
@@ -168,20 +166,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
168 if (!(rcv->flags & IFF_UP)) 166 if (!(rcv->flags & IFF_UP))
169 goto tx_drop; 167 goto tx_drop;
170 168
171 if (skb->len > (rcv->mtu + MTU_PAD))
172 goto rx_drop;
173
174 skb->tstamp.tv64 = 0;
175 skb->pkt_type = PACKET_HOST;
176 skb->protocol = eth_type_trans(skb, rcv);
177 if (dev->features & NETIF_F_NO_CSUM) 169 if (dev->features & NETIF_F_NO_CSUM)
178 skb->ip_summed = rcv_priv->ip_summed; 170 skb->ip_summed = rcv_priv->ip_summed;
179 171
180 skb->mark = 0; 172 length = skb->len + ETH_HLEN;
181 secpath_reset(skb); 173 if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
182 nf_reset(skb); 174 goto rx_drop;
183
184 length = skb->len;
185 175
186 stats->tx_bytes += length; 176 stats->tx_bytes += length;
187 stats->tx_packets++; 177 stats->tx_packets++;
@@ -189,7 +179,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
189 rcv_stats->rx_bytes += length; 179 rcv_stats->rx_bytes += length;
190 rcv_stats->rx_packets++; 180 rcv_stats->rx_packets++;
191 181
192 netif_rx(skb);
193 return NETDEV_TX_OK; 182 return NETDEV_TX_OK;
194 183
195tx_drop: 184tx_drop:
@@ -337,7 +326,7 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
337 326
338static struct rtnl_link_ops veth_link_ops; 327static struct rtnl_link_ops veth_link_ops;
339 328
340static int veth_newlink(struct net_device *dev, 329static int veth_newlink(struct net *src_net, struct net_device *dev,
341 struct nlattr *tb[], struct nlattr *data[]) 330 struct nlattr *tb[], struct nlattr *data[])
342{ 331{
343 int err; 332 int err;
@@ -345,6 +334,7 @@ static int veth_newlink(struct net_device *dev,
345 struct veth_priv *priv; 334 struct veth_priv *priv;
346 char ifname[IFNAMSIZ]; 335 char ifname[IFNAMSIZ];
347 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; 336 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
337 struct net *net;
348 338
349 /* 339 /*
350 * create and register peer first 340 * create and register peer first
@@ -377,14 +367,22 @@ static int veth_newlink(struct net_device *dev,
377 else 367 else
378 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); 368 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
379 369
380 peer = rtnl_create_link(dev_net(dev), ifname, &veth_link_ops, tbp); 370 net = rtnl_link_get_net(src_net, tbp);
381 if (IS_ERR(peer)) 371 if (IS_ERR(net))
372 return PTR_ERR(net);
373
374 peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
375 if (IS_ERR(peer)) {
376 put_net(net);
382 return PTR_ERR(peer); 377 return PTR_ERR(peer);
378 }
383 379
384 if (tbp[IFLA_ADDRESS] == NULL) 380 if (tbp[IFLA_ADDRESS] == NULL)
385 random_ether_addr(peer->dev_addr); 381 random_ether_addr(peer->dev_addr);
386 382
387 err = register_netdevice(peer); 383 err = register_netdevice(peer);
384 put_net(net);
385 net = NULL;
388 if (err < 0) 386 if (err < 0)
389 goto err_register_peer; 387 goto err_register_peer;
390 388
@@ -439,7 +437,7 @@ err_register_peer:
439 return err; 437 return err;
440} 438}
441 439
442static void veth_dellink(struct net_device *dev) 440static void veth_dellink(struct net_device *dev, struct list_head *head)
443{ 441{
444 struct veth_priv *priv; 442 struct veth_priv *priv;
445 struct net_device *peer; 443 struct net_device *peer;
@@ -447,8 +445,8 @@ static void veth_dellink(struct net_device *dev)
447 priv = netdev_priv(dev); 445 priv = netdev_priv(dev);
448 peer = priv->peer; 446 peer = priv->peer;
449 447
450 unregister_netdevice(dev); 448 unregister_netdevice_queue(dev, head);
451 unregister_netdevice(peer); 449 unregister_netdevice_queue(peer, head);
452} 450}
453 451
454static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; 452static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 1fd70583be44..ec94ddf01f56 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1150,7 +1150,7 @@ static int rhine_open(struct net_device *dev)
1150 void __iomem *ioaddr = rp->base; 1150 void __iomem *ioaddr = rp->base;
1151 int rc; 1151 int rc;
1152 1152
1153 rc = request_irq(rp->pdev->irq, &rhine_interrupt, IRQF_SHARED, dev->name, 1153 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1154 dev); 1154 dev);
1155 if (rc) 1155 if (rc)
1156 return rc; 1156 return rc;
@@ -1484,15 +1484,15 @@ static int rhine_rx(struct net_device *dev, int limit)
1484 } 1484 }
1485 } 1485 }
1486 } else { 1486 } else {
1487 struct sk_buff *skb; 1487 struct sk_buff *skb = NULL;
1488 /* Length should omit the CRC */ 1488 /* Length should omit the CRC */
1489 int pkt_len = data_size - 4; 1489 int pkt_len = data_size - 4;
1490 1490
1491 /* Check if the packet is long enough to accept without 1491 /* Check if the packet is long enough to accept without
1492 copying to a minimally-sized skbuff. */ 1492 copying to a minimally-sized skbuff. */
1493 if (pkt_len < rx_copybreak && 1493 if (pkt_len < rx_copybreak)
1494 (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) { 1494 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1495 skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */ 1495 if (skb) {
1496 pci_dma_sync_single_for_cpu(rp->pdev, 1496 pci_dma_sync_single_for_cpu(rp->pdev,
1497 rp->rx_skbuff_dma[entry], 1497 rp->rx_skbuff_dma[entry],
1498 rp->rx_buf_sz, 1498 rp->rx_buf_sz,
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e04e5bee005c..d4eac2a14427 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -9,7 +9,6 @@
9 * 9 *
10 * TODO 10 * TODO
11 * rx_copybreak/alignment 11 * rx_copybreak/alignment
12 * Scatter gather
13 * More testing 12 * More testing
14 * 13 *
15 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk> 14 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
@@ -275,7 +274,7 @@ VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
275 274
276#define DMA_LENGTH_MIN 0 275#define DMA_LENGTH_MIN 0
277#define DMA_LENGTH_MAX 7 276#define DMA_LENGTH_MAX 7
278#define DMA_LENGTH_DEF 0 277#define DMA_LENGTH_DEF 6
279 278
280/* DMA_length[] is used for controlling the DMA length 279/* DMA_length[] is used for controlling the DMA length
281 0: 8 DWORDs 280 0: 8 DWORDs
@@ -298,14 +297,6 @@ VELOCITY_PARAM(DMA_length, "DMA length");
298*/ 297*/
299VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned"); 298VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
300 299
301#define TX_CSUM_DEF 1
302/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
303 (We only support RX checksum offload now)
304 0: disable csum_offload[checksum offload
305 1: enable checksum offload. (Default)
306*/
307VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");
308
309#define FLOW_CNTL_DEF 1 300#define FLOW_CNTL_DEF 1
310#define FLOW_CNTL_MIN 1 301#define FLOW_CNTL_MIN 1
311#define FLOW_CNTL_MAX 5 302#define FLOW_CNTL_MAX 5
@@ -354,21 +345,10 @@ VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
354*/ 345*/
355VELOCITY_PARAM(wol_opts, "Wake On Lan options"); 346VELOCITY_PARAM(wol_opts, "Wake On Lan options");
356 347
357#define INT_WORKS_DEF 20
358#define INT_WORKS_MIN 10
359#define INT_WORKS_MAX 64
360
361VELOCITY_PARAM(int_works, "Number of packets per interrupt services");
362
363static int rx_copybreak = 200; 348static int rx_copybreak = 200;
364module_param(rx_copybreak, int, 0644); 349module_param(rx_copybreak, int, 0644);
365MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 350MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
366 351
367#ifdef CONFIG_PM
368static DEFINE_SPINLOCK(velocity_dev_list_lock);
369static LIST_HEAD(velocity_dev_list);
370#endif
371
372/* 352/*
373 * Internal board variants. At the moment we have only one 353 * Internal board variants. At the moment we have only one
374 */ 354 */
@@ -417,14 +397,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
417 struct net_device *dev = pci_get_drvdata(pdev); 397 struct net_device *dev = pci_get_drvdata(pdev);
418 struct velocity_info *vptr = netdev_priv(dev); 398 struct velocity_info *vptr = netdev_priv(dev);
419 399
420#ifdef CONFIG_PM
421 unsigned long flags;
422
423 spin_lock_irqsave(&velocity_dev_list_lock, flags);
424 if (!list_empty(&velocity_dev_list))
425 list_del(&vptr->list);
426 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
427#endif
428 unregister_netdev(dev); 400 unregister_netdev(dev);
429 iounmap(vptr->mac_regs); 401 iounmap(vptr->mac_regs);
430 pci_release_regions(pdev); 402 pci_release_regions(pdev);
@@ -510,13 +482,11 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
510 velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname); 482 velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
511 velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname); 483 velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
512 484
513 velocity_set_bool_opt(&opts->flags, txcsum_offload[index], TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM, "txcsum_offload", devname);
514 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname); 485 velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
515 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname); 486 velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
516 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname); 487 velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
517 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname); 488 velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
518 velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname); 489 velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
519 velocity_set_int_opt((int *) &opts->int_works, int_works[index], INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF, "Interrupt service works", devname);
520 opts->numrx = (opts->numrx & ~3); 490 opts->numrx = (opts->numrx & ~3);
521} 491}
522 492
@@ -1259,6 +1229,66 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
1259 } 1229 }
1260} 1230}
1261 1231
1232/**
1233 * setup_queue_timers - Setup interrupt timers
1234 *
1235 * Setup interrupt frequency during suppression (timeout if the frame
1236 * count isn't filled).
1237 */
1238static void setup_queue_timers(struct velocity_info *vptr)
1239{
1240 /* Only for newer revisions */
1241 if (vptr->rev_id >= REV_ID_VT3216_A0) {
1242 u8 txqueue_timer = 0;
1243 u8 rxqueue_timer = 0;
1244
1245 if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1246 VELOCITY_SPEED_100)) {
1247 txqueue_timer = vptr->options.txqueue_timer;
1248 rxqueue_timer = vptr->options.rxqueue_timer;
1249 }
1250
1251 writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1252 writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1253 }
1254}
1255/**
1256 * setup_adaptive_interrupts - Setup interrupt suppression
1257 *
1258 * @vptr velocity adapter
1259 *
1260 * The velocity is able to suppress interrupt during high interrupt load.
1261 * This function turns on that feature.
1262 */
1263static void setup_adaptive_interrupts(struct velocity_info *vptr)
1264{
1265 struct mac_regs __iomem *regs = vptr->mac_regs;
1266 u16 tx_intsup = vptr->options.tx_intsup;
1267 u16 rx_intsup = vptr->options.rx_intsup;
1268
1269 /* Setup default interrupt mask (will be changed below) */
1270 vptr->int_mask = INT_MASK_DEF;
1271
1272 /* Set Tx Interrupt Suppression Threshold */
1273 writeb(CAMCR_PS0, &regs->CAMCR);
1274 if (tx_intsup != 0) {
1275 vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1276 ISR_PTX2I | ISR_PTX3I);
1277 writew(tx_intsup, &regs->ISRCTL);
1278 } else
1279 writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1280
1281 /* Set Rx Interrupt Suppression Threshold */
1282 writeb(CAMCR_PS1, &regs->CAMCR);
1283 if (rx_intsup != 0) {
1284 vptr->int_mask &= ~ISR_PRXI;
1285 writew(rx_intsup, &regs->ISRCTL);
1286 } else
1287 writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1288
1289 /* Select page to interrupt hold timer */
1290 writeb(0, &regs->CAMCR);
1291}
1262 1292
1263/** 1293/**
1264 * velocity_init_registers - initialise MAC registers 1294 * velocity_init_registers - initialise MAC registers
@@ -1345,7 +1375,7 @@ static void velocity_init_registers(struct velocity_info *vptr,
1345 */ 1375 */
1346 enable_mii_autopoll(regs); 1376 enable_mii_autopoll(regs);
1347 1377
1348 vptr->int_mask = INT_MASK_DEF; 1378 setup_adaptive_interrupts(vptr);
1349 1379
1350 writel(vptr->rx.pool_dma, &regs->RDBaseLo); 1380 writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1351 writew(vptr->options.numrx - 1, &regs->RDCSize); 1381 writew(vptr->options.numrx - 1, &regs->RDCSize);
@@ -1483,7 +1513,8 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1483 * Do the gymnastics to get the buffer head for data at 1513 * Do the gymnastics to get the buffer head for data at
1484 * 64byte alignment. 1514 * 64byte alignment.
1485 */ 1515 */
1486 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); 1516 skb_reserve(rd_info->skb,
1517 64 - ((unsigned long) rd_info->skb->data & 63));
1487 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, 1518 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1488 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 1519 vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1489 1520
@@ -1602,12 +1633,10 @@ out:
1602 */ 1633 */
1603static int velocity_init_td_ring(struct velocity_info *vptr) 1634static int velocity_init_td_ring(struct velocity_info *vptr)
1604{ 1635{
1605 dma_addr_t curr;
1606 int j; 1636 int j;
1607 1637
1608 /* Init the TD ring entries */ 1638 /* Init the TD ring entries */
1609 for (j = 0; j < vptr->tx.numq; j++) { 1639 for (j = 0; j < vptr->tx.numq; j++) {
1610 curr = vptr->tx.pool_dma[j];
1611 1640
1612 vptr->tx.infos[j] = kcalloc(vptr->options.numtx, 1641 vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1613 sizeof(struct velocity_td_info), 1642 sizeof(struct velocity_td_info),
@@ -1673,21 +1702,27 @@ err_free_dma_rings_0:
1673 * Release an transmit buffer. If the buffer was preallocated then 1702 * Release an transmit buffer. If the buffer was preallocated then
1674 * recycle it, if not then unmap the buffer. 1703 * recycle it, if not then unmap the buffer.
1675 */ 1704 */
1676static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_info *tdinfo) 1705static void velocity_free_tx_buf(struct velocity_info *vptr,
1706 struct velocity_td_info *tdinfo, struct tx_desc *td)
1677{ 1707{
1678 struct sk_buff *skb = tdinfo->skb; 1708 struct sk_buff *skb = tdinfo->skb;
1679 int i;
1680 int pktlen;
1681 1709
1682 /* 1710 /*
1683 * Don't unmap the pre-allocated tx_bufs 1711 * Don't unmap the pre-allocated tx_bufs
1684 */ 1712 */
1685 if (tdinfo->skb_dma) { 1713 if (tdinfo->skb_dma) {
1714 int i;
1686 1715
1687 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
1688 for (i = 0; i < tdinfo->nskb_dma; i++) { 1716 for (i = 0; i < tdinfo->nskb_dma; i++) {
1689 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE); 1717 size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1690 tdinfo->skb_dma[i] = 0; 1718
1719 /* For scatter-gather */
1720 if (skb_shinfo(skb)->nr_frags > 0)
1721 pktlen = max_t(size_t, pktlen,
1722 td->td_buf[i].size & ~TD_QUEUE);
1723
1724 pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1725 le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
1691 } 1726 }
1692 } 1727 }
1693 dev_kfree_skb_irq(skb); 1728 dev_kfree_skb_irq(skb);
@@ -1801,6 +1836,8 @@ static void velocity_error(struct velocity_info *vptr, int status)
1801 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG); 1836 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1802 else 1837 else
1803 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG); 1838 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1839
1840 setup_queue_timers(vptr);
1804 } 1841 }
1805 /* 1842 /*
1806 * Get link status from PHYSR0 1843 * Get link status from PHYSR0
@@ -1887,7 +1924,7 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status)
1887 stats->tx_packets++; 1924 stats->tx_packets++;
1888 stats->tx_bytes += tdinfo->skb->len; 1925 stats->tx_bytes += tdinfo->skb->len;
1889 } 1926 }
1890 velocity_free_tx_buf(vptr, tdinfo); 1927 velocity_free_tx_buf(vptr, tdinfo, td);
1891 vptr->tx.used[qnum]--; 1928 vptr->tx.used[qnum]--;
1892 } 1929 }
1893 vptr->tx.tail[qnum] = idx; 1930 vptr->tx.tail[qnum] = idx;
@@ -1949,10 +1986,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1949 if (pkt_size < rx_copybreak) { 1986 if (pkt_size < rx_copybreak) {
1950 struct sk_buff *new_skb; 1987 struct sk_buff *new_skb;
1951 1988
1952 new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2); 1989 new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1953 if (new_skb) { 1990 if (new_skb) {
1954 new_skb->ip_summed = rx_skb[0]->ip_summed; 1991 new_skb->ip_summed = rx_skb[0]->ip_summed;
1955 skb_reserve(new_skb, 2);
1956 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); 1992 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1957 *rx_skb = new_skb; 1993 *rx_skb = new_skb;
1958 ret = 0; 1994 ret = 0;
@@ -2060,13 +2096,14 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2060 * any received packets from the receive queue. Hand the ring 2096 * any received packets from the receive queue. Hand the ring
2061 * slots back to the adapter for reuse. 2097 * slots back to the adapter for reuse.
2062 */ 2098 */
2063static int velocity_rx_srv(struct velocity_info *vptr, int status) 2099static int velocity_rx_srv(struct velocity_info *vptr, int status,
2100 int budget_left)
2064{ 2101{
2065 struct net_device_stats *stats = &vptr->dev->stats; 2102 struct net_device_stats *stats = &vptr->dev->stats;
2066 int rd_curr = vptr->rx.curr; 2103 int rd_curr = vptr->rx.curr;
2067 int works = 0; 2104 int works = 0;
2068 2105
2069 do { 2106 while (works < budget_left) {
2070 struct rx_desc *rd = vptr->rx.ring + rd_curr; 2107 struct rx_desc *rd = vptr->rx.ring + rd_curr;
2071 2108
2072 if (!vptr->rx.info[rd_curr].skb) 2109 if (!vptr->rx.info[rd_curr].skb)
@@ -2097,7 +2134,8 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
2097 rd_curr++; 2134 rd_curr++;
2098 if (rd_curr >= vptr->options.numrx) 2135 if (rd_curr >= vptr->options.numrx)
2099 rd_curr = 0; 2136 rd_curr = 0;
2100 } while (++works <= 15); 2137 works++;
2138 }
2101 2139
2102 vptr->rx.curr = rd_curr; 2140 vptr->rx.curr = rd_curr;
2103 2141
@@ -2108,6 +2146,40 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status)
2108 return works; 2146 return works;
2109} 2147}
2110 2148
2149static int velocity_poll(struct napi_struct *napi, int budget)
2150{
2151 struct velocity_info *vptr = container_of(napi,
2152 struct velocity_info, napi);
2153 unsigned int rx_done;
2154 u32 isr_status;
2155
2156 spin_lock(&vptr->lock);
2157 isr_status = mac_read_isr(vptr->mac_regs);
2158
2159 /* Ack the interrupt */
2160 mac_write_isr(vptr->mac_regs, isr_status);
2161 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2162 velocity_error(vptr, isr_status);
2163
2164 /*
2165 * Do rx and tx twice for performance (taken from the VIA
2166 * out-of-tree driver).
2167 */
2168 rx_done = velocity_rx_srv(vptr, isr_status, budget / 2);
2169 velocity_tx_srv(vptr, isr_status);
2170 rx_done += velocity_rx_srv(vptr, isr_status, budget - rx_done);
2171 velocity_tx_srv(vptr, isr_status);
2172
2173 spin_unlock(&vptr->lock);
2174
2175 /* If budget not fully consumed, exit the polling mode */
2176 if (rx_done < budget) {
2177 napi_complete(napi);
2178 mac_enable_int(vptr->mac_regs);
2179 }
2180
2181 return rx_done;
2182}
2111 2183
2112/** 2184/**
2113 * velocity_intr - interrupt callback 2185 * velocity_intr - interrupt callback
@@ -2124,8 +2196,6 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
2124 struct net_device *dev = dev_instance; 2196 struct net_device *dev = dev_instance;
2125 struct velocity_info *vptr = netdev_priv(dev); 2197 struct velocity_info *vptr = netdev_priv(dev);
2126 u32 isr_status; 2198 u32 isr_status;
2127 int max_count = 0;
2128
2129 2199
2130 spin_lock(&vptr->lock); 2200 spin_lock(&vptr->lock);
2131 isr_status = mac_read_isr(vptr->mac_regs); 2201 isr_status = mac_read_isr(vptr->mac_regs);
@@ -2136,32 +2206,13 @@ static irqreturn_t velocity_intr(int irq, void *dev_instance)
2136 return IRQ_NONE; 2206 return IRQ_NONE;
2137 } 2207 }
2138 2208
2139 mac_disable_int(vptr->mac_regs); 2209 if (likely(napi_schedule_prep(&vptr->napi))) {
2140 2210 mac_disable_int(vptr->mac_regs);
2141 /* 2211 __napi_schedule(&vptr->napi);
2142 * Keep processing the ISR until we have completed
2143 * processing and the isr_status becomes zero
2144 */
2145
2146 while (isr_status != 0) {
2147 mac_write_isr(vptr->mac_regs, isr_status);
2148 if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2149 velocity_error(vptr, isr_status);
2150 if (isr_status & (ISR_PRXI | ISR_PPRXI))
2151 max_count += velocity_rx_srv(vptr, isr_status);
2152 if (isr_status & (ISR_PTXI | ISR_PPTXI))
2153 max_count += velocity_tx_srv(vptr, isr_status);
2154 isr_status = mac_read_isr(vptr->mac_regs);
2155 if (max_count > vptr->options.int_works) {
2156 printk(KERN_WARNING "%s: excessive work at interrupt.\n",
2157 dev->name);
2158 max_count = 0;
2159 }
2160 } 2212 }
2161 spin_unlock(&vptr->lock); 2213 spin_unlock(&vptr->lock);
2162 mac_enable_int(vptr->mac_regs);
2163 return IRQ_HANDLED;
2164 2214
2215 return IRQ_HANDLED;
2165} 2216}
2166 2217
2167/** 2218/**
@@ -2190,7 +2241,7 @@ static int velocity_open(struct net_device *dev)
2190 2241
2191 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2242 velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2192 2243
2193 ret = request_irq(vptr->pdev->irq, &velocity_intr, IRQF_SHARED, 2244 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2194 dev->name, dev); 2245 dev->name, dev);
2195 if (ret < 0) { 2246 if (ret < 0) {
2196 /* Power down the chip */ 2247 /* Power down the chip */
@@ -2201,6 +2252,7 @@ static int velocity_open(struct net_device *dev)
2201 2252
2202 mac_enable_int(vptr->mac_regs); 2253 mac_enable_int(vptr->mac_regs);
2203 netif_start_queue(dev); 2254 netif_start_queue(dev);
2255 napi_enable(&vptr->napi);
2204 vptr->flags |= VELOCITY_FLAGS_OPENED; 2256 vptr->flags |= VELOCITY_FLAGS_OPENED;
2205out: 2257out:
2206 return ret; 2258 return ret;
@@ -2436,6 +2488,7 @@ static int velocity_close(struct net_device *dev)
2436{ 2488{
2437 struct velocity_info *vptr = netdev_priv(dev); 2489 struct velocity_info *vptr = netdev_priv(dev);
2438 2490
2491 napi_disable(&vptr->napi);
2439 netif_stop_queue(dev); 2492 netif_stop_queue(dev);
2440 velocity_shutdown(vptr); 2493 velocity_shutdown(vptr);
2441 2494
@@ -2470,14 +2523,22 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2470 struct velocity_td_info *tdinfo; 2523 struct velocity_td_info *tdinfo;
2471 unsigned long flags; 2524 unsigned long flags;
2472 int pktlen; 2525 int pktlen;
2473 __le16 len; 2526 int index, prev;
2474 int index; 2527 int i = 0;
2475 2528
2476 if (skb_padto(skb, ETH_ZLEN)) 2529 if (skb_padto(skb, ETH_ZLEN))
2477 goto out; 2530 goto out;
2478 pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
2479 2531
2480 len = cpu_to_le16(pktlen); 2532 /* The hardware can handle at most 7 memory segments, so merge
2533 * the skb if there are more */
2534 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2535 kfree_skb(skb);
2536 return NETDEV_TX_OK;
2537 }
2538
2539 pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2540 max_t(unsigned int, skb->len, ETH_ZLEN) :
2541 skb_headlen(skb);
2481 2542
2482 spin_lock_irqsave(&vptr->lock, flags); 2543 spin_lock_irqsave(&vptr->lock, flags);
2483 2544
@@ -2494,11 +2555,24 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2494 */ 2555 */
2495 tdinfo->skb = skb; 2556 tdinfo->skb = skb;
2496 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); 2557 tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2497 td_ptr->tdesc0.len = len; 2558 td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2498 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2559 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2499 td_ptr->td_buf[0].pa_high = 0; 2560 td_ptr->td_buf[0].pa_high = 0;
2500 td_ptr->td_buf[0].size = len; 2561 td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2501 tdinfo->nskb_dma = 1; 2562
2563 /* Handle fragments */
2564 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2565 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2566
2567 tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
2568 frag->page_offset, frag->size,
2569 PCI_DMA_TODEVICE);
2570
2571 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2572 td_ptr->td_buf[i + 1].pa_high = 0;
2573 td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
2574 }
2575 tdinfo->nskb_dma = i + 1;
2502 2576
2503 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16; 2577 td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2504 2578
@@ -2510,7 +2584,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2510 /* 2584 /*
2511 * Handle hardware checksum 2585 * Handle hardware checksum
2512 */ 2586 */
2513 if ((vptr->flags & VELOCITY_FLAGS_TX_CSUM) 2587 if ( (dev->features & NETIF_F_IP_CSUM)
2514 && (skb->ip_summed == CHECKSUM_PARTIAL)) { 2588 && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2515 const struct iphdr *ip = ip_hdr(skb); 2589 const struct iphdr *ip = ip_hdr(skb);
2516 if (ip->protocol == IPPROTO_TCP) 2590 if (ip->protocol == IPPROTO_TCP)
@@ -2519,23 +2593,21 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2519 td_ptr->tdesc1.TCR |= (TCR0_UDPCK); 2593 td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2520 td_ptr->tdesc1.TCR |= TCR0_IPCK; 2594 td_ptr->tdesc1.TCR |= TCR0_IPCK;
2521 } 2595 }
2522 {
2523 2596
2524 int prev = index - 1; 2597 prev = index - 1;
2598 if (prev < 0)
2599 prev = vptr->options.numtx - 1;
2600 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2601 vptr->tx.used[qnum]++;
2602 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2525 2603
2526 if (prev < 0) 2604 if (AVAIL_TD(vptr, qnum) < 1)
2527 prev = vptr->options.numtx - 1; 2605 netif_stop_queue(dev);
2528 td_ptr->tdesc0.len |= OWNED_BY_NIC;
2529 vptr->tx.used[qnum]++;
2530 vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2531 2606
2532 if (AVAIL_TD(vptr, qnum) < 1) 2607 td_ptr = &(vptr->tx.rings[qnum][prev]);
2533 netif_stop_queue(dev); 2608 td_ptr->td_buf[0].size |= TD_QUEUE;
2609 mac_tx_queue_wake(vptr->mac_regs, qnum);
2534 2610
2535 td_ptr = &(vptr->tx.rings[qnum][prev]);
2536 td_ptr->td_buf[0].size |= TD_QUEUE;
2537 mac_tx_queue_wake(vptr->mac_regs, qnum);
2538 }
2539 dev->trans_start = jiffies; 2611 dev->trans_start = jiffies;
2540 spin_unlock_irqrestore(&vptr->lock, flags); 2612 spin_unlock_irqrestore(&vptr->lock, flags);
2541out: 2613out:
@@ -2578,7 +2650,6 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
2578 vptr->tx.numq = info->txqueue; 2650 vptr->tx.numq = info->txqueue;
2579 vptr->multicast_limit = MCAM_SIZE; 2651 vptr->multicast_limit = MCAM_SIZE;
2580 spin_lock_init(&vptr->lock); 2652 spin_lock_init(&vptr->lock);
2581 INIT_LIST_HEAD(&vptr->list);
2582} 2653}
2583 2654
2584/** 2655/**
@@ -2755,12 +2826,10 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2755 dev->irq = pdev->irq; 2826 dev->irq = pdev->irq;
2756 dev->netdev_ops = &velocity_netdev_ops; 2827 dev->netdev_ops = &velocity_netdev_ops;
2757 dev->ethtool_ops = &velocity_ethtool_ops; 2828 dev->ethtool_ops = &velocity_ethtool_ops;
2829 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2758 2830
2759 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | 2831 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2760 NETIF_F_HW_VLAN_RX; 2832 NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2761
2762 if (vptr->flags & VELOCITY_FLAGS_TX_CSUM)
2763 dev->features |= NETIF_F_IP_CSUM;
2764 2833
2765 ret = register_netdev(dev); 2834 ret = register_netdev(dev);
2766 if (ret < 0) 2835 if (ret < 0)
@@ -2777,15 +2846,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2777 /* and leave the chip powered down */ 2846 /* and leave the chip powered down */
2778 2847
2779 pci_set_power_state(pdev, PCI_D3hot); 2848 pci_set_power_state(pdev, PCI_D3hot);
2780#ifdef CONFIG_PM
2781 {
2782 unsigned long flags;
2783
2784 spin_lock_irqsave(&velocity_dev_list_lock, flags);
2785 list_add(&vptr->list, &velocity_dev_list);
2786 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
2787 }
2788#endif
2789 velocity_nics++; 2849 velocity_nics++;
2790out: 2850out:
2791 return ret; 2851 return ret;
@@ -3222,15 +3282,114 @@ static void velocity_set_msglevel(struct net_device *dev, u32 value)
3222 msglevel = value; 3282 msglevel = value;
3223} 3283}
3224 3284
3285static int get_pending_timer_val(int val)
3286{
3287 int mult_bits = val >> 6;
3288 int mult = 1;
3289
3290 switch (mult_bits)
3291 {
3292 case 1:
3293 mult = 4; break;
3294 case 2:
3295 mult = 16; break;
3296 case 3:
3297 mult = 64; break;
3298 case 0:
3299 default:
3300 break;
3301 }
3302
3303 return (val & 0x3f) * mult;
3304}
3305
3306static void set_pending_timer_val(int *val, u32 us)
3307{
3308 u8 mult = 0;
3309 u8 shift = 0;
3310
3311 if (us >= 0x3f) {
3312 mult = 1; /* mult with 4 */
3313 shift = 2;
3314 }
3315 if (us >= 0x3f * 4) {
3316 mult = 2; /* mult with 16 */
3317 shift = 4;
3318 }
3319 if (us >= 0x3f * 16) {
3320 mult = 3; /* mult with 64 */
3321 shift = 6;
3322 }
3323
3324 *val = (mult << 6) | ((us >> shift) & 0x3f);
3325}
3326
3327
3328static int velocity_get_coalesce(struct net_device *dev,
3329 struct ethtool_coalesce *ecmd)
3330{
3331 struct velocity_info *vptr = netdev_priv(dev);
3332
3333 ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3334 ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3335
3336 ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3337 ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3338
3339 return 0;
3340}
3341
3342static int velocity_set_coalesce(struct net_device *dev,
3343 struct ethtool_coalesce *ecmd)
3344{
3345 struct velocity_info *vptr = netdev_priv(dev);
3346 int max_us = 0x3f * 64;
3347
3348 /* 6 bits of */
3349 if (ecmd->tx_coalesce_usecs > max_us)
3350 return -EINVAL;
3351 if (ecmd->rx_coalesce_usecs > max_us)
3352 return -EINVAL;
3353
3354 if (ecmd->tx_max_coalesced_frames > 0xff)
3355 return -EINVAL;
3356 if (ecmd->rx_max_coalesced_frames > 0xff)
3357 return -EINVAL;
3358
3359 vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3360 vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3361
3362 set_pending_timer_val(&vptr->options.rxqueue_timer,
3363 ecmd->rx_coalesce_usecs);
3364 set_pending_timer_val(&vptr->options.txqueue_timer,
3365 ecmd->tx_coalesce_usecs);
3366
3367 /* Setup the interrupt suppression and queue timers */
3368 mac_disable_int(vptr->mac_regs);
3369 setup_adaptive_interrupts(vptr);
3370 setup_queue_timers(vptr);
3371
3372 mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3373 mac_clear_isr(vptr->mac_regs);
3374 mac_enable_int(vptr->mac_regs);
3375
3376 return 0;
3377}
3378
3225static const struct ethtool_ops velocity_ethtool_ops = { 3379static const struct ethtool_ops velocity_ethtool_ops = {
3226 .get_settings = velocity_get_settings, 3380 .get_settings = velocity_get_settings,
3227 .set_settings = velocity_set_settings, 3381 .set_settings = velocity_set_settings,
3228 .get_drvinfo = velocity_get_drvinfo, 3382 .get_drvinfo = velocity_get_drvinfo,
3383 .set_tx_csum = ethtool_op_set_tx_csum,
3384 .get_tx_csum = ethtool_op_get_tx_csum,
3229 .get_wol = velocity_ethtool_get_wol, 3385 .get_wol = velocity_ethtool_get_wol,
3230 .set_wol = velocity_ethtool_set_wol, 3386 .set_wol = velocity_ethtool_set_wol,
3231 .get_msglevel = velocity_get_msglevel, 3387 .get_msglevel = velocity_get_msglevel,
3232 .set_msglevel = velocity_set_msglevel, 3388 .set_msglevel = velocity_set_msglevel,
3389 .set_sg = ethtool_op_set_sg,
3233 .get_link = velocity_get_link, 3390 .get_link = velocity_get_link,
3391 .get_coalesce = velocity_get_coalesce,
3392 .set_coalesce = velocity_set_coalesce,
3234 .begin = velocity_ethtool_up, 3393 .begin = velocity_ethtool_up,
3235 .complete = velocity_ethtool_down 3394 .complete = velocity_ethtool_down
3236}; 3395};
@@ -3241,20 +3400,10 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
3241{ 3400{
3242 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 3401 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3243 struct net_device *dev = ifa->ifa_dev->dev; 3402 struct net_device *dev = ifa->ifa_dev->dev;
3244 struct velocity_info *vptr;
3245 unsigned long flags;
3246 3403
3247 if (dev_net(dev) != &init_net) 3404 if (dev_net(dev) == &init_net &&
3248 return NOTIFY_DONE; 3405 dev->netdev_ops == &velocity_netdev_ops)
3249 3406 velocity_get_ip(netdev_priv(dev));
3250 spin_lock_irqsave(&velocity_dev_list_lock, flags);
3251 list_for_each_entry(vptr, &velocity_dev_list, list) {
3252 if (vptr->dev == dev) {
3253 velocity_get_ip(vptr);
3254 break;
3255 }
3256 }
3257 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
3258 3407
3259 return NOTIFY_DONE; 3408 return NOTIFY_DONE;
3260} 3409}
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 2f00c13ab502..ef4a0f64ba16 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -29,9 +29,10 @@
29 29
30#define VELOCITY_NAME "via-velocity" 30#define VELOCITY_NAME "via-velocity"
31#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver" 31#define VELOCITY_FULL_DRV_NAM "VIA Networking Velocity Family Gigabit Ethernet Adapter Driver"
32#define VELOCITY_VERSION "1.14" 32#define VELOCITY_VERSION "1.15"
33 33
34#define VELOCITY_IO_SIZE 256 34#define VELOCITY_IO_SIZE 256
35#define VELOCITY_NAPI_WEIGHT 64
35 36
36#define PKT_BUF_SZ 1540 37#define PKT_BUF_SZ 1540
37 38
@@ -1005,7 +1006,8 @@ struct mac_regs {
1005 1006
1006 volatile __le32 RDBaseLo; /* 0x38 */ 1007 volatile __le32 RDBaseLo; /* 0x38 */
1007 volatile __le16 RDIdx; /* 0x3C */ 1008 volatile __le16 RDIdx; /* 0x3C */
1008 volatile __le16 reserved_3E; 1009 volatile u8 TQETMR; /* 0x3E, VT3216 and above only */
1010 volatile u8 RQETMR; /* 0x3F, VT3216 and above only */
1009 1011
1010 volatile __le32 TDBaseLo[4]; /* 0x40 */ 1012 volatile __le32 TDBaseLo[4]; /* 0x40 */
1011 1013
@@ -1421,7 +1423,6 @@ enum velocity_msg_level {
1421 */ 1423 */
1422 1424
1423#define VELOCITY_FLAGS_TAGGING 0x00000001UL 1425#define VELOCITY_FLAGS_TAGGING 0x00000001UL
1424#define VELOCITY_FLAGS_TX_CSUM 0x00000002UL
1425#define VELOCITY_FLAGS_RX_CSUM 0x00000004UL 1426#define VELOCITY_FLAGS_RX_CSUM 0x00000004UL
1426#define VELOCITY_FLAGS_IP_ALIGN 0x00000008UL 1427#define VELOCITY_FLAGS_IP_ALIGN 0x00000008UL
1427#define VELOCITY_FLAGS_VAL_PKT_LEN 0x00000010UL 1428#define VELOCITY_FLAGS_VAL_PKT_LEN 0x00000010UL
@@ -1491,6 +1492,10 @@ struct velocity_opt {
1491 int rx_bandwidth_hi; 1492 int rx_bandwidth_hi;
1492 int rx_bandwidth_lo; 1493 int rx_bandwidth_lo;
1493 int rx_bandwidth_en; 1494 int rx_bandwidth_en;
1495 int rxqueue_timer;
1496 int txqueue_timer;
1497 int tx_intsup;
1498 int rx_intsup;
1494 u32 flags; 1499 u32 flags;
1495}; 1500};
1496 1501
@@ -1499,8 +1504,6 @@ struct velocity_opt {
1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1504#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1500 1505
1501struct velocity_info { 1506struct velocity_info {
1502 struct list_head list;
1503
1504 struct pci_dev *pdev; 1507 struct pci_dev *pdev;
1505 struct net_device *dev; 1508 struct net_device *dev;
1506 1509
@@ -1559,6 +1562,8 @@ struct velocity_info {
1559 u32 ticks; 1562 u32 ticks;
1560 1563
1561 u8 rev_id; 1564 u8 rev_id;
1565
1566 struct napi_struct napi;
1562}; 1567};
1563 1568
1564/** 1569/**
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b9e002fccbca..74636c5c41f0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -282,13 +282,12 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
282 do { 282 do {
283 struct skb_vnet_hdr *hdr; 283 struct skb_vnet_hdr *hdr;
284 284
285 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); 285 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
286 if (unlikely(!skb)) { 286 if (unlikely(!skb)) {
287 oom = true; 287 oom = true;
288 break; 288 break;
289 } 289 }
290 290
291 skb_reserve(skb, NET_IP_ALIGN);
292 skb_put(skb, MAX_PACKET_LEN); 291 skb_put(skb, MAX_PACKET_LEN);
293 292
294 hdr = skb_vnet_hdr(skb); 293 hdr = skb_vnet_hdr(skb);
@@ -343,14 +342,12 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
343 do { 342 do {
344 skb_frag_t *f; 343 skb_frag_t *f;
345 344
346 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); 345 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
347 if (unlikely(!skb)) { 346 if (unlikely(!skb)) {
348 oom = true; 347 oom = true;
349 break; 348 break;
350 } 349 }
351 350
352 skb_reserve(skb, NET_IP_ALIGN);
353
354 f = &skb_shinfo(skb)->frags[0]; 351 f = &skb_shinfo(skb)->frags[0];
355 f->page = get_a_page(vi, gfp); 352 f->page = get_a_page(vi, gfp);
356 if (!f->page) { 353 if (!f->page) {
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index dc8ee4438a4f..b4889e6c4a57 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -90,23 +90,60 @@ enum {
90 VMXNET3_CMD_GET_CONF_INTR 90 VMXNET3_CMD_GET_CONF_INTR
91}; 91};
92 92
93struct Vmxnet3_TxDesc { 93/*
94 u64 addr; 94 * Little Endian layout of bitfields -
95 * Byte 0 : 7.....len.....0
96 * Byte 1 : rsvd gen 13.len.8
97 * Byte 2 : 5.msscof.0 ext1 dtype
98 * Byte 3 : 13...msscof...6
99 *
100 * Big Endian layout of bitfields -
101 * Byte 0: 13...msscof...6
102 * Byte 1 : 5.msscof.0 ext1 dtype
103 * Byte 2 : rsvd gen 13.len.8
104 * Byte 3 : 7.....len.....0
105 *
106 * Thus, le32_to_cpu on the dword will allow the big endian driver to read
107 * the bit fields correctly. And cpu_to_le32 will convert bitfields
108 * bit fields written by big endian driver to format required by device.
109 */
95 110
96 u32 len:14; 111struct Vmxnet3_TxDesc {
97 u32 gen:1; /* generation bit */ 112 __le64 addr;
98 u32 rsvd:1; 113
99 u32 dtype:1; /* descriptor type */ 114#ifdef __BIG_ENDIAN_BITFIELD
100 u32 ext1:1; 115 u32 msscof:14; /* MSS, checksum offset, flags */
101 u32 msscof:14; /* MSS, checksum offset, flags */ 116 u32 ext1:1;
102 117 u32 dtype:1; /* descriptor type */
103 u32 hlen:10; /* header len */ 118 u32 rsvd:1;
104 u32 om:2; /* offload mode */ 119 u32 gen:1; /* generation bit */
105 u32 eop:1; /* End Of Packet */ 120 u32 len:14;
106 u32 cq:1; /* completion request */ 121#else
107 u32 ext2:1; 122 u32 len:14;
108 u32 ti:1; /* VLAN Tag Insertion */ 123 u32 gen:1; /* generation bit */
109 u32 tci:16; /* Tag to Insert */ 124 u32 rsvd:1;
125 u32 dtype:1; /* descriptor type */
126 u32 ext1:1;
127 u32 msscof:14; /* MSS, checksum offset, flags */
128#endif /* __BIG_ENDIAN_BITFIELD */
129
130#ifdef __BIG_ENDIAN_BITFIELD
131 u32 tci:16; /* Tag to Insert */
132 u32 ti:1; /* VLAN Tag Insertion */
133 u32 ext2:1;
134 u32 cq:1; /* completion request */
135 u32 eop:1; /* End Of Packet */
136 u32 om:2; /* offload mode */
137 u32 hlen:10; /* header len */
138#else
139 u32 hlen:10; /* header len */
140 u32 om:2; /* offload mode */
141 u32 eop:1; /* End Of Packet */
142 u32 cq:1; /* completion request */
143 u32 ext2:1;
144 u32 ti:1; /* VLAN Tag Insertion */
145 u32 tci:16; /* Tag to Insert */
146#endif /* __BIG_ENDIAN_BITFIELD */
110}; 147};
111 148
112/* TxDesc.OM values */ 149/* TxDesc.OM values */
@@ -118,6 +155,8 @@ struct Vmxnet3_TxDesc {
118#define VMXNET3_TXD_EOP_SHIFT 12 155#define VMXNET3_TXD_EOP_SHIFT 12
119#define VMXNET3_TXD_CQ_SHIFT 13 156#define VMXNET3_TXD_CQ_SHIFT 13
120#define VMXNET3_TXD_GEN_SHIFT 14 157#define VMXNET3_TXD_GEN_SHIFT 14
158#define VMXNET3_TXD_EOP_DWORD_SHIFT 3
159#define VMXNET3_TXD_GEN_DWORD_SHIFT 2
121 160
122#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) 161#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT)
123#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) 162#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT)
@@ -130,29 +169,40 @@ struct Vmxnet3_TxDataDesc {
130 u8 data[VMXNET3_HDR_COPY_SIZE]; 169 u8 data[VMXNET3_HDR_COPY_SIZE];
131}; 170};
132 171
172#define VMXNET3_TCD_GEN_SHIFT 31
173#define VMXNET3_TCD_GEN_SIZE 1
174#define VMXNET3_TCD_TXIDX_SHIFT 0
175#define VMXNET3_TCD_TXIDX_SIZE 12
176#define VMXNET3_TCD_GEN_DWORD_SHIFT 3
133 177
134struct Vmxnet3_TxCompDesc { 178struct Vmxnet3_TxCompDesc {
135 u32 txdIdx:12; /* Index of the EOP TxDesc */ 179 u32 txdIdx:12; /* Index of the EOP TxDesc */
136 u32 ext1:20; 180 u32 ext1:20;
137 181
138 u32 ext2; 182 __le32 ext2;
139 u32 ext3; 183 __le32 ext3;
140 184
141 u32 rsvd:24; 185 u32 rsvd:24;
142 u32 type:7; /* completion type */ 186 u32 type:7; /* completion type */
143 u32 gen:1; /* generation bit */ 187 u32 gen:1; /* generation bit */
144}; 188};
145 189
146
147struct Vmxnet3_RxDesc { 190struct Vmxnet3_RxDesc {
148 u64 addr; 191 __le64 addr;
149 192
193#ifdef __BIG_ENDIAN_BITFIELD
194 u32 gen:1; /* Generation bit */
195 u32 rsvd:15;
196 u32 dtype:1; /* Descriptor type */
197 u32 btype:1; /* Buffer Type */
198 u32 len:14;
199#else
150 u32 len:14; 200 u32 len:14;
151 u32 btype:1; /* Buffer Type */ 201 u32 btype:1; /* Buffer Type */
152 u32 dtype:1; /* Descriptor type */ 202 u32 dtype:1; /* Descriptor type */
153 u32 rsvd:15; 203 u32 rsvd:15;
154 u32 gen:1; /* Generation bit */ 204 u32 gen:1; /* Generation bit */
155 205#endif
156 u32 ext1; 206 u32 ext1;
157}; 207};
158 208
@@ -164,8 +214,17 @@ struct Vmxnet3_RxDesc {
164#define VMXNET3_RXD_BTYPE_SHIFT 14 214#define VMXNET3_RXD_BTYPE_SHIFT 14
165#define VMXNET3_RXD_GEN_SHIFT 31 215#define VMXNET3_RXD_GEN_SHIFT 31
166 216
167
168struct Vmxnet3_RxCompDesc { 217struct Vmxnet3_RxCompDesc {
218#ifdef __BIG_ENDIAN_BITFIELD
219 u32 ext2:1;
220 u32 cnc:1; /* Checksum Not Calculated */
221 u32 rssType:4; /* RSS hash type used */
222 u32 rqID:10; /* rx queue/ring ID */
223 u32 sop:1; /* Start of Packet */
224 u32 eop:1; /* End of Packet */
225 u32 ext1:2;
226 u32 rxdIdx:12; /* Index of the RxDesc */
227#else
169 u32 rxdIdx:12; /* Index of the RxDesc */ 228 u32 rxdIdx:12; /* Index of the RxDesc */
170 u32 ext1:2; 229 u32 ext1:2;
171 u32 eop:1; /* End of Packet */ 230 u32 eop:1; /* End of Packet */
@@ -174,14 +233,36 @@ struct Vmxnet3_RxCompDesc {
174 u32 rssType:4; /* RSS hash type used */ 233 u32 rssType:4; /* RSS hash type used */
175 u32 cnc:1; /* Checksum Not Calculated */ 234 u32 cnc:1; /* Checksum Not Calculated */
176 u32 ext2:1; 235 u32 ext2:1;
236#endif /* __BIG_ENDIAN_BITFIELD */
177 237
178 u32 rssHash; /* RSS hash value */ 238 __le32 rssHash; /* RSS hash value */
179 239
240#ifdef __BIG_ENDIAN_BITFIELD
241 u32 tci:16; /* Tag stripped */
242 u32 ts:1; /* Tag is stripped */
243 u32 err:1; /* Error */
244 u32 len:14; /* data length */
245#else
180 u32 len:14; /* data length */ 246 u32 len:14; /* data length */
181 u32 err:1; /* Error */ 247 u32 err:1; /* Error */
182 u32 ts:1; /* Tag is stripped */ 248 u32 ts:1; /* Tag is stripped */
183 u32 tci:16; /* Tag stripped */ 249 u32 tci:16; /* Tag stripped */
250#endif /* __BIG_ENDIAN_BITFIELD */
251
184 252
253#ifdef __BIG_ENDIAN_BITFIELD
254 u32 gen:1; /* generation bit */
255 u32 type:7; /* completion type */
256 u32 fcs:1; /* Frame CRC correct */
257 u32 frg:1; /* IP Fragment */
258 u32 v4:1; /* IPv4 */
259 u32 v6:1; /* IPv6 */
260 u32 ipc:1; /* IP Checksum Correct */
261 u32 tcp:1; /* TCP packet */
262 u32 udp:1; /* UDP packet */
263 u32 tuc:1; /* TCP/UDP Checksum Correct */
264 u32 csum:16;
265#else
185 u32 csum:16; 266 u32 csum:16;
186 u32 tuc:1; /* TCP/UDP Checksum Correct */ 267 u32 tuc:1; /* TCP/UDP Checksum Correct */
187 u32 udp:1; /* UDP packet */ 268 u32 udp:1; /* UDP packet */
@@ -193,6 +274,7 @@ struct Vmxnet3_RxCompDesc {
193 u32 fcs:1; /* Frame CRC correct */ 274 u32 fcs:1; /* Frame CRC correct */
194 u32 type:7; /* completion type */ 275 u32 type:7; /* completion type */
195 u32 gen:1; /* generation bit */ 276 u32 gen:1; /* generation bit */
277#endif /* __BIG_ENDIAN_BITFIELD */
196}; 278};
197 279
198/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ 280/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */
@@ -206,6 +288,8 @@ struct Vmxnet3_RxCompDesc {
206/* csum OK for TCP/UDP pkts over IP */ 288/* csum OK for TCP/UDP pkts over IP */
207#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \ 289#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \
208 1 << VMXNET3_RCD_IPC_SHIFT) 290 1 << VMXNET3_RCD_IPC_SHIFT)
291#define VMXNET3_TXD_GEN_SIZE 1
292#define VMXNET3_TXD_EOP_SIZE 1
209 293
210/* value of RxCompDesc.rssType */ 294/* value of RxCompDesc.rssType */
211enum { 295enum {
@@ -219,9 +303,9 @@ enum {
219 303
220/* a union for accessing all cmd/completion descriptors */ 304/* a union for accessing all cmd/completion descriptors */
221union Vmxnet3_GenericDesc { 305union Vmxnet3_GenericDesc {
222 u64 qword[2]; 306 __le64 qword[2];
223 u32 dword[4]; 307 __le32 dword[4];
224 u16 word[8]; 308 __le16 word[8];
225 struct Vmxnet3_TxDesc txd; 309 struct Vmxnet3_TxDesc txd;
226 struct Vmxnet3_RxDesc rxd; 310 struct Vmxnet3_RxDesc rxd;
227 struct Vmxnet3_TxCompDesc tcd; 311 struct Vmxnet3_TxCompDesc tcd;
@@ -287,18 +371,24 @@ enum {
287 371
288 372
289struct Vmxnet3_GOSInfo { 373struct Vmxnet3_GOSInfo {
290 u32 gosBits:2; /* 32-bit or 64-bit? */ 374#ifdef __BIG_ENDIAN_BITFIELD
291 u32 gosType:4; /* which guest */ 375 u32 gosMisc:10; /* other info about gos */
292 u32 gosVer:16; /* gos version */ 376 u32 gosVer:16; /* gos version */
293 u32 gosMisc:10; /* other info about gos */ 377 u32 gosType:4; /* which guest */
378 u32 gosBits:2; /* 32-bit or 64-bit? */
379#else
380 u32 gosBits:2; /* 32-bit or 64-bit? */
381 u32 gosType:4; /* which guest */
382 u32 gosVer:16; /* gos version */
383 u32 gosMisc:10; /* other info about gos */
384#endif /* __BIG_ENDIAN_BITFIELD */
294}; 385};
295 386
296
297struct Vmxnet3_DriverInfo { 387struct Vmxnet3_DriverInfo {
298 u32 version; 388 __le32 version;
299 struct Vmxnet3_GOSInfo gos; 389 struct Vmxnet3_GOSInfo gos;
300 u32 vmxnet3RevSpt; 390 __le32 vmxnet3RevSpt;
301 u32 uptVerSpt; 391 __le32 uptVerSpt;
302}; 392};
303 393
304 394
@@ -315,42 +405,42 @@ struct Vmxnet3_DriverInfo {
315 405
316struct Vmxnet3_MiscConf { 406struct Vmxnet3_MiscConf {
317 struct Vmxnet3_DriverInfo driverInfo; 407 struct Vmxnet3_DriverInfo driverInfo;
318 u64 uptFeatures; 408 __le64 uptFeatures;
319 u64 ddPA; /* driver data PA */ 409 __le64 ddPA; /* driver data PA */
320 u64 queueDescPA; /* queue descriptor table PA */ 410 __le64 queueDescPA; /* queue descriptor table PA */
321 u32 ddLen; /* driver data len */ 411 __le32 ddLen; /* driver data len */
322 u32 queueDescLen; /* queue desc. table len in bytes */ 412 __le32 queueDescLen; /* queue desc. table len in bytes */
323 u32 mtu; 413 __le32 mtu;
324 u16 maxNumRxSG; 414 __le16 maxNumRxSG;
325 u8 numTxQueues; 415 u8 numTxQueues;
326 u8 numRxQueues; 416 u8 numRxQueues;
327 u32 reserved[4]; 417 __le32 reserved[4];
328}; 418};
329 419
330 420
331struct Vmxnet3_TxQueueConf { 421struct Vmxnet3_TxQueueConf {
332 u64 txRingBasePA; 422 __le64 txRingBasePA;
333 u64 dataRingBasePA; 423 __le64 dataRingBasePA;
334 u64 compRingBasePA; 424 __le64 compRingBasePA;
335 u64 ddPA; /* driver data */ 425 __le64 ddPA; /* driver data */
336 u64 reserved; 426 __le64 reserved;
337 u32 txRingSize; /* # of tx desc */ 427 __le32 txRingSize; /* # of tx desc */
338 u32 dataRingSize; /* # of data desc */ 428 __le32 dataRingSize; /* # of data desc */
339 u32 compRingSize; /* # of comp desc */ 429 __le32 compRingSize; /* # of comp desc */
340 u32 ddLen; /* size of driver data */ 430 __le32 ddLen; /* size of driver data */
341 u8 intrIdx; 431 u8 intrIdx;
342 u8 _pad[7]; 432 u8 _pad[7];
343}; 433};
344 434
345 435
346struct Vmxnet3_RxQueueConf { 436struct Vmxnet3_RxQueueConf {
347 u64 rxRingBasePA[2]; 437 __le64 rxRingBasePA[2];
348 u64 compRingBasePA; 438 __le64 compRingBasePA;
349 u64 ddPA; /* driver data */ 439 __le64 ddPA; /* driver data */
350 u64 reserved; 440 __le64 reserved;
351 u32 rxRingSize[2]; /* # of rx desc */ 441 __le32 rxRingSize[2]; /* # of rx desc */
352 u32 compRingSize; /* # of rx comp desc */ 442 __le32 compRingSize; /* # of rx comp desc */
353 u32 ddLen; /* size of driver data */ 443 __le32 ddLen; /* size of driver data */
354 u8 intrIdx; 444 u8 intrIdx;
355 u8 _pad[7]; 445 u8 _pad[7];
356}; 446};
@@ -381,7 +471,7 @@ struct Vmxnet3_IntrConf {
381 u8 eventIntrIdx; 471 u8 eventIntrIdx;
382 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for 472 u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for
383 * each intr */ 473 * each intr */
384 u32 reserved[3]; 474 __le32 reserved[3];
385}; 475};
386 476
387/* one bit per VLAN ID, the size is in the units of u32 */ 477/* one bit per VLAN ID, the size is in the units of u32 */
@@ -391,21 +481,21 @@ struct Vmxnet3_IntrConf {
391struct Vmxnet3_QueueStatus { 481struct Vmxnet3_QueueStatus {
392 bool stopped; 482 bool stopped;
393 u8 _pad[3]; 483 u8 _pad[3];
394 u32 error; 484 __le32 error;
395}; 485};
396 486
397 487
398struct Vmxnet3_TxQueueCtrl { 488struct Vmxnet3_TxQueueCtrl {
399 u32 txNumDeferred; 489 __le32 txNumDeferred;
400 u32 txThreshold; 490 __le32 txThreshold;
401 u64 reserved; 491 __le64 reserved;
402}; 492};
403 493
404 494
405struct Vmxnet3_RxQueueCtrl { 495struct Vmxnet3_RxQueueCtrl {
406 bool updateRxProd; 496 bool updateRxProd;
407 u8 _pad[7]; 497 u8 _pad[7];
408 u64 reserved; 498 __le64 reserved;
409}; 499};
410 500
411enum { 501enum {
@@ -417,11 +507,11 @@ enum {
417}; 507};
418 508
419struct Vmxnet3_RxFilterConf { 509struct Vmxnet3_RxFilterConf {
420 u32 rxMode; /* VMXNET3_RXM_xxx */ 510 __le32 rxMode; /* VMXNET3_RXM_xxx */
421 u16 mfTableLen; /* size of the multicast filter table */ 511 __le16 mfTableLen; /* size of the multicast filter table */
422 u16 _pad1; 512 __le16 _pad1;
423 u64 mfTablePA; /* PA of the multicast filters table */ 513 __le64 mfTablePA; /* PA of the multicast filters table */
424 u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ 514 __le32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */
425}; 515};
426 516
427 517
@@ -444,7 +534,7 @@ struct Vmxnet3_PM_PktFilter {
444 534
445 535
446struct Vmxnet3_PMConf { 536struct Vmxnet3_PMConf {
447 u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ 537 __le16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */
448 u8 numFilters; 538 u8 numFilters;
449 u8 pad[5]; 539 u8 pad[5];
450 struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; 540 struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS];
@@ -452,9 +542,9 @@ struct Vmxnet3_PMConf {
452 542
453 543
454struct Vmxnet3_VariableLenConfDesc { 544struct Vmxnet3_VariableLenConfDesc {
455 u32 confVer; 545 __le32 confVer;
456 u32 confLen; 546 __le32 confLen;
457 u64 confPA; 547 __le64 confPA;
458}; 548};
459 549
460 550
@@ -491,12 +581,12 @@ struct Vmxnet3_DSDevRead {
491 581
492/* All structures in DriverShared are padded to multiples of 8 bytes */ 582/* All structures in DriverShared are padded to multiples of 8 bytes */
493struct Vmxnet3_DriverShared { 583struct Vmxnet3_DriverShared {
494 u32 magic; 584 __le32 magic;
495 /* make devRead start at 64bit boundaries */ 585 /* make devRead start at 64bit boundaries */
496 u32 pad; 586 __le32 pad;
497 struct Vmxnet3_DSDevRead devRead; 587 struct Vmxnet3_DSDevRead devRead;
498 u32 ecr; 588 __le32 ecr;
499 u32 reserved[5]; 589 __le32 reserved[5];
500}; 590};
501 591
502 592
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 004353a46af0..a4c97e786ee5 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -24,12 +24,13 @@
24 * 24 *
25 */ 25 */
26 26
27#include <net/ip6_checksum.h>
28
27#include "vmxnet3_int.h" 29#include "vmxnet3_int.h"
28 30
29char vmxnet3_driver_name[] = "vmxnet3"; 31char vmxnet3_driver_name[] = "vmxnet3";
30#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" 32#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"
31 33
32
33/* 34/*
34 * PCI Device ID Table 35 * PCI Device ID Table
35 * Last entry must be all 0s 36 * Last entry must be all 0s
@@ -151,11 +152,10 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter)
151 } 152 }
152} 153}
153 154
154
155static void 155static void
156vmxnet3_process_events(struct vmxnet3_adapter *adapter) 156vmxnet3_process_events(struct vmxnet3_adapter *adapter)
157{ 157{
158 u32 events = adapter->shared->ecr; 158 u32 events = le32_to_cpu(adapter->shared->ecr);
159 if (!events) 159 if (!events)
160 return; 160 return;
161 161
@@ -173,7 +173,7 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
173 if (adapter->tqd_start->status.stopped) { 173 if (adapter->tqd_start->status.stopped) {
174 printk(KERN_ERR "%s: tq error 0x%x\n", 174 printk(KERN_ERR "%s: tq error 0x%x\n",
175 adapter->netdev->name, 175 adapter->netdev->name,
176 adapter->tqd_start->status.error); 176 le32_to_cpu(adapter->tqd_start->status.error));
177 } 177 }
178 if (adapter->rqd_start->status.stopped) { 178 if (adapter->rqd_start->status.stopped) {
179 printk(KERN_ERR "%s: rq error 0x%x\n", 179 printk(KERN_ERR "%s: rq error 0x%x\n",
@@ -185,6 +185,106 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
185 } 185 }
186} 186}
187 187
188#ifdef __BIG_ENDIAN_BITFIELD
189/*
190 * The device expects the bitfields in shared structures to be written in
191 * little endian. When CPU is big endian, the following routines are used to
192 * correctly read and write into ABI.
193 * The general technique used here is : double word bitfields are defined in
194 * opposite order for big endian architecture. Then before reading them in
195 * driver the complete double word is translated using le32_to_cpu. Similarly
196 * After the driver writes into bitfields, cpu_to_le32 is used to translate the
197 * double words into required format.
198 * In order to avoid touching bits in shared structure more than once, temporary
199 * descriptors are used. These are passed as srcDesc to following functions.
200 */
201static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
202 struct Vmxnet3_RxDesc *dstDesc)
203{
204 u32 *src = (u32 *)srcDesc + 2;
205 u32 *dst = (u32 *)dstDesc + 2;
206 dstDesc->addr = le64_to_cpu(srcDesc->addr);
207 *dst = le32_to_cpu(*src);
208 dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
209}
210
211static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
212 struct Vmxnet3_TxDesc *dstDesc)
213{
214 int i;
215 u32 *src = (u32 *)(srcDesc + 1);
216 u32 *dst = (u32 *)(dstDesc + 1);
217
218 /* Working backwards so that the gen bit is set at the end. */
219 for (i = 2; i > 0; i--) {
220 src--;
221 dst--;
222 *dst = cpu_to_le32(*src);
223 }
224}
225
226
227static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
228 struct Vmxnet3_RxCompDesc *dstDesc)
229{
230 int i = 0;
231 u32 *src = (u32 *)srcDesc;
232 u32 *dst = (u32 *)dstDesc;
233 for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
234 *dst = le32_to_cpu(*src);
235 src++;
236 dst++;
237 }
238}
239
240
241/* Used to read bitfield values from double words. */
242static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
243{
244 u32 temp = le32_to_cpu(*bitfield);
245 u32 mask = ((1 << size) - 1) << pos;
246 temp &= mask;
247 temp >>= pos;
248 return temp;
249}
250
251
252
253#endif /* __BIG_ENDIAN_BITFIELD */
254
255#ifdef __BIG_ENDIAN_BITFIELD
256
257# define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
258 txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
259 VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
260# define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
261 txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
262 VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
263# define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
264 VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
265 VMXNET3_TCD_GEN_SIZE)
266# define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
267 VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
268# define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
269 (dstrcd) = (tmp); \
270 vmxnet3_RxCompToCPU((rcd), (tmp)); \
271 } while (0)
272# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
273 (dstrxd) = (tmp); \
274 vmxnet3_RxDescToCPU((rxd), (tmp)); \
275 } while (0)
276
277#else
278
279# define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
280# define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
281# define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
282# define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
283# define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
284# define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)
285
286#endif /* __BIG_ENDIAN_BITFIELD */
287
188 288
189static void 289static void
190vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, 290vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
@@ -212,7 +312,7 @@ vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
212 312
213 /* no out of order completion */ 313 /* no out of order completion */
214 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); 314 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
215 BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); 315 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
216 316
217 skb = tq->buf_info[eop_idx].skb; 317 skb = tq->buf_info[eop_idx].skb;
218 BUG_ON(skb == NULL); 318 BUG_ON(skb == NULL);
@@ -246,9 +346,10 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
246 union Vmxnet3_GenericDesc *gdesc; 346 union Vmxnet3_GenericDesc *gdesc;
247 347
248 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 348 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
249 while (gdesc->tcd.gen == tq->comp_ring.gen) { 349 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
250 completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, 350 completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
251 adapter->pdev, adapter); 351 &gdesc->tcd), tq, adapter->pdev,
352 adapter);
252 353
253 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); 354 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
254 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; 355 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
@@ -472,9 +573,9 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
472 } 573 }
473 574
474 BUG_ON(rbi->dma_addr == 0); 575 BUG_ON(rbi->dma_addr == 0);
475 gd->rxd.addr = rbi->dma_addr; 576 gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
476 gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | 577 gd->dword[2] = cpu_to_le32((ring->gen << VMXNET3_RXD_GEN_SHIFT)
477 rbi->len; 578 | val | rbi->len);
478 579
479 num_allocated++; 580 num_allocated++;
480 vmxnet3_cmd_ring_adv_next2fill(ring); 581 vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -531,10 +632,10 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
531 632
532 /* no need to map the buffer if headers are copied */ 633 /* no need to map the buffer if headers are copied */
533 if (ctx->copy_size) { 634 if (ctx->copy_size) {
534 ctx->sop_txd->txd.addr = tq->data_ring.basePA + 635 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
535 tq->tx_ring.next2fill * 636 tq->tx_ring.next2fill *
536 sizeof(struct Vmxnet3_TxDataDesc); 637 sizeof(struct Vmxnet3_TxDataDesc));
537 ctx->sop_txd->dword[2] = dw2 | ctx->copy_size; 638 ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
538 ctx->sop_txd->dword[3] = 0; 639 ctx->sop_txd->dword[3] = 0;
539 640
540 tbi = tq->buf_info + tq->tx_ring.next2fill; 641 tbi = tq->buf_info + tq->tx_ring.next2fill;
@@ -542,7 +643,8 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
542 643
543 dev_dbg(&adapter->netdev->dev, 644 dev_dbg(&adapter->netdev->dev,
544 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 645 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
545 tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, 646 tq->tx_ring.next2fill,
647 le64_to_cpu(ctx->sop_txd->txd.addr),
546 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); 648 ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
547 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 649 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
548 650
@@ -570,14 +672,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
570 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 672 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
571 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 673 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
572 674
573 gdesc->txd.addr = tbi->dma_addr; 675 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
574 gdesc->dword[2] = dw2 | buf_size; 676 gdesc->dword[2] = cpu_to_le32(dw2 | buf_size);
575 gdesc->dword[3] = 0; 677 gdesc->dword[3] = 0;
576 678
577 dev_dbg(&adapter->netdev->dev, 679 dev_dbg(&adapter->netdev->dev,
578 "txd[%u]: 0x%Lx 0x%x 0x%x\n", 680 "txd[%u]: 0x%Lx 0x%x 0x%x\n",
579 tq->tx_ring.next2fill, gdesc->txd.addr, 681 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
580 gdesc->dword[2], gdesc->dword[3]); 682 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
581 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 683 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
582 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 684 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
583 685
@@ -599,14 +701,14 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
599 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 701 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
600 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 702 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
601 703
602 gdesc->txd.addr = tbi->dma_addr; 704 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
603 gdesc->dword[2] = dw2 | frag->size; 705 gdesc->dword[2] = cpu_to_le32(dw2 | frag->size);
604 gdesc->dword[3] = 0; 706 gdesc->dword[3] = 0;
605 707
606 dev_dbg(&adapter->netdev->dev, 708 dev_dbg(&adapter->netdev->dev,
607 "txd[%u]: 0x%llu %u %u\n", 709 "txd[%u]: 0x%llu %u %u\n",
608 tq->tx_ring.next2fill, gdesc->txd.addr, 710 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
609 gdesc->dword[2], gdesc->dword[3]); 711 le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
610 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); 712 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
611 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; 713 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
612 } 714 }
@@ -751,6 +853,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
751 unsigned long flags; 853 unsigned long flags;
752 struct vmxnet3_tx_ctx ctx; 854 struct vmxnet3_tx_ctx ctx;
753 union Vmxnet3_GenericDesc *gdesc; 855 union Vmxnet3_GenericDesc *gdesc;
856#ifdef __BIG_ENDIAN_BITFIELD
857 /* Use temporary descriptor to avoid touching bits multiple times */
858 union Vmxnet3_GenericDesc tempTxDesc;
859#endif
754 860
755 /* conservatively estimate # of descriptors to use */ 861 /* conservatively estimate # of descriptors to use */
756 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 862 count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
@@ -827,16 +933,22 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
827 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); 933 vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
828 934
829 /* setup the EOP desc */ 935 /* setup the EOP desc */
830 ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; 936 ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
831 937
832 /* setup the SOP desc */ 938 /* setup the SOP desc */
939#ifdef __BIG_ENDIAN_BITFIELD
940 gdesc = &tempTxDesc;
941 gdesc->dword[2] = ctx.sop_txd->dword[2];
942 gdesc->dword[3] = ctx.sop_txd->dword[3];
943#else
833 gdesc = ctx.sop_txd; 944 gdesc = ctx.sop_txd;
945#endif
834 if (ctx.mss) { 946 if (ctx.mss) {
835 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 947 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
836 gdesc->txd.om = VMXNET3_OM_TSO; 948 gdesc->txd.om = VMXNET3_OM_TSO;
837 gdesc->txd.msscof = ctx.mss; 949 gdesc->txd.msscof = ctx.mss;
838 tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + 950 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
839 ctx.mss - 1) / ctx.mss; 951 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
840 } else { 952 } else {
841 if (skb->ip_summed == CHECKSUM_PARTIAL) { 953 if (skb->ip_summed == CHECKSUM_PARTIAL) {
842 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 954 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -847,7 +959,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
847 gdesc->txd.om = 0; 959 gdesc->txd.om = 0;
848 gdesc->txd.msscof = 0; 960 gdesc->txd.msscof = 0;
849 } 961 }
850 tq->shared->txNumDeferred++; 962 le32_add_cpu(&tq->shared->txNumDeferred, 1);
851 } 963 }
852 964
853 if (vlan_tx_tag_present(skb)) { 965 if (vlan_tx_tag_present(skb)) {
@@ -855,19 +967,27 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
855 gdesc->txd.tci = vlan_tx_tag_get(skb); 967 gdesc->txd.tci = vlan_tx_tag_get(skb);
856 } 968 }
857 969
858 wmb(); 970 /* finally flips the GEN bit of the SOP desc. */
859 971 gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
860 /* finally flips the GEN bit of the SOP desc */ 972 VMXNET3_TXD_GEN);
861 gdesc->dword[2] ^= VMXNET3_TXD_GEN; 973#ifdef __BIG_ENDIAN_BITFIELD
974 /* Finished updating in bitfields of Tx Desc, so write them in original
975 * place.
976 */
977 vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
978 (struct Vmxnet3_TxDesc *)ctx.sop_txd);
979 gdesc = ctx.sop_txd;
980#endif
862 dev_dbg(&adapter->netdev->dev, 981 dev_dbg(&adapter->netdev->dev,
863 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", 982 "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
864 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - 983 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
865 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], 984 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
866 gdesc->dword[3]); 985 le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
867 986
868 spin_unlock_irqrestore(&tq->tx_lock, flags); 987 spin_unlock_irqrestore(&tq->tx_lock, flags);
869 988
870 if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { 989 if (le32_to_cpu(tq->shared->txNumDeferred) >=
990 le32_to_cpu(tq->shared->txThreshold)) {
871 tq->shared->txNumDeferred = 0; 991 tq->shared->txNumDeferred = 0;
872 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, 992 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
873 tq->tx_ring.next2fill); 993 tq->tx_ring.next2fill);
@@ -889,9 +1009,8 @@ static netdev_tx_t
889vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1009vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
890{ 1010{
891 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1011 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
892 struct vmxnet3_tx_queue *tq = &adapter->tx_queue;
893 1012
894 return vmxnet3_tq_xmit(skb, tq, adapter, netdev); 1013 return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
895} 1014}
896 1015
897 1016
@@ -902,7 +1021,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
902{ 1021{
903 if (!gdesc->rcd.cnc && adapter->rxcsum) { 1022 if (!gdesc->rcd.cnc && adapter->rxcsum) {
904 /* typical case: TCP/UDP over IP and both csums are correct */ 1023 /* typical case: TCP/UDP over IP and both csums are correct */
905 if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == 1024 if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
906 VMXNET3_RCD_CSUM_OK) { 1025 VMXNET3_RCD_CSUM_OK) {
907 skb->ip_summed = CHECKSUM_UNNECESSARY; 1026 skb->ip_summed = CHECKSUM_UNNECESSARY;
908 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); 1027 BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
@@ -957,8 +1076,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
957 u32 num_rxd = 0; 1076 u32 num_rxd = 0;
958 struct Vmxnet3_RxCompDesc *rcd; 1077 struct Vmxnet3_RxCompDesc *rcd;
959 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1078 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
960 1079#ifdef __BIG_ENDIAN_BITFIELD
961 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 1080 struct Vmxnet3_RxDesc rxCmdDesc;
1081 struct Vmxnet3_RxCompDesc rxComp;
1082#endif
1083 vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
1084 &rxComp);
962 while (rcd->gen == rq->comp_ring.gen) { 1085 while (rcd->gen == rq->comp_ring.gen) {
963 struct vmxnet3_rx_buf_info *rbi; 1086 struct vmxnet3_rx_buf_info *rbi;
964 struct sk_buff *skb; 1087 struct sk_buff *skb;
@@ -976,11 +1099,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
976 1099
977 idx = rcd->rxdIdx; 1100 idx = rcd->rxdIdx;
978 ring_idx = rcd->rqID == rq->qid ? 0 : 1; 1101 ring_idx = rcd->rqID == rq->qid ? 0 : 1;
979 1102 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
980 rxd = &rq->rx_ring[ring_idx].base[idx].rxd; 1103 &rxCmdDesc);
981 rbi = rq->buf_info[ring_idx] + idx; 1104 rbi = rq->buf_info[ring_idx] + idx;
982 1105
983 BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); 1106 BUG_ON(rxd->addr != rbi->dma_addr ||
1107 rxd->len != rbi->len);
984 1108
985 if (unlikely(rcd->eop && rcd->err)) { 1109 if (unlikely(rcd->eop && rcd->err)) {
986 vmxnet3_rx_error(rq, rcd, ctx, adapter); 1110 vmxnet3_rx_error(rq, rcd, ctx, adapter);
@@ -1078,7 +1202,8 @@ rcd_done:
1078 } 1202 }
1079 1203
1080 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); 1204 vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
1081 rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; 1205 vmxnet3_getRxComp(rcd,
1206 &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
1082 } 1207 }
1083 1208
1084 return num_rxd; 1209 return num_rxd;
@@ -1094,7 +1219,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1094 1219
1095 for (ring_idx = 0; ring_idx < 2; ring_idx++) { 1220 for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1096 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { 1221 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1097 rxd = &rq->rx_ring[ring_idx].base[i].rxd; 1222#ifdef __BIG_ENDIAN_BITFIELD
1223 struct Vmxnet3_RxDesc rxDesc;
1224#endif
1225 vmxnet3_getRxDesc(rxd,
1226 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1098 1227
1099 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && 1228 if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1100 rq->buf_info[ring_idx][i].skb) { 1229 rq->buf_info[ring_idx][i].skb) {
@@ -1346,12 +1475,12 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1346 err = request_irq(adapter->intr.msix_entries[0].vector, 1475 err = request_irq(adapter->intr.msix_entries[0].vector,
1347 vmxnet3_intr, 0, adapter->netdev->name, 1476 vmxnet3_intr, 0, adapter->netdev->name,
1348 adapter->netdev); 1477 adapter->netdev);
1349 } else 1478 } else if (adapter->intr.type == VMXNET3_IT_MSI) {
1350#endif
1351 if (adapter->intr.type == VMXNET3_IT_MSI) {
1352 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1479 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1353 adapter->netdev->name, adapter->netdev); 1480 adapter->netdev->name, adapter->netdev);
1354 } else { 1481 } else
1482#endif
1483 {
1355 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 1484 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1356 IRQF_SHARED, adapter->netdev->name, 1485 IRQF_SHARED, adapter->netdev->name,
1357 adapter->netdev); 1486 adapter->netdev);
@@ -1412,6 +1541,22 @@ vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1412} 1541}
1413 1542
1414 1543
1544inline void set_flag_le16(__le16 *data, u16 flag)
1545{
1546 *data = cpu_to_le16(le16_to_cpu(*data) | flag);
1547}
1548
1549inline void set_flag_le64(__le64 *data, u64 flag)
1550{
1551 *data = cpu_to_le64(le64_to_cpu(*data) | flag);
1552}
1553
1554inline void reset_flag_le64(__le64 *data, u64 flag)
1555{
1556 *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
1557}
1558
1559
1415static void 1560static void
1416vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 1561vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1417{ 1562{
@@ -1427,7 +1572,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1427 adapter->vlan_grp = grp; 1572 adapter->vlan_grp = grp;
1428 1573
1429 /* update FEATURES to device */ 1574 /* update FEATURES to device */
1430 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1575 set_flag_le64(&devRead->misc.uptFeatures,
1576 UPT1_F_RXVLAN);
1431 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1577 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1432 VMXNET3_CMD_UPDATE_FEATURE); 1578 VMXNET3_CMD_UPDATE_FEATURE);
1433 /* 1579 /*
@@ -1450,7 +1596,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1450 struct Vmxnet3_DSDevRead *devRead = &shared->devRead; 1596 struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
1451 adapter->vlan_grp = NULL; 1597 adapter->vlan_grp = NULL;
1452 1598
1453 if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { 1599 if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
1454 int i; 1600 int i;
1455 1601
1456 for (i = 0; i < VMXNET3_VFT_SIZE; i++) { 1602 for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1463,7 +1609,8 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
1463 VMXNET3_CMD_UPDATE_VLAN_FILTERS); 1609 VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1464 1610
1465 /* update FEATURES to device */ 1611 /* update FEATURES to device */
1466 devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; 1612 reset_flag_le64(&devRead->misc.uptFeatures,
1613 UPT1_F_RXVLAN);
1467 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1614 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1468 VMXNET3_CMD_UPDATE_FEATURE); 1615 VMXNET3_CMD_UPDATE_FEATURE);
1469 } 1616 }
@@ -1565,9 +1712,10 @@ vmxnet3_set_mc(struct net_device *netdev)
1565 new_table = vmxnet3_copy_mc(netdev); 1712 new_table = vmxnet3_copy_mc(netdev);
1566 if (new_table) { 1713 if (new_table) {
1567 new_mode |= VMXNET3_RXM_MCAST; 1714 new_mode |= VMXNET3_RXM_MCAST;
1568 rxConf->mfTableLen = netdev->mc_count * 1715 rxConf->mfTableLen = cpu_to_le16(
1569 ETH_ALEN; 1716 netdev->mc_count * ETH_ALEN);
1570 rxConf->mfTablePA = virt_to_phys(new_table); 1717 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
1718 new_table));
1571 } else { 1719 } else {
1572 printk(KERN_INFO "%s: failed to copy mcast list" 1720 printk(KERN_INFO "%s: failed to copy mcast list"
1573 ", setting ALL_MULTI\n", netdev->name); 1721 ", setting ALL_MULTI\n", netdev->name);
@@ -1582,7 +1730,7 @@ vmxnet3_set_mc(struct net_device *netdev)
1582 } 1730 }
1583 1731
1584 if (new_mode != rxConf->rxMode) { 1732 if (new_mode != rxConf->rxMode) {
1585 rxConf->rxMode = new_mode; 1733 rxConf->rxMode = cpu_to_le32(new_mode);
1586 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1734 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1587 VMXNET3_CMD_UPDATE_RX_MODE); 1735 VMXNET3_CMD_UPDATE_RX_MODE);
1588 } 1736 }
@@ -1610,63 +1758,69 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1610 memset(shared, 0, sizeof(*shared)); 1758 memset(shared, 0, sizeof(*shared));
1611 1759
1612 /* driver settings */ 1760 /* driver settings */
1613 shared->magic = VMXNET3_REV1_MAGIC; 1761 shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
1614 devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; 1762 devRead->misc.driverInfo.version = cpu_to_le32(
1763 VMXNET3_DRIVER_VERSION_NUM);
1615 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 1764 devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
1616 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); 1765 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
1617 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; 1766 devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
1618 devRead->misc.driverInfo.vmxnet3RevSpt = 1; 1767 *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
1619 devRead->misc.driverInfo.uptVerSpt = 1; 1768 *((u32 *)&devRead->misc.driverInfo.gos));
1769 devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
1770 devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
1620 1771
1621 devRead->misc.ddPA = virt_to_phys(adapter); 1772 devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
1622 devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); 1773 devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
1623 1774
1624 /* set up feature flags */ 1775 /* set up feature flags */
1625 if (adapter->rxcsum) 1776 if (adapter->rxcsum)
1626 devRead->misc.uptFeatures |= UPT1_F_RXCSUM; 1777 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
1627 1778
1628 if (adapter->lro) { 1779 if (adapter->lro) {
1629 devRead->misc.uptFeatures |= UPT1_F_LRO; 1780 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
1630 devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; 1781 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
1631 } 1782 }
1632 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) 1783 if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX)
1633 && adapter->vlan_grp) { 1784 && adapter->vlan_grp) {
1634 devRead->misc.uptFeatures |= UPT1_F_RXVLAN; 1785 set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
1635 } 1786 }
1636 1787
1637 devRead->misc.mtu = adapter->netdev->mtu; 1788 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
1638 devRead->misc.queueDescPA = adapter->queue_desc_pa; 1789 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
1639 devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + 1790 devRead->misc.queueDescLen = cpu_to_le32(
1640 sizeof(struct Vmxnet3_RxQueueDesc); 1791 sizeof(struct Vmxnet3_TxQueueDesc) +
1792 sizeof(struct Vmxnet3_RxQueueDesc));
1641 1793
1642 /* tx queue settings */ 1794 /* tx queue settings */
1643 BUG_ON(adapter->tx_queue.tx_ring.base == NULL); 1795 BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
1644 1796
1645 devRead->misc.numTxQueues = 1; 1797 devRead->misc.numTxQueues = 1;
1646 tqc = &adapter->tqd_start->conf; 1798 tqc = &adapter->tqd_start->conf;
1647 tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; 1799 tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
1648 tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; 1800 tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
1649 tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; 1801 tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
1650 tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); 1802 tqc->ddPA = cpu_to_le64(virt_to_phys(
1651 tqc->txRingSize = adapter->tx_queue.tx_ring.size; 1803 adapter->tx_queue.buf_info));
1652 tqc->dataRingSize = adapter->tx_queue.data_ring.size; 1804 tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size);
1653 tqc->compRingSize = adapter->tx_queue.comp_ring.size; 1805 tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size);
1654 tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * 1806 tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size);
1655 tqc->txRingSize; 1807 tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
1808 tqc->txRingSize);
1656 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; 1809 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx;
1657 1810
1658 /* rx queue settings */ 1811 /* rx queue settings */
1659 devRead->misc.numRxQueues = 1; 1812 devRead->misc.numRxQueues = 1;
1660 rqc = &adapter->rqd_start->conf; 1813 rqc = &adapter->rqd_start->conf;
1661 rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; 1814 rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
1662 rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; 1815 rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
1663 rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; 1816 rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
1664 rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); 1817 rqc->ddPA = cpu_to_le64(virt_to_phys(
1665 rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; 1818 adapter->rx_queue.buf_info));
1666 rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; 1819 rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
1667 rqc->compRingSize = adapter->rx_queue.comp_ring.size; 1820 rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
1668 rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * 1821 rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size);
1669 (rqc->rxRingSize[0] + rqc->rxRingSize[1]); 1822 rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
1823 (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
1670 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; 1824 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx;
1671 1825
1672 /* intr settings */ 1826 /* intr settings */
@@ -1715,11 +1869,10 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1715 1869
1716 vmxnet3_setup_driver_shared(adapter); 1870 vmxnet3_setup_driver_shared(adapter);
1717 1871
1718 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 1872 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
1719 VMXNET3_GET_ADDR_LO(adapter->shared_pa)); 1873 adapter->shared_pa));
1720 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 1874 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
1721 VMXNET3_GET_ADDR_HI(adapter->shared_pa)); 1875 adapter->shared_pa));
1722
1723 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 1876 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1724 VMXNET3_CMD_ACTIVATE_DEV); 1877 VMXNET3_CMD_ACTIVATE_DEV);
1725 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 1878 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -2425,7 +2578,7 @@ vmxnet3_suspend(struct device *device)
2425 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); 2578 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
2426 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ 2579 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
2427 2580
2428 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2581 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2429 i++; 2582 i++;
2430 } 2583 }
2431 2584
@@ -2467,19 +2620,21 @@ vmxnet3_suspend(struct device *device)
2467 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ 2620 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
2468 in_dev_put(in_dev); 2621 in_dev_put(in_dev);
2469 2622
2470 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; 2623 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
2471 i++; 2624 i++;
2472 } 2625 }
2473 2626
2474skip_arp: 2627skip_arp:
2475 if (adapter->wol & WAKE_MAGIC) 2628 if (adapter->wol & WAKE_MAGIC)
2476 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; 2629 set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
2477 2630
2478 pmConf->numFilters = i; 2631 pmConf->numFilters = i;
2479 2632
2480 adapter->shared->devRead.pmConfDesc.confVer = 1; 2633 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2481 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2634 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2482 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2635 *pmConf));
2636 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
2637 pmConf));
2483 2638
2484 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 2639 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2485 VMXNET3_CMD_UPDATE_PMCFG); 2640 VMXNET3_CMD_UPDATE_PMCFG);
@@ -2510,9 +2665,11 @@ vmxnet3_resume(struct device *device)
2510 pmConf = adapter->pm_conf; 2665 pmConf = adapter->pm_conf;
2511 memset(pmConf, 0, sizeof(*pmConf)); 2666 memset(pmConf, 0, sizeof(*pmConf));
2512 2667
2513 adapter->shared->devRead.pmConfDesc.confVer = 1; 2668 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
2514 adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); 2669 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
2515 adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); 2670 *pmConf));
2671 adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
2672 pmConf));
2516 2673
2517 netif_device_attach(netdev); 2674 netif_device_attach(netdev);
2518 pci_set_power_state(pdev, PCI_D0); 2675 pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c2c15e4cafc7..3935c4493fb7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,11 +50,13 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
50 adapter->rxcsum = val; 50 adapter->rxcsum = val;
51 if (netif_running(netdev)) { 51 if (netif_running(netdev)) {
52 if (val) 52 if (val)
53 adapter->shared->devRead.misc.uptFeatures |= 53 set_flag_le64(
54 UPT1_F_RXCSUM; 54 &adapter->shared->devRead.misc.uptFeatures,
55 UPT1_F_RXCSUM);
55 else 56 else
56 adapter->shared->devRead.misc.uptFeatures &= 57 reset_flag_le64(
57 ~UPT1_F_RXCSUM; 58 &adapter->shared->devRead.misc.uptFeatures,
59 UPT1_F_RXCSUM);
58 60
59 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 61 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
60 VMXNET3_CMD_UPDATE_FEATURE); 62 VMXNET3_CMD_UPDATE_FEATURE);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3c0d70d58111..34f392f46fb1 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -27,16 +27,11 @@
27#ifndef _VMXNET3_INT_H 27#ifndef _VMXNET3_INT_H
28#define _VMXNET3_INT_H 28#define _VMXNET3_INT_H
29 29
30#include <linux/types.h>
31#include <linux/ethtool.h> 30#include <linux/ethtool.h>
32#include <linux/delay.h> 31#include <linux/delay.h>
33#include <linux/device.h>
34#include <linux/netdevice.h> 32#include <linux/netdevice.h>
35#include <linux/pci.h> 33#include <linux/pci.h>
36#include <linux/ethtool.h>
37#include <linux/compiler.h> 34#include <linux/compiler.h>
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/slab.h> 35#include <linux/slab.h>
41#include <linux/spinlock.h> 36#include <linux/spinlock.h>
42#include <linux/ioport.h> 37#include <linux/ioport.h>
@@ -335,14 +330,14 @@ struct vmxnet3_adapter {
335}; 330};
336 331
337#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ 332#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
338 writel((val), (adapter)->hw_addr0 + (reg)) 333 writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
339#define VMXNET3_READ_BAR0_REG(adapter, reg) \ 334#define VMXNET3_READ_BAR0_REG(adapter, reg) \
340 readl((adapter)->hw_addr0 + (reg)) 335 le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
341 336
342#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ 337#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
343 writel((val), (adapter)->hw_addr1 + (reg)) 338 writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
344#define VMXNET3_READ_BAR1_REG(adapter, reg) \ 339#define VMXNET3_READ_BAR1_REG(adapter, reg) \
345 readl((adapter)->hw_addr1 + (reg)) 340 le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
346 341
347#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) 342#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
348#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ 343#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -358,6 +353,10 @@ struct vmxnet3_adapter {
358#define VMXNET3_MAX_ETH_HDR_SIZE 22 353#define VMXNET3_MAX_ETH_HDR_SIZE 22
359#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) 354#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
360 355
356void set_flag_le16(__le16 *data, u16 flag);
357void set_flag_le64(__le64 *data, u64 flag);
358void reset_flag_le64(__le64 *data, u64 flag);
359
361int 360int
362vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); 361vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
363 362
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 9e94c4b0fb18..32a75fa935ed 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -356,10 +356,8 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
356 356
357 switch (host_type) { 357 switch (host_type) {
358 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: 358 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
359 if (func_id == 0) { 359 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
360 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 360 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
361 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
362 }
363 break; 361 break;
364 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: 362 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
365 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 363 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -382,6 +380,22 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
382 return access_rights; 380 return access_rights;
383} 381}
384/* 382/*
383 * __vxge_hw_device_is_privilaged
384 * This routine checks if the device function is privilaged or not
385 */
386
387enum vxge_hw_status
388__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
389{
390 if (__vxge_hw_device_access_rights_get(host_type,
391 func_id) &
392 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
393 return VXGE_HW_OK;
394 else
395 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
396}
397
398/*
385 * __vxge_hw_device_host_info_get 399 * __vxge_hw_device_host_info_get
386 * This routine returns the host type assignments 400 * This routine returns the host type assignments
387 */ 401 */
@@ -446,220 +460,6 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
446 return VXGE_HW_OK; 460 return VXGE_HW_OK;
447} 461}
448 462
449enum vxge_hw_status
450__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
451{
452 if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
453 hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
454 hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
455 (hldev->func_id == 0))
456 return VXGE_HW_OK;
457 else
458 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
459}
460
461/*
462 * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calandars.
463 * Rebalance the RX_WRR and KDFC_WRR calandars.
464 */
465static enum
466vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
467{
468 u64 val64;
469 u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
470 u32 i, j, how_often = 1;
471 enum vxge_hw_status status = VXGE_HW_OK;
472
473 status = __vxge_hw_device_is_privilaged(hldev);
474 if (status != VXGE_HW_OK)
475 goto exit;
476
477 /* Reset the priorities assigned to the WRR arbitration
478 phases for the receive traffic */
479 for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
480 writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
481
482 /* Reset the transmit FIFO servicing calendar for FIFOs */
483 for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
484 writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
485 writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
486 }
487
488 /* Assign WRR priority 0 for all FIFOs */
489 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
490 writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
491 ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
492
493 writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
494 ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
495 }
496
497 /* Reset to service non-offload doorbells */
498 writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
499 writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
500
501 /* Set priority 0 to all receive queues */
502 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
503 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
504 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);
505
506 /* Initialize all the slots as unused */
507 for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
508 wrr_states[i] = -1;
509
510 /* Prepare the Fifo service states */
511 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
512
513 if (!hldev->config.vp_config[i].min_bandwidth)
514 continue;
515
516 how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
517 hldev->config.vp_config[i].min_bandwidth;
518 if (how_often) {
519
520 for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
521 if (wrr_states[j] == -1) {
522 wrr_states[j] = i;
523 /* Make sure each fifo is serviced
524 * atleast once */
525 if (i == j)
526 j += VXGE_HW_MAX_VIRTUAL_PATHS;
527 else
528 j += how_often;
529 } else
530 j++;
531 }
532 }
533 }
534
535 /* Fill the unused slots with 0 */
536 for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
537 if (wrr_states[j] == -1)
538 wrr_states[j] = 0;
539 }
540
541 /* Assign WRR priority number for FIFOs */
542 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
543 writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
544 ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
545
546 writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
547 ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
548 }
549
550 /* Modify the servicing algorithm applied to the 3 types of doorbells.
551 i.e, none-offload, message and offload */
552 writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
553 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
554 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
555 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
556 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
557 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
558 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
559 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
560 &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
561
562 writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
563 &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
564
565 for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
566
567 val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
568 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
569 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
570 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
571 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
572 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
573 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
574 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);
575
576 writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
577 writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
578 }
579
580 /* Set up the priorities assigned to receive queues */
581 writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
582 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
583 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
584 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
585 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
586 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
587 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
588 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
589 &hldev->mrpcim_reg->rx_queue_priority_0);
590
591 writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
592 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
593 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
594 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
595 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
596 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
597 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
598 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
599 &hldev->mrpcim_reg->rx_queue_priority_1);
600
601 writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
602 &hldev->mrpcim_reg->rx_queue_priority_2);
603
604 /* Initialize all the slots as unused */
605 for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
606 wrr_states[i] = -1;
607
608 /* Prepare the Ring service states */
609 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
610
611 if (!hldev->config.vp_config[i].min_bandwidth)
612 continue;
613
614 how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
615 hldev->config.vp_config[i].min_bandwidth;
616
617 if (how_often) {
618 for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
619 if (wrr_states[j] == -1) {
620 wrr_states[j] = i;
621 /* Make sure each ring is
622 * serviced atleast once */
623 if (i == j)
624 j += VXGE_HW_MAX_VIRTUAL_PATHS;
625 else
626 j += how_often;
627 } else
628 j++;
629 }
630 }
631 }
632
633 /* Fill the unused slots with 0 */
634 for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
635 if (wrr_states[j] == -1)
636 wrr_states[j] = 0;
637 }
638
639 for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
640 val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
641 wrr_states[j++]);
642 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
643 wrr_states[j++]);
644 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
645 wrr_states[j++]);
646 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
647 wrr_states[j++]);
648 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
649 wrr_states[j++]);
650 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
651 wrr_states[j++]);
652 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
653 wrr_states[j++]);
654 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
655 wrr_states[j++]);
656
657 writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
658 }
659exit:
660 return status;
661}
662
663/* 463/*
664 * __vxge_hw_device_initialize 464 * __vxge_hw_device_initialize
665 * Initialize Titan-V hardware. 465 * Initialize Titan-V hardware.
@@ -668,14 +468,14 @@ enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
668{ 468{
669 enum vxge_hw_status status = VXGE_HW_OK; 469 enum vxge_hw_status status = VXGE_HW_OK;
670 470
671 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) { 471 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
472 hldev->func_id)) {
672 /* Validate the pci-e link width and speed */ 473 /* Validate the pci-e link width and speed */
673 status = __vxge_hw_verify_pci_e_info(hldev); 474 status = __vxge_hw_verify_pci_e_info(hldev);
674 if (status != VXGE_HW_OK) 475 if (status != VXGE_HW_OK)
675 goto exit; 476 goto exit;
676 } 477 }
677 478
678 vxge_hw_wrr_rebalance(hldev);
679exit: 479exit:
680 return status; 480 return status;
681} 481}
@@ -953,7 +753,8 @@ vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
953 u64 val64; 753 u64 val64;
954 enum vxge_hw_status status = VXGE_HW_OK; 754 enum vxge_hw_status status = VXGE_HW_OK;
955 755
956 status = __vxge_hw_device_is_privilaged(hldev); 756 status = __vxge_hw_device_is_privilaged(hldev->host_type,
757 hldev->func_id);
957 if (status != VXGE_HW_OK) 758 if (status != VXGE_HW_OK)
958 goto exit; 759 goto exit;
959 760
@@ -990,7 +791,8 @@ vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
990 791
991 val64 = (u64 *)aggr_stats; 792 val64 = (u64 *)aggr_stats;
992 793
993 status = __vxge_hw_device_is_privilaged(hldev); 794 status = __vxge_hw_device_is_privilaged(hldev->host_type,
795 hldev->func_id);
994 if (status != VXGE_HW_OK) 796 if (status != VXGE_HW_OK)
995 goto exit; 797 goto exit;
996 798
@@ -1023,7 +825,8 @@ vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1023 u32 offset = 0x0; 825 u32 offset = 0x0;
1024 val64 = (u64 *) port_stats; 826 val64 = (u64 *) port_stats;
1025 827
1026 status = __vxge_hw_device_is_privilaged(hldev); 828 status = __vxge_hw_device_is_privilaged(hldev->host_type,
829 hldev->func_id);
1027 if (status != VXGE_HW_OK) 830 if (status != VXGE_HW_OK)
1028 goto exit; 831 goto exit;
1029 832
@@ -1221,7 +1024,8 @@ enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1221 goto exit; 1024 goto exit;
1222 } 1025 }
1223 1026
1224 status = __vxge_hw_device_is_privilaged(hldev); 1027 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1028 hldev->func_id);
1225 if (status != VXGE_HW_OK) 1029 if (status != VXGE_HW_OK)
1226 goto exit; 1030 goto exit;
1227 1031
@@ -2353,6 +2157,28 @@ exit:
2353} 2157}
2354 2158
2355/* 2159/*
2160 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2161 */
2162enum vxge_hw_status
2163vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2164{
2165 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
2166 enum vxge_hw_status status = VXGE_HW_OK;
2167 int i = 0, j = 0;
2168
2169 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2170 if (!((vpath_mask) & vxge_mBIT(i)))
2171 continue;
2172 vpmgmt_reg = hldev->vpmgmt_reg[i];
2173 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2174 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2175 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2176 return VXGE_HW_FAIL;
2177 }
2178 }
2179 return status;
2180}
2181/*
2356 * vxge_hw_mgmt_reg_Write - Write Titan register. 2182 * vxge_hw_mgmt_reg_Write - Write Titan register.
2357 */ 2183 */
2358enum vxge_hw_status 2184enum vxge_hw_status
@@ -4056,6 +3882,30 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4056 return status; 3882 return status;
4057} 3883}
4058 3884
3885void
3886vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3887{
3888 struct __vxge_hw_virtualpath *vpath;
3889 struct vxge_hw_vpath_reg __iomem *vp_reg;
3890 struct vxge_hw_vp_config *config;
3891 u64 val64;
3892
3893 vpath = &hldev->virtual_paths[vp_id];
3894 vp_reg = vpath->vp_reg;
3895 config = vpath->vp_config;
3896
3897 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3898 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3899
3900 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
3901 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
3902 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3903 writeq(val64,
3904 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3905 }
3906 }
3907 return;
3908}
4059/* 3909/*
4060 * __vxge_hw_vpath_initialize 3910 * __vxge_hw_vpath_initialize
4061 * This routine is the final phase of init which initializes the 3911 * This routine is the final phase of init which initializes the
@@ -4098,8 +3948,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4098 if (status != VXGE_HW_OK) 3948 if (status != VXGE_HW_OK)
4099 goto exit; 3949 goto exit;
4100 3950
4101 writeq(0, &vp_reg->gendma_int);
4102
4103 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); 3951 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4104 3952
4105 /* Get MRRS value from device control */ 3953 /* Get MRRS value from device control */
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 3e94f0ce0900..e7877df092f3 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -2201,6 +2201,8 @@ __vxge_hw_vpath_func_id_get(
2201enum vxge_hw_status 2201enum vxge_hw_status
2202__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); 2202__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
2203 2203
2204enum vxge_hw_status
2205vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2204/** 2206/**
2205 * vxge_debug 2207 * vxge_debug
2206 * @level: level of debug verbosity. 2208 * @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 068d7a9d3e36..e21358e82c74 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -2435,7 +2435,6 @@ static int vxge_add_isr(struct vxgedev *vdev)
2435 int ret = 0; 2435 int ret = 0;
2436#ifdef CONFIG_PCI_MSI 2436#ifdef CONFIG_PCI_MSI
2437 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; 2437 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2438 u64 function_mode = vdev->config.device_hw_info.function_mode;
2439 int pci_fun = PCI_FUNC(vdev->pdev->devfn); 2438 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2440 2439
2441 if (vdev->config.intr_type == MSI_X) 2440 if (vdev->config.intr_type == MSI_X)
@@ -2444,20 +2443,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
2444 if (ret) { 2443 if (ret) {
2445 vxge_debug_init(VXGE_ERR, 2444 vxge_debug_init(VXGE_ERR,
2446 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); 2445 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2447 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2446 vxge_debug_init(VXGE_ERR,
2448 test_and_set_bit(__VXGE_STATE_CARD_UP, 2447 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2449 &driver_config->inta_dev_open)) 2448 vdev->config.intr_type = INTA;
2450 return VXGE_HW_FAIL;
2451 else {
2452 vxge_debug_init(VXGE_ERR,
2453 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2454 vdev->config.intr_type = INTA;
2455 vxge_hw_device_set_intr_type(vdev->devh,
2456 VXGE_HW_INTR_MODE_IRQLINE);
2457 vxge_close_vpaths(vdev, 1);
2458 vdev->no_of_vpath = 1;
2459 vdev->stats.vpaths_open = 1;
2460 }
2461 } 2449 }
2462 2450
2463 if (vdev->config.intr_type == MSI_X) { 2451 if (vdev->config.intr_type == MSI_X) {
@@ -2505,24 +2493,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2505 "%s: MSIX - %d Registration failed", 2493 "%s: MSIX - %d Registration failed",
2506 vdev->ndev->name, intr_cnt); 2494 vdev->ndev->name, intr_cnt);
2507 vxge_rem_msix_isr(vdev); 2495 vxge_rem_msix_isr(vdev);
2508 if ((function_mode == 2496 vdev->config.intr_type = INTA;
2509 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2497 vxge_debug_init(VXGE_ERR,
2510 test_and_set_bit(__VXGE_STATE_CARD_UP, 2498 "%s: Defaulting to INTA"
2511 &driver_config->inta_dev_open)) 2499 , vdev->ndev->name);
2512 return VXGE_HW_FAIL;
2513 else {
2514 vxge_hw_device_set_intr_type(
2515 vdev->devh,
2516 VXGE_HW_INTR_MODE_IRQLINE);
2517 vdev->config.intr_type = INTA;
2518 vxge_debug_init(VXGE_ERR,
2519 "%s: Defaulting to INTA"
2520 , vdev->ndev->name);
2521 vxge_close_vpaths(vdev, 1);
2522 vdev->no_of_vpath = 1;
2523 vdev->stats.vpaths_open = 1;
2524 goto INTA_MODE; 2500 goto INTA_MODE;
2525 }
2526 } 2501 }
2527 2502
2528 if (irq_req) { 2503 if (irq_req) {
@@ -2555,23 +2530,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2555 "%s: MSIX - %d Registration failed", 2530 "%s: MSIX - %d Registration failed",
2556 vdev->ndev->name, intr_cnt); 2531 vdev->ndev->name, intr_cnt);
2557 vxge_rem_msix_isr(vdev); 2532 vxge_rem_msix_isr(vdev);
2558 if ((function_mode == 2533 vdev->config.intr_type = INTA;
2559 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2534 vxge_debug_init(VXGE_ERR,
2560 test_and_set_bit(__VXGE_STATE_CARD_UP, 2535 "%s: Defaulting to INTA",
2561 &driver_config->inta_dev_open)) 2536 vdev->ndev->name);
2562 return VXGE_HW_FAIL;
2563 else {
2564 vxge_hw_device_set_intr_type(vdev->devh,
2565 VXGE_HW_INTR_MODE_IRQLINE);
2566 vdev->config.intr_type = INTA;
2567 vxge_debug_init(VXGE_ERR,
2568 "%s: Defaulting to INTA",
2569 vdev->ndev->name);
2570 vxge_close_vpaths(vdev, 1);
2571 vdev->no_of_vpath = 1;
2572 vdev->stats.vpaths_open = 1;
2573 goto INTA_MODE; 2537 goto INTA_MODE;
2574 }
2575 } 2538 }
2576 2539
2577 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, 2540 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
@@ -2584,6 +2547,10 @@ INTA_MODE:
2584 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name); 2547 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2585 2548
2586 if (vdev->config.intr_type == INTA) { 2549 if (vdev->config.intr_type == INTA) {
2550 vxge_hw_device_set_intr_type(vdev->devh,
2551 VXGE_HW_INTR_MODE_IRQLINE);
2552 vxge_hw_vpath_tti_ci_set(vdev->devh,
2553 vdev->vpaths[0].device_id);
2587 ret = request_irq((int) vdev->pdev->irq, 2554 ret = request_irq((int) vdev->pdev->irq,
2588 vxge_isr_napi, 2555 vxge_isr_napi,
2589 IRQF_SHARED, vdev->desc[0], vdev); 2556 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2688,13 +2655,6 @@ vxge_open(struct net_device *dev)
2688 * initialized */ 2655 * initialized */
2689 netif_carrier_off(dev); 2656 netif_carrier_off(dev);
2690 2657
2691 /* Check for another device already opn with INTA */
2692 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2693 test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
2694 ret = -EPERM;
2695 goto out0;
2696 }
2697
2698 /* Open VPATHs */ 2658 /* Open VPATHs */
2699 status = vxge_open_vpaths(vdev); 2659 status = vxge_open_vpaths(vdev);
2700 if (status != VXGE_HW_OK) { 2660 if (status != VXGE_HW_OK) {
@@ -2983,7 +2943,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
2983 vxge_debug_entryexit(VXGE_TRACE, 2943 vxge_debug_entryexit(VXGE_TRACE,
2984 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); 2944 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2985 2945
2986 clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
2987 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); 2946 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2988 2947
2989 return 0; 2948 return 0;
@@ -3653,11 +3612,12 @@ static int __devinit vxge_config_vpaths(
3653 device_config->vp_config[i].fifo.enable = 3612 device_config->vp_config[i].fifo.enable =
3654 VXGE_HW_FIFO_ENABLE; 3613 VXGE_HW_FIFO_ENABLE;
3655 device_config->vp_config[i].fifo.max_frags = 3614 device_config->vp_config[i].fifo.max_frags =
3656 MAX_SKB_FRAGS; 3615 MAX_SKB_FRAGS + 1;
3657 device_config->vp_config[i].fifo.memblock_size = 3616 device_config->vp_config[i].fifo.memblock_size =
3658 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; 3617 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3659 3618
3660 txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd); 3619 txdl_size = device_config->vp_config[i].fifo.max_frags *
3620 sizeof(struct vxge_hw_fifo_txd);
3661 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; 3621 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3662 3622
3663 device_config->vp_config[i].fifo.fifo_blocks = 3623 device_config->vp_config[i].fifo.fifo_blocks =
@@ -4088,9 +4048,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4088 driver_config->config_dev_cnt = 0; 4048 driver_config->config_dev_cnt = 0;
4089 driver_config->total_dev_cnt = 0; 4049 driver_config->total_dev_cnt = 0;
4090 driver_config->g_no_cpus = 0; 4050 driver_config->g_no_cpus = 0;
4091 driver_config->vpath_per_dev = max_config_vpath;
4092 } 4051 }
4093 4052
4053 driver_config->vpath_per_dev = max_config_vpath;
4054
4094 driver_config->total_dev_cnt++; 4055 driver_config->total_dev_cnt++;
4095 if (++driver_config->config_dev_cnt > max_config_dev) { 4056 if (++driver_config->config_dev_cnt > max_config_dev) {
4096 ret = 0; 4057 ret = 0;
@@ -4243,6 +4204,15 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4243 goto _exit3; 4204 goto _exit3;
4244 } 4205 }
4245 4206
4207 /* if FCS stripping is not disabled in MAC fail driver load */
4208 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
4209 vxge_debug_init(VXGE_ERR,
4210 "%s: FCS stripping is not disabled in MAC"
4211 " failing driver load", VXGE_DRIVER_NAME);
4212 ret = -EINVAL;
4213 goto _exit4;
4214 }
4215
4246 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); 4216 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4247 4217
4248 /* set private device info */ 4218 /* set private device info */
@@ -4387,6 +4357,27 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4387 } 4357 }
4388 4358
4389 kfree(device_config); 4359 kfree(device_config);
4360
4361 /*
4362 * INTA is shared in multi-function mode. This is unlike the INTA
4363 * implementation in MR mode, where each VH has its own INTA message.
4364 * - INTA is masked (disabled) as long as at least one function sets
4365 * its TITAN_MASK_ALL_INT.ALARM bit.
4366 * - INTA is unmasked (enabled) when all enabled functions have cleared
4367 * their own TITAN_MASK_ALL_INT.ALARM bit.
4368 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4369 * Though this driver leaves the top level interrupts unmasked while
4370 * leaving the required module interrupt bits masked on exit, there
4371 * could be a rougue driver around that does not follow this procedure
4372 * resulting in a failure to generate interrupts. The following code is
4373 * present to prevent such a failure.
4374 */
4375
4376 if (ll_config.device_hw_info.function_mode ==
4377 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4378 if (vdev->config.intr_type == INTA)
4379 vxge_hw_device_unmask_all(hldev);
4380
4390 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", 4381 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4391 vdev->ndev->name, __func__, __LINE__); 4382 vdev->ndev->name, __func__, __LINE__);
4392 4383
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 9c36b3a9a63d..7c83ba4be9d7 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -112,7 +112,6 @@ enum vxge_mac_addr_state {
112struct vxge_drv_config { 112struct vxge_drv_config {
113 int config_dev_cnt; 113 int config_dev_cnt;
114 int total_dev_cnt; 114 int total_dev_cnt;
115 unsigned long inta_dev_open;
116 int g_no_cpus; 115 int g_no_cpus;
117 unsigned int vpath_per_dev; 116 unsigned int vpath_per_dev;
118}; 117};
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 9a3b823e08d4..9a0cf8eaa328 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -4326,10 +4326,6 @@ struct vxge_hw_vpath_reg {
4326/*0x011e0*/ u64 umq_bwr_init_byte; 4326/*0x011e0*/ u64 umq_bwr_init_byte;
4327#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32) 4327#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
4328/*0x011e8*/ u64 gendma_int; 4328/*0x011e8*/ u64 gendma_int;
4329#define VXGE_HW_GENDMA_INT_IMMED_ENABLE vxge_mBIT(6)
4330#define VXGE_HW_GENDMA_INT_EVENT_ENABLE vxge_mBIT(7)
4331#define VXGE_HW_GENDMA_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
4332#define VXGE_HW_GENDMA_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
4333/*0x011f0*/ u64 umqdmq_ir_init_notify; 4329/*0x011f0*/ u64 umqdmq_ir_init_notify;
4334#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3) 4330#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
4335/*0x011f8*/ u64 dmq_init_notify; 4331/*0x011f8*/ u64 dmq_init_notify;
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index fe3ae518c69c..61ce754fa9d0 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -295,6 +295,8 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
295 u64 val64; 295 u64 val64;
296 u32 val32; 296 u32 val32;
297 297
298 vxge_hw_device_mask_all(hldev);
299
298 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 300 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
299 301
300 if (!(hldev->vpaths_deployed & vxge_mBIT(i))) 302 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
@@ -1232,7 +1234,7 @@ void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1232 vxge_hw_channel_dtr_post(&fifo->channel, txdlh); 1234 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1233 1235
1234 __vxge_hw_non_offload_db_post(fifo, 1236 __vxge_hw_non_offload_db_post(fifo,
1235 (u64)(size_t)txdl_priv->dma_addr, 1237 (u64)txdl_priv->dma_addr,
1236 txdl_priv->frags - 1, 1238 txdl_priv->frags - 1,
1237 fifo->no_snoop_bits); 1239 fifo->no_snoop_bits);
1238 1240
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 461742b4442b..861c853e3e84 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2389,6 +2389,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2389 2389
2390int 2390int
2391vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); 2391vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2392void
2393vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
2392 2394
2393/* ========================== PRIVATE API ================================= */ 2395/* ========================== PRIVATE API ================================= */
2394 2396
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 8fbce7552035..77c2a754b7b8 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
17 17
18#define VXGE_VERSION_MAJOR "2" 18#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0" 19#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "5" 20#define VXGE_VERSION_FIX "6"
21#define VXGE_VERSION_BUILD "18053" 21#define VXGE_VERSION_BUILD "18937"
22#define VXGE_VERSION_FOR "k" 22#define VXGE_VERSION_FOR "k"
23#endif 23#endif
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index e2c33c06190b..cd8f04afed8f 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -297,8 +297,8 @@ static ssize_t cosa_write(struct file *file,
297static unsigned int cosa_poll(struct file *file, poll_table *poll); 297static unsigned int cosa_poll(struct file *file, poll_table *poll);
298static int cosa_open(struct inode *inode, struct file *file); 298static int cosa_open(struct inode *inode, struct file *file);
299static int cosa_release(struct inode *inode, struct file *file); 299static int cosa_release(struct inode *inode, struct file *file);
300static int cosa_chardev_ioctl(struct inode *inode, struct file *file, 300static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
301 unsigned int cmd, unsigned long arg); 301 unsigned long arg);
302#ifdef COSA_FASYNC_WORKING 302#ifdef COSA_FASYNC_WORKING
303static int cosa_fasync(struct inode *inode, struct file *file, int on); 303static int cosa_fasync(struct inode *inode, struct file *file, int on);
304#endif 304#endif
@@ -309,7 +309,7 @@ static const struct file_operations cosa_fops = {
309 .read = cosa_read, 309 .read = cosa_read,
310 .write = cosa_write, 310 .write = cosa_write,
311 .poll = cosa_poll, 311 .poll = cosa_poll,
312 .ioctl = cosa_chardev_ioctl, 312 .unlocked_ioctl = cosa_chardev_ioctl,
313 .open = cosa_open, 313 .open = cosa_open,
314 .release = cosa_release, 314 .release = cosa_release,
315#ifdef COSA_FASYNC_WORKING 315#ifdef COSA_FASYNC_WORKING
@@ -1204,12 +1204,18 @@ static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1204 return hdlc_ioctl(dev, ifr, cmd); 1204 return hdlc_ioctl(dev, ifr, cmd);
1205} 1205}
1206 1206
1207static int cosa_chardev_ioctl(struct inode *inode, struct file *file, 1207static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
1208 unsigned int cmd, unsigned long arg) 1208 unsigned long arg)
1209{ 1209{
1210 struct channel_data *channel = file->private_data; 1210 struct channel_data *channel = file->private_data;
1211 struct cosa_data *cosa = channel->cosa; 1211 struct cosa_data *cosa;
1212 return cosa_ioctl_common(cosa, channel, cmd, arg); 1212 long ret;
1213
1214 lock_kernel();
1215 cosa = channel->cosa;
1216 ret = cosa_ioctl_common(cosa, channel, cmd, arg);
1217 unlock_kernel();
1218 return ret;
1213} 1219}
1214 1220
1215 1221
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 07d00b4cf48a..3f759daf3ca4 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1128,7 +1128,7 @@ done:
1128 init_timer(&dpriv->timer); 1128 init_timer(&dpriv->timer);
1129 dpriv->timer.expires = jiffies + 10*HZ; 1129 dpriv->timer.expires = jiffies + 10*HZ;
1130 dpriv->timer.data = (unsigned long)dev; 1130 dpriv->timer.data = (unsigned long)dev;
1131 dpriv->timer.function = &dscc4_timer; 1131 dpriv->timer.function = dscc4_timer;
1132 add_timer(&dpriv->timer); 1132 add_timer(&dpriv->timer);
1133 netif_carrier_on(dev); 1133 netif_carrier_on(dev);
1134 1134
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index cc07236ea734..9937bbab938d 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -57,7 +57,7 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
57{ 57{
58 struct hdlc_device *hdlc = dev_to_hdlc(dev); 58 struct hdlc_device *hdlc = dev_to_hdlc(dev);
59 59
60 if (dev_net(dev) != &init_net) { 60 if (!net_eq(dev_net(dev), &init_net)) {
61 kfree_skb(skb); 61 kfree_skb(skb);
62 return 0; 62 return 0;
63 } 63 }
@@ -102,7 +102,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
102 unsigned long flags; 102 unsigned long flags;
103 int on; 103 int on;
104 104
105 if (dev_net(dev) != &init_net) 105 if (!net_eq(dev_net(dev), &init_net))
106 return NOTIFY_DONE; 106 return NOTIFY_DONE;
107 107
108 if (!(dev->priv_flags & IFF_WAN_HDLC)) 108 if (!(dev->priv_flags & IFF_WAN_HDLC))
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 15002c3d0d95..74164d29524c 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -218,7 +218,7 @@ static struct z8530_dev *sv11_init(int iobase, int irq)
218 /* We want a fast IRQ for this device. Actually we'd like an even faster 218 /* We want a fast IRQ for this device. Actually we'd like an even faster
219 IRQ ;) - This is one driver RtLinux is made for */ 219 IRQ ;) - This is one driver RtLinux is made for */
220 220
221 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, 221 if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
222 "Hostess SV11", sv) < 0) { 222 "Hostess SV11", sv) < 0) {
223 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); 223 printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
224 goto err_irq; 224 goto err_irq;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 7ea71b33d2e9..2ebe935d1058 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1028,7 +1028,7 @@ static int lmc_open(struct net_device *dev)
1028 lmc_softreset (sc); 1028 lmc_softreset (sc);
1029 1029
1030 /* Since we have to use PCI bus, this should work on x86,alpha,ppc */ 1030 /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
1031 if (request_irq (dev->irq, &lmc_interrupt, IRQF_SHARED, dev->name, dev)){ 1031 if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
1032 printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq); 1032 printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
1033 lmc_trace(dev, "lmc_open irq failed out"); 1033 lmc_trace(dev, "lmc_open irq failed out");
1034 return -EAGAIN; 1034 return -EAGAIN;
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 58c66819f39b..5394b51bdb2f 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -376,7 +376,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
376 } 376 }
377 card->io = io; 377 card->io = io;
378 378
379 if (request_irq(irq, &sca_intr, 0, devname, card)) { 379 if (request_irq(irq, sca_intr, 0, devname, card)) {
380 printk(KERN_ERR "n2: could not allocate IRQ\n"); 380 printk(KERN_ERR "n2: could not allocate IRQ\n");
381 n2_destroy_card(card); 381 n2_destroy_card(card);
382 return(-EBUSY); 382 return(-EBUSY);
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 2b15a7e40d5b..31c41af2246d 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1457,7 +1457,7 @@ got_type:
1457 } 1457 }
1458 1458
1459 err = -EAGAIN; 1459 err = -EAGAIN;
1460 if (request_irq(dev->irq, &sdla_isr, 0, dev->name, dev)) 1460 if (request_irq(dev->irq, sdla_isr, 0, dev->name, dev))
1461 goto fail; 1461 goto fail;
1462 1462
1463 if (flp->type == SDLA_S507) { 1463 if (flp->type == SDLA_S507) {
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0c525e24b247..3b3ee05bc462 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -266,7 +266,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
266 /* We want a fast IRQ for this device. Actually we'd like an even faster 266 /* We want a fast IRQ for this device. Actually we'd like an even faster
267 IRQ ;) - This is one driver RtLinux is made for */ 267 IRQ ;) - This is one driver RtLinux is made for */
268 268
269 if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, 269 if (request_irq(irq, z8530_interrupt, IRQF_DISABLED,
270 "SeaLevel", dev) < 0) { 270 "SeaLevel", dev) < 0) {
271 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); 271 printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
272 goto err_request_irq; 272 goto err_request_irq;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 27945049c9e1..3c325d77939b 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -33,6 +33,7 @@
33#include <linux/lapb.h> 33#include <linux/lapb.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/compat.h>
36#include "x25_asy.h" 37#include "x25_asy.h"
37 38
38#include <net/x25device.h> 39#include <net/x25device.h>
@@ -705,6 +706,21 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
705 } 706 }
706} 707}
707 708
709#ifdef CONFIG_COMPAT
710static long x25_asy_compat_ioctl(struct tty_struct *tty, struct file *file,
711 unsigned int cmd, unsigned long arg)
712{
713 switch (cmd) {
714 case SIOCGIFNAME:
715 case SIOCSIFHWADDR:
716 return x25_asy_ioctl(tty, file, cmd,
717 (unsigned long)compat_ptr(arg));
718 }
719
720 return -ENOIOCTLCMD;
721}
722#endif
723
708static int x25_asy_open_dev(struct net_device *dev) 724static int x25_asy_open_dev(struct net_device *dev)
709{ 725{
710 struct x25_asy *sl = netdev_priv(dev); 726 struct x25_asy *sl = netdev_priv(dev);
@@ -754,6 +770,9 @@ static struct tty_ldisc_ops x25_ldisc = {
754 .open = x25_asy_open_tty, 770 .open = x25_asy_open_tty,
755 .close = x25_asy_close_tty, 771 .close = x25_asy_close_tty,
756 .ioctl = x25_asy_ioctl, 772 .ioctl = x25_asy_ioctl,
773#ifdef CONFIG_COMPAT
774 .compat_ioctl = x25_asy_compat_ioctl,
775#endif
757 .receive_buf = x25_asy_receive_buf, 776 .receive_buf = x25_asy_receive_buf,
758 .write_wakeup = x25_asy_write_wakeup, 777 .write_wakeup = x25_asy_write_wakeup,
759}; 778};
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index d623b3d99a4b..3f703384295e 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -31,6 +31,14 @@ config WIMAX_I2400M_SDIO
31 31
32 If unsure, it is safe to select M (module). 32 If unsure, it is safe to select M (module).
33 33
34config WIMAX_IWMC3200_SDIO
35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO"
36 depends on WIMAX_I2400M_SDIO
37 select IWMC3200TOP
38 help
39 Select if you have a device based on the Intel Multicom WiMAX
40 Connection 3200 over SDIO.
41
34config WIMAX_I2400M_DEBUG_LEVEL 42config WIMAX_I2400M_DEBUG_LEVEL
35 int "WiMAX i2400m debug level" 43 int "WiMAX i2400m debug level"
36 depends on WIMAX_I2400M 44 depends on WIMAX_I2400M
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 07308686dbcf..944945540391 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -54,7 +54,7 @@
54 * i2400m_set_init_config() 54 * i2400m_set_init_config()
55 * i2400m_cmd_get_state() 55 * i2400m_cmd_get_state()
56 * i2400m_dev_shutdown() Called by i2400m_dev_stop() 56 * i2400m_dev_shutdown() Called by i2400m_dev_stop()
57 * i2400m->bus_reset() 57 * i2400m_reset()
58 * 58 *
59 * i2400m_{cmd,get,set}_*() 59 * i2400m_{cmd,get,set}_*()
60 * i2400m_msg_to_dev() 60 * i2400m_msg_to_dev()
@@ -82,6 +82,13 @@
82#define D_SUBMODULE control 82#define D_SUBMODULE control
83#include "debug-levels.h" 83#include "debug-levels.h"
84 84
85int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
86module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
87MODULE_PARM_DESC(passive_mode,
88 "If true, the driver will not do any device setup "
89 "and leave it up to user space, who must be properly "
90 "setup.");
91
85 92
86/* 93/*
87 * Return if a TLV is of a give type and size 94 * Return if a TLV is of a give type and size
@@ -263,7 +270,7 @@ int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
263 270
264 if (status == 0) 271 if (status == 0)
265 return 0; 272 return 0;
266 if (status > ARRAY_SIZE(ms_to_errno)) { 273 if (status >= ARRAY_SIZE(ms_to_errno)) {
267 str = "unknown status code"; 274 str = "unknown status code";
268 result = -EBADR; 275 result = -EBADR;
269 } else { 276 } else {
@@ -336,7 +343,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
336 /* Huh? just in case, shut it down */ 343 /* Huh? just in case, shut it down */
337 dev_err(dev, "HW BUG? unknown state %u: shutting down\n", 344 dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
338 i2400m_state); 345 i2400m_state);
339 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 346 i2400m_reset(i2400m, I2400M_RT_WARM);
340 break; 347 break;
341 }; 348 };
342 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", 349 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
@@ -1335,6 +1342,8 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
1335 unsigned argc = 0; 1342 unsigned argc = 0;
1336 1343
1337 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 1344 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
1345 if (i2400m_passive_mode)
1346 goto out_passive;
1338 /* Disable idle mode? (enabled by default) */ 1347 /* Disable idle mode? (enabled by default) */
1339 if (i2400m_idle_mode_disabled) { 1348 if (i2400m_idle_mode_disabled) {
1340 if (i2400m_le_v1_3(i2400m)) { 1349 if (i2400m_le_v1_3(i2400m)) {
@@ -1377,6 +1386,7 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
1377 result = i2400m_set_init_config(i2400m, args, argc); 1386 result = i2400m_set_init_config(i2400m, args, argc);
1378 if (result < 0) 1387 if (result < 0)
1379 goto error; 1388 goto error;
1389out_passive:
1380 /* 1390 /*
1381 * Update state: Here it just calls a get state; parsing the 1391 * Update state: Here it just calls a get state; parsing the
1382 * result (System State TLV and RF Status TLV [done in the rx 1392 * result (System State TLV and RF Status TLV [done in the rx
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 9b81af3f80a9..b1aec3e1892f 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -214,7 +214,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
214 case I2400M_RT_WARM: 214 case I2400M_RT_WARM:
215 case I2400M_RT_COLD: 215 case I2400M_RT_COLD:
216 case I2400M_RT_BUS: 216 case I2400M_RT_BUS:
217 result = i2400m->bus_reset(i2400m, rt); 217 result = i2400m_reset(i2400m, rt);
218 if (result >= 0) 218 if (result >= 0)
219 result = 0; 219 result = 0;
220 default: 220 default:
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 304f0443ca4b..96a615fe09de 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -41,8 +41,10 @@
41 * __i2400m_dev_start() 41 * __i2400m_dev_start()
42 * 42 *
43 * i2400m_setup() 43 * i2400m_setup()
44 * i2400m->bus_setup()
44 * i2400m_bootrom_init() 45 * i2400m_bootrom_init()
45 * register_netdev() 46 * register_netdev()
47 * wimax_dev_add()
46 * i2400m_dev_start() 48 * i2400m_dev_start()
47 * __i2400m_dev_start() 49 * __i2400m_dev_start()
48 * i2400m_dev_bootstrap() 50 * i2400m_dev_bootstrap()
@@ -50,15 +52,15 @@
50 * i2400m->bus_dev_start() 52 * i2400m->bus_dev_start()
51 * i2400m_firmware_check() 53 * i2400m_firmware_check()
52 * i2400m_check_mac_addr() 54 * i2400m_check_mac_addr()
53 * wimax_dev_add()
54 * 55 *
55 * i2400m_release() 56 * i2400m_release()
56 * wimax_dev_rm()
57 * i2400m_dev_stop() 57 * i2400m_dev_stop()
58 * __i2400m_dev_stop() 58 * __i2400m_dev_stop()
59 * i2400m_dev_shutdown() 59 * i2400m_dev_shutdown()
60 * i2400m->bus_dev_stop() 60 * i2400m->bus_dev_stop()
61 * i2400m_tx_release() 61 * i2400m_tx_release()
62 * i2400m->bus_release()
63 * wimax_dev_rm()
62 * unregister_netdev() 64 * unregister_netdev()
63 */ 65 */
64#include "i2400m.h" 66#include "i2400m.h"
@@ -66,6 +68,7 @@
66#include <linux/wimax/i2400m.h> 68#include <linux/wimax/i2400m.h>
67#include <linux/module.h> 69#include <linux/module.h>
68#include <linux/moduleparam.h> 70#include <linux/moduleparam.h>
71#include <linux/suspend.h>
69 72
70#define D_SUBMODULE driver 73#define D_SUBMODULE driver
71#include "debug-levels.h" 74#include "debug-levels.h"
@@ -90,76 +93,39 @@ MODULE_PARM_DESC(power_save_disabled,
90 "False by default (so the device is told to do power " 93 "False by default (so the device is told to do power "
91 "saving)."); 94 "saving).");
92 95
93/** 96static char i2400m_debug_params[128];
94 * i2400m_queue_work - schedule work on a i2400m's queue 97module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
95 * 98 0644);
96 * @i2400m: device descriptor 99MODULE_PARM_DESC(debug,
97 * 100 "String of space-separated NAME:VALUE pairs, where NAMEs "
98 * @fn: function to run to execute work. It gets passed a 'struct 101 "are the different debug submodules and VALUE are the "
99 * work_struct' that is wrapped in a 'struct i2400m_work'. Once 102 "initial debug value to set.");
100 * done, you have to (1) i2400m_put(i2400m_work->i2400m) and then 103
101 * (2) kfree(i2400m_work). 104static char i2400m_barkers_params[128];
102 * 105module_param_string(barkers, i2400m_barkers_params,
103 * @gfp_flags: GFP flags for memory allocation. 106 sizeof(i2400m_barkers_params), 0644);
104 * 107MODULE_PARM_DESC(barkers,
105 * @pl: pointer to a payload buffer that you want to pass to the _work 108 "String of comma-separated 32-bit values; each is "
106 * function. Use this to pack (for example) a struct with extra 109 "recognized as the value the device sends as a reboot "
107 * arguments. 110 "signal; values are appended to a list--setting one value "
108 * 111 "as zero cleans the existing list and starts a new one.");
109 * @pl_size: size of the payload buffer. 112
110 * 113static
111 * We do this quite often, so this just saves typing; allocate a 114struct i2400m_work *__i2400m_work_setup(
112 * wrapper for a i2400m, get a ref to it, pack arguments and launch 115 struct i2400m *i2400m, void (*fn)(struct work_struct *),
113 * the work. 116 gfp_t gfp_flags, const void *pl, size_t pl_size)
114 *
115 * A usual workflow is:
116 *
117 * struct my_work_args {
118 * void *something;
119 * int whatever;
120 * };
121 * ...
122 *
123 * struct my_work_args my_args = {
124 * .something = FOO,
125 * .whaetever = BLAH
126 * };
127 * i2400m_queue_work(i2400m, 1, my_work_function, GFP_KERNEL,
128 * &args, sizeof(args))
129 *
130 * And now the work function can unpack the arguments and call the
131 * real function (or do the job itself):
132 *
133 * static
134 * void my_work_fn((struct work_struct *ws)
135 * {
136 * struct i2400m_work *iw =
137 * container_of(ws, struct i2400m_work, ws);
138 * struct my_work_args *my_args = (void *) iw->pl;
139 *
140 * my_work(iw->i2400m, my_args->something, my_args->whatevert);
141 * }
142 */
143int i2400m_queue_work(struct i2400m *i2400m,
144 void (*fn)(struct work_struct *), gfp_t gfp_flags,
145 const void *pl, size_t pl_size)
146{ 117{
147 int result;
148 struct i2400m_work *iw; 118 struct i2400m_work *iw;
149 119
150 BUG_ON(i2400m->work_queue == NULL);
151 result = -ENOMEM;
152 iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags); 120 iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
153 if (iw == NULL) 121 if (iw == NULL)
154 goto error_kzalloc; 122 return NULL;
155 iw->i2400m = i2400m_get(i2400m); 123 iw->i2400m = i2400m_get(i2400m);
124 iw->pl_size = pl_size;
156 memcpy(iw->pl, pl, pl_size); 125 memcpy(iw->pl, pl, pl_size);
157 INIT_WORK(&iw->ws, fn); 126 INIT_WORK(&iw->ws, fn);
158 result = queue_work(i2400m->work_queue, &iw->ws); 127 return iw;
159error_kzalloc:
160 return result;
161} 128}
162EXPORT_SYMBOL_GPL(i2400m_queue_work);
163 129
164 130
165/* 131/*
@@ -175,21 +141,19 @@ EXPORT_SYMBOL_GPL(i2400m_queue_work);
175 * it should not happen. 141 * it should not happen.
176 */ 142 */
177int i2400m_schedule_work(struct i2400m *i2400m, 143int i2400m_schedule_work(struct i2400m *i2400m,
178 void (*fn)(struct work_struct *), gfp_t gfp_flags) 144 void (*fn)(struct work_struct *), gfp_t gfp_flags,
145 const void *pl, size_t pl_size)
179{ 146{
180 int result; 147 int result;
181 struct i2400m_work *iw; 148 struct i2400m_work *iw;
182 149
183 result = -ENOMEM; 150 result = -ENOMEM;
184 iw = kzalloc(sizeof(*iw), gfp_flags); 151 iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
185 if (iw == NULL) 152 if (iw != NULL) {
186 goto error_kzalloc; 153 result = schedule_work(&iw->ws);
187 iw->i2400m = i2400m_get(i2400m); 154 if (WARN_ON(result == 0))
188 INIT_WORK(&iw->ws, fn); 155 result = -ENXIO;
189 result = schedule_work(&iw->ws); 156 }
190 if (result == 0)
191 result = -ENXIO;
192error_kzalloc:
193 return result; 157 return result;
194} 158}
195 159
@@ -291,7 +255,7 @@ int i2400m_op_reset(struct wimax_dev *wimax_dev)
291 mutex_lock(&i2400m->init_mutex); 255 mutex_lock(&i2400m->init_mutex);
292 i2400m->reset_ctx = &ctx; 256 i2400m->reset_ctx = &ctx;
293 mutex_unlock(&i2400m->init_mutex); 257 mutex_unlock(&i2400m->init_mutex);
294 result = i2400m->bus_reset(i2400m, I2400M_RT_WARM); 258 result = i2400m_reset(i2400m, I2400M_RT_WARM);
295 if (result < 0) 259 if (result < 0)
296 goto out; 260 goto out;
297 result = wait_for_completion_timeout(&ctx.completion, 4*HZ); 261 result = wait_for_completion_timeout(&ctx.completion, 4*HZ);
@@ -420,9 +384,15 @@ retry:
420 dev_err(dev, "cannot create workqueue\n"); 384 dev_err(dev, "cannot create workqueue\n");
421 goto error_create_workqueue; 385 goto error_create_workqueue;
422 } 386 }
423 result = i2400m->bus_dev_start(i2400m); 387 if (i2400m->bus_dev_start) {
424 if (result < 0) 388 result = i2400m->bus_dev_start(i2400m);
425 goto error_bus_dev_start; 389 if (result < 0)
390 goto error_bus_dev_start;
391 }
392 i2400m->ready = 1;
393 wmb(); /* see i2400m->ready's documentation */
394 /* process pending reports from the device */
395 queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
426 result = i2400m_firmware_check(i2400m); /* fw versions ok? */ 396 result = i2400m_firmware_check(i2400m); /* fw versions ok? */
427 if (result < 0) 397 if (result < 0)
428 goto error_fw_check; 398 goto error_fw_check;
@@ -430,8 +400,6 @@ retry:
430 result = i2400m_check_mac_addr(i2400m); 400 result = i2400m_check_mac_addr(i2400m);
431 if (result < 0) 401 if (result < 0)
432 goto error_check_mac_addr; 402 goto error_check_mac_addr;
433 i2400m->ready = 1;
434 wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
435 result = i2400m_dev_initialize(i2400m); 403 result = i2400m_dev_initialize(i2400m);
436 if (result < 0) 404 if (result < 0)
437 goto error_dev_initialize; 405 goto error_dev_initialize;
@@ -443,8 +411,12 @@ retry:
443 411
444error_dev_initialize: 412error_dev_initialize:
445error_check_mac_addr: 413error_check_mac_addr:
414 i2400m->ready = 0;
415 wmb(); /* see i2400m->ready's documentation */
416 flush_workqueue(i2400m->work_queue);
446error_fw_check: 417error_fw_check:
447 i2400m->bus_dev_stop(i2400m); 418 if (i2400m->bus_dev_stop)
419 i2400m->bus_dev_stop(i2400m);
448error_bus_dev_start: 420error_bus_dev_start:
449 destroy_workqueue(i2400m->work_queue); 421 destroy_workqueue(i2400m->work_queue);
450error_create_workqueue: 422error_create_workqueue:
@@ -466,11 +438,15 @@ error_bootstrap:
466static 438static
467int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags) 439int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
468{ 440{
469 int result; 441 int result = 0;
470 mutex_lock(&i2400m->init_mutex); /* Well, start the device */ 442 mutex_lock(&i2400m->init_mutex); /* Well, start the device */
471 result = __i2400m_dev_start(i2400m, bm_flags); 443 if (i2400m->updown == 0) {
472 if (result >= 0) 444 result = __i2400m_dev_start(i2400m, bm_flags);
473 i2400m->updown = 1; 445 if (result >= 0) {
446 i2400m->updown = 1;
447 wmb(); /* see i2400m->updown's documentation */
448 }
449 }
474 mutex_unlock(&i2400m->init_mutex); 450 mutex_unlock(&i2400m->init_mutex);
475 return result; 451 return result;
476} 452}
@@ -495,9 +471,20 @@ void __i2400m_dev_stop(struct i2400m *i2400m)
495 471
496 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 472 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
497 wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING); 473 wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
474 i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
475 complete(&i2400m->msg_completion);
476 i2400m_net_wake_stop(i2400m);
498 i2400m_dev_shutdown(i2400m); 477 i2400m_dev_shutdown(i2400m);
499 i2400m->ready = 0; 478 /*
500 i2400m->bus_dev_stop(i2400m); 479 * Make sure no report hooks are running *before* we stop the
480 * communication infrastructure with the device.
481 */
482 i2400m->ready = 0; /* nobody can queue work anymore */
483 wmb(); /* see i2400m->ready's documentation */
484 flush_workqueue(i2400m->work_queue);
485
486 if (i2400m->bus_dev_stop)
487 i2400m->bus_dev_stop(i2400m);
501 destroy_workqueue(i2400m->work_queue); 488 destroy_workqueue(i2400m->work_queue);
502 i2400m_rx_release(i2400m); 489 i2400m_rx_release(i2400m);
503 i2400m_tx_release(i2400m); 490 i2400m_tx_release(i2400m);
@@ -518,12 +505,139 @@ void i2400m_dev_stop(struct i2400m *i2400m)
518 if (i2400m->updown) { 505 if (i2400m->updown) {
519 __i2400m_dev_stop(i2400m); 506 __i2400m_dev_stop(i2400m);
520 i2400m->updown = 0; 507 i2400m->updown = 0;
508 wmb(); /* see i2400m->updown's documentation */
521 } 509 }
522 mutex_unlock(&i2400m->init_mutex); 510 mutex_unlock(&i2400m->init_mutex);
523} 511}
524 512
525 513
526/* 514/*
515 * Listen to PM events to cache the firmware before suspend/hibernation
516 *
517 * When the device comes out of suspend, it might go into reset and
518 * firmware has to be uploaded again. At resume, most of the times, we
519 * can't load firmware images from disk, so we need to cache it.
520 *
521 * i2400m_fw_cache() will allocate a kobject and attach the firmware
522 * to it; that way we don't have to worry too much about the fw loader
523 * hitting a race condition.
524 *
525 * Note: modus operandi stolen from the Orinoco driver; thx.
526 */
527static
528int i2400m_pm_notifier(struct notifier_block *notifier,
529 unsigned long pm_event,
530 void *unused)
531{
532 struct i2400m *i2400m =
533 container_of(notifier, struct i2400m, pm_notifier);
534 struct device *dev = i2400m_dev(i2400m);
535
536 d_fnstart(3, dev, "(i2400m %p pm_event %lx)\n", i2400m, pm_event);
537 switch (pm_event) {
538 case PM_HIBERNATION_PREPARE:
539 case PM_SUSPEND_PREPARE:
540 i2400m_fw_cache(i2400m);
541 break;
542 case PM_POST_RESTORE:
543 /* Restore from hibernation failed. We need to clean
544 * up in exactly the same way, so fall through. */
545 case PM_POST_HIBERNATION:
546 case PM_POST_SUSPEND:
547 i2400m_fw_uncache(i2400m);
548 break;
549
550 case PM_RESTORE_PREPARE:
551 default:
552 break;
553 }
554 d_fnend(3, dev, "(i2400m %p pm_event %lx) = void\n", i2400m, pm_event);
555 return NOTIFY_DONE;
556}
557
558
559/*
560 * pre-reset is called before a device is going on reset
561 *
562 * This has to be followed by a call to i2400m_post_reset(), otherwise
563 * bad things might happen.
564 */
565int i2400m_pre_reset(struct i2400m *i2400m)
566{
567 int result;
568 struct device *dev = i2400m_dev(i2400m);
569
570 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
571 d_printf(1, dev, "pre-reset shut down\n");
572
573 result = 0;
574 mutex_lock(&i2400m->init_mutex);
575 if (i2400m->updown) {
576 netif_tx_disable(i2400m->wimax_dev.net_dev);
577 __i2400m_dev_stop(i2400m);
578 result = 0;
579 /* down't set updown to zero -- this way
580 * post_reset can restore properly */
581 }
582 mutex_unlock(&i2400m->init_mutex);
583 if (i2400m->bus_release)
584 i2400m->bus_release(i2400m);
585 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
586 return result;
587}
588EXPORT_SYMBOL_GPL(i2400m_pre_reset);
589
590
591/*
592 * Restore device state after a reset
593 *
594 * Do the work needed after a device reset to bring it up to the same
595 * state as it was before the reset.
596 *
597 * NOTE: this requires i2400m->init_mutex taken
598 */
599int i2400m_post_reset(struct i2400m *i2400m)
600{
601 int result = 0;
602 struct device *dev = i2400m_dev(i2400m);
603
604 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
605 d_printf(1, dev, "post-reset start\n");
606 if (i2400m->bus_setup) {
607 result = i2400m->bus_setup(i2400m);
608 if (result < 0) {
609 dev_err(dev, "bus-specific setup failed: %d\n",
610 result);
611 goto error_bus_setup;
612 }
613 }
614 mutex_lock(&i2400m->init_mutex);
615 if (i2400m->updown) {
616 result = __i2400m_dev_start(
617 i2400m, I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
618 if (result < 0)
619 goto error_dev_start;
620 }
621 mutex_unlock(&i2400m->init_mutex);
622 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
623 return result;
624
625error_dev_start:
626 if (i2400m->bus_release)
627 i2400m->bus_release(i2400m);
628error_bus_setup:
629 /* even if the device was up, it could not be recovered, so we
630 * mark it as down. */
631 i2400m->updown = 0;
632 wmb(); /* see i2400m->updown's documentation */
633 mutex_unlock(&i2400m->init_mutex);
634 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
635 return result;
636}
637EXPORT_SYMBOL_GPL(i2400m_post_reset);
638
639
640/*
527 * The device has rebooted; fix up the device and the driver 641 * The device has rebooted; fix up the device and the driver
528 * 642 *
529 * Tear down the driver communication with the device, reload the 643 * Tear down the driver communication with the device, reload the
@@ -542,56 +656,69 @@ void i2400m_dev_stop(struct i2400m *i2400m)
542 * _stop()], don't do anything, let it fail and handle it. 656 * _stop()], don't do anything, let it fail and handle it.
543 * 657 *
544 * This function is ran always in a thread context 658 * This function is ran always in a thread context
659 *
660 * This function gets passed, as payload to i2400m_work() a 'const
661 * char *' ptr with a "reason" why the reset happened (for messages).
545 */ 662 */
546static 663static
547void __i2400m_dev_reset_handle(struct work_struct *ws) 664void __i2400m_dev_reset_handle(struct work_struct *ws)
548{ 665{
549 int result; 666 int result;
550 struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws); 667 struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
668 const char *reason;
551 struct i2400m *i2400m = iw->i2400m; 669 struct i2400m *i2400m = iw->i2400m;
552 struct device *dev = i2400m_dev(i2400m); 670 struct device *dev = i2400m_dev(i2400m);
553 enum wimax_st wimax_state;
554 struct i2400m_reset_ctx *ctx = i2400m->reset_ctx; 671 struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
555 672
556 d_fnstart(3, dev, "(ws %p i2400m %p)\n", ws, i2400m); 673 if (WARN_ON(iw->pl_size != sizeof(reason)))
674 reason = "SW BUG: reason n/a";
675 else
676 memcpy(&reason, iw->pl, sizeof(reason));
677
678 d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
679
557 result = 0; 680 result = 0;
558 if (mutex_trylock(&i2400m->init_mutex) == 0) { 681 if (mutex_trylock(&i2400m->init_mutex) == 0) {
559 /* We are still in i2400m_dev_start() [let it fail] or 682 /* We are still in i2400m_dev_start() [let it fail] or
560 * i2400m_dev_stop() [we are shutting down anyway, so 683 * i2400m_dev_stop() [we are shutting down anyway, so
561 * ignore it] or we are resetting somewhere else. */ 684 * ignore it] or we are resetting somewhere else. */
562 dev_err(dev, "device rebooted\n"); 685 dev_err(dev, "device rebooted somewhere else?\n");
563 i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST); 686 i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
564 complete(&i2400m->msg_completion); 687 complete(&i2400m->msg_completion);
565 goto out; 688 goto out;
566 } 689 }
567 wimax_state = wimax_state_get(&i2400m->wimax_dev); 690 if (i2400m->updown == 0) {
568 if (wimax_state < WIMAX_ST_UNINITIALIZED) { 691 dev_info(dev, "%s: device is down, doing nothing\n", reason);
569 dev_info(dev, "device rebooted: it is down, ignoring\n"); 692 goto out_unlock;
570 goto out_unlock; /* ifconfig up/down wasn't called */
571 } 693 }
572 dev_err(dev, "device rebooted: reinitializing driver\n"); 694 dev_err(dev, "%s: reinitializing driver\n", reason);
573 __i2400m_dev_stop(i2400m); 695 __i2400m_dev_stop(i2400m);
574 i2400m->updown = 0;
575 result = __i2400m_dev_start(i2400m, 696 result = __i2400m_dev_start(i2400m,
576 I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT); 697 I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
577 if (result < 0) { 698 if (result < 0) {
578 dev_err(dev, "device reboot: cannot start the device: %d\n", 699 i2400m->updown = 0;
579 result); 700 wmb(); /* see i2400m->updown's documentation */
580 result = i2400m->bus_reset(i2400m, I2400M_RT_BUS); 701 dev_err(dev, "%s: cannot start the device: %d\n",
581 if (result >= 0) 702 reason, result);
582 result = -ENODEV; 703 result = -EUCLEAN;
583 } else 704 }
584 i2400m->updown = 1;
585out_unlock: 705out_unlock:
586 if (i2400m->reset_ctx) { 706 if (i2400m->reset_ctx) {
587 ctx->result = result; 707 ctx->result = result;
588 complete(&ctx->completion); 708 complete(&ctx->completion);
589 } 709 }
590 mutex_unlock(&i2400m->init_mutex); 710 mutex_unlock(&i2400m->init_mutex);
711 if (result == -EUCLEAN) {
712 /* ops, need to clean up [w/ init_mutex not held] */
713 result = i2400m_reset(i2400m, I2400M_RT_BUS);
714 if (result >= 0)
715 result = -ENODEV;
716 }
591out: 717out:
592 i2400m_put(i2400m); 718 i2400m_put(i2400m);
593 kfree(iw); 719 kfree(iw);
594 d_fnend(3, dev, "(ws %p i2400m %p) = void\n", ws, i2400m); 720 d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
721 ws, i2400m, reason);
595 return; 722 return;
596} 723}
597 724
@@ -608,16 +735,104 @@ out:
608 * reinitializing the driver to handle the reset, calling into the 735 * reinitializing the driver to handle the reset, calling into the
609 * bus-specific functions ops as needed. 736 * bus-specific functions ops as needed.
610 */ 737 */
611int i2400m_dev_reset_handle(struct i2400m *i2400m) 738int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
612{ 739{
613 i2400m->boot_mode = 1; 740 i2400m->boot_mode = 1;
614 wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */ 741 wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
615 return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle, 742 return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
616 GFP_ATOMIC); 743 GFP_ATOMIC, &reason, sizeof(reason));
617} 744}
618EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle); 745EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
619 746
620 747
748/*
749 * Alloc the command and ack buffers for boot mode
750 *
751 * Get the buffers needed to deal with boot mode messages. These
752 * buffers need to be allocated before the sdio recieve irq is setup.
753 */
754static
755int i2400m_bm_buf_alloc(struct i2400m *i2400m)
756{
757 int result;
758
759 result = -ENOMEM;
760 i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
761 if (i2400m->bm_cmd_buf == NULL)
762 goto error_bm_cmd_kzalloc;
763 i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL);
764 if (i2400m->bm_ack_buf == NULL)
765 goto error_bm_ack_buf_kzalloc;
766 return 0;
767
768error_bm_ack_buf_kzalloc:
769 kfree(i2400m->bm_cmd_buf);
770error_bm_cmd_kzalloc:
771 return result;
772}
773
774
775/*
776 * Free boot mode command and ack buffers.
777 */
778static
779void i2400m_bm_buf_free(struct i2400m *i2400m)
780{
781 kfree(i2400m->bm_ack_buf);
782 kfree(i2400m->bm_cmd_buf);
783}
784
785
786/**
787 * i2400m_init - Initialize a 'struct i2400m' from all zeroes
788 *
789 * This is a bus-generic API call.
790 */
791void i2400m_init(struct i2400m *i2400m)
792{
793 wimax_dev_init(&i2400m->wimax_dev);
794
795 i2400m->boot_mode = 1;
796 i2400m->rx_reorder = 1;
797 init_waitqueue_head(&i2400m->state_wq);
798
799 spin_lock_init(&i2400m->tx_lock);
800 i2400m->tx_pl_min = UINT_MAX;
801 i2400m->tx_size_min = UINT_MAX;
802
803 spin_lock_init(&i2400m->rx_lock);
804 i2400m->rx_pl_min = UINT_MAX;
805 i2400m->rx_size_min = UINT_MAX;
806 INIT_LIST_HEAD(&i2400m->rx_reports);
807 INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work);
808
809 mutex_init(&i2400m->msg_mutex);
810 init_completion(&i2400m->msg_completion);
811
812 mutex_init(&i2400m->init_mutex);
813 /* wake_tx_ws is initialized in i2400m_tx_setup() */
814}
815EXPORT_SYMBOL_GPL(i2400m_init);
816
817
818int i2400m_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
819{
820 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
821
822 /*
823 * Make sure we stop TXs and down the carrier before
824 * resetting; this is needed to avoid things like
825 * i2400m_wake_tx() scheduling stuff in parallel.
826 */
827 if (net_dev->reg_state == NETREG_REGISTERED) {
828 netif_tx_disable(net_dev);
829 netif_carrier_off(net_dev);
830 }
831 return i2400m->bus_reset(i2400m, rt);
832}
833EXPORT_SYMBOL_GPL(i2400m_reset);
834
835
621/** 836/**
622 * i2400m_setup - bus-generic setup function for the i2400m device 837 * i2400m_setup - bus-generic setup function for the i2400m device
623 * 838 *
@@ -625,13 +840,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
625 * 840 *
626 * Returns: 0 if ok, < 0 errno code on error. 841 * Returns: 0 if ok, < 0 errno code on error.
627 * 842 *
628 * Initializes the bus-generic parts of the i2400m driver; the 843 * Sets up basic device comunication infrastructure, boots the ROM to
629 * bus-specific parts have been initialized, function pointers filled 844 * read the MAC address, registers with the WiMAX and network stacks
630 * out by the bus-specific probe function. 845 * and then brings up the device.
631 *
632 * As well, this registers the WiMAX and net device nodes. Once this
633 * function returns, the device is operative and has to be ready to
634 * receive and send network traffic and WiMAX control operations.
635 */ 846 */
636int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags) 847int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
637{ 848{
@@ -645,16 +856,21 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
645 snprintf(wimax_dev->name, sizeof(wimax_dev->name), 856 snprintf(wimax_dev->name, sizeof(wimax_dev->name),
646 "i2400m-%s:%s", dev->bus->name, dev_name(dev)); 857 "i2400m-%s:%s", dev->bus->name, dev_name(dev));
647 858
648 i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL); 859 result = i2400m_bm_buf_alloc(i2400m);
649 if (i2400m->bm_cmd_buf == NULL) { 860 if (result < 0) {
650 dev_err(dev, "cannot allocate USB command buffer\n"); 861 dev_err(dev, "cannot allocate bootmode scratch buffers\n");
651 goto error_bm_cmd_kzalloc; 862 goto error_bm_buf_alloc;
652 } 863 }
653 i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL); 864
654 if (i2400m->bm_ack_buf == NULL) { 865 if (i2400m->bus_setup) {
655 dev_err(dev, "cannot allocate USB ack buffer\n"); 866 result = i2400m->bus_setup(i2400m);
656 goto error_bm_ack_buf_kzalloc; 867 if (result < 0) {
868 dev_err(dev, "bus-specific setup failed: %d\n",
869 result);
870 goto error_bus_setup;
871 }
657 } 872 }
873
658 result = i2400m_bootrom_init(i2400m, bm_flags); 874 result = i2400m_bootrom_init(i2400m, bm_flags);
659 if (result < 0) { 875 if (result < 0) {
660 dev_err(dev, "read mac addr: bootrom init " 876 dev_err(dev, "read mac addr: bootrom init "
@@ -666,6 +882,9 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
666 goto error_read_mac_addr; 882 goto error_read_mac_addr;
667 random_ether_addr(i2400m->src_mac_addr); 883 random_ether_addr(i2400m->src_mac_addr);
668 884
885 i2400m->pm_notifier.notifier_call = i2400m_pm_notifier;
886 register_pm_notifier(&i2400m->pm_notifier);
887
669 result = register_netdev(net_dev); /* Okey dokey, bring it up */ 888 result = register_netdev(net_dev); /* Okey dokey, bring it up */
670 if (result < 0) { 889 if (result < 0) {
671 dev_err(dev, "cannot register i2400m network device: %d\n", 890 dev_err(dev, "cannot register i2400m network device: %d\n",
@@ -674,18 +893,13 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
674 } 893 }
675 netif_carrier_off(net_dev); 894 netif_carrier_off(net_dev);
676 895
677 result = i2400m_dev_start(i2400m, bm_flags);
678 if (result < 0)
679 goto error_dev_start;
680
681 i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user; 896 i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user;
682 i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle; 897 i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle;
683 i2400m->wimax_dev.op_reset = i2400m_op_reset; 898 i2400m->wimax_dev.op_reset = i2400m_op_reset;
899
684 result = wimax_dev_add(&i2400m->wimax_dev, net_dev); 900 result = wimax_dev_add(&i2400m->wimax_dev, net_dev);
685 if (result < 0) 901 if (result < 0)
686 goto error_wimax_dev_add; 902 goto error_wimax_dev_add;
687 /* User space needs to do some init stuff */
688 wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
689 903
690 /* Now setup all that requires a registered net and wimax device. */ 904 /* Now setup all that requires a registered net and wimax device. */
691 result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group); 905 result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group);
@@ -693,30 +907,37 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
693 dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result); 907 dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result);
694 goto error_sysfs_setup; 908 goto error_sysfs_setup;
695 } 909 }
910
696 result = i2400m_debugfs_add(i2400m); 911 result = i2400m_debugfs_add(i2400m);
697 if (result < 0) { 912 if (result < 0) {
698 dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result); 913 dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result);
699 goto error_debugfs_setup; 914 goto error_debugfs_setup;
700 } 915 }
916
917 result = i2400m_dev_start(i2400m, bm_flags);
918 if (result < 0)
919 goto error_dev_start;
701 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 920 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
702 return result; 921 return result;
703 922
923error_dev_start:
924 i2400m_debugfs_rm(i2400m);
704error_debugfs_setup: 925error_debugfs_setup:
705 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, 926 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
706 &i2400m_dev_attr_group); 927 &i2400m_dev_attr_group);
707error_sysfs_setup: 928error_sysfs_setup:
708 wimax_dev_rm(&i2400m->wimax_dev); 929 wimax_dev_rm(&i2400m->wimax_dev);
709error_wimax_dev_add: 930error_wimax_dev_add:
710 i2400m_dev_stop(i2400m);
711error_dev_start:
712 unregister_netdev(net_dev); 931 unregister_netdev(net_dev);
713error_register_netdev: 932error_register_netdev:
933 unregister_pm_notifier(&i2400m->pm_notifier);
714error_read_mac_addr: 934error_read_mac_addr:
715error_bootrom_init: 935error_bootrom_init:
716 kfree(i2400m->bm_ack_buf); 936 if (i2400m->bus_release)
717error_bm_ack_buf_kzalloc: 937 i2400m->bus_release(i2400m);
718 kfree(i2400m->bm_cmd_buf); 938error_bus_setup:
719error_bm_cmd_kzalloc: 939 i2400m_bm_buf_free(i2400m);
940error_bm_buf_alloc:
720 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 941 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
721 return result; 942 return result;
722} 943}
@@ -735,14 +956,17 @@ void i2400m_release(struct i2400m *i2400m)
735 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 956 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
736 netif_stop_queue(i2400m->wimax_dev.net_dev); 957 netif_stop_queue(i2400m->wimax_dev.net_dev);
737 958
959 i2400m_dev_stop(i2400m);
960
738 i2400m_debugfs_rm(i2400m); 961 i2400m_debugfs_rm(i2400m);
739 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, 962 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
740 &i2400m_dev_attr_group); 963 &i2400m_dev_attr_group);
741 wimax_dev_rm(&i2400m->wimax_dev); 964 wimax_dev_rm(&i2400m->wimax_dev);
742 i2400m_dev_stop(i2400m);
743 unregister_netdev(i2400m->wimax_dev.net_dev); 965 unregister_netdev(i2400m->wimax_dev.net_dev);
744 kfree(i2400m->bm_ack_buf); 966 unregister_pm_notifier(&i2400m->pm_notifier);
745 kfree(i2400m->bm_cmd_buf); 967 if (i2400m->bus_release)
968 i2400m->bus_release(i2400m);
969 i2400m_bm_buf_free(i2400m);
746 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 970 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
747} 971}
748EXPORT_SYMBOL_GPL(i2400m_release); 972EXPORT_SYMBOL_GPL(i2400m_release);
@@ -759,6 +983,7 @@ struct d_level D_LEVEL[] = {
759 D_SUBMODULE_DEFINE(netdev), 983 D_SUBMODULE_DEFINE(netdev),
760 D_SUBMODULE_DEFINE(rfkill), 984 D_SUBMODULE_DEFINE(rfkill),
761 D_SUBMODULE_DEFINE(rx), 985 D_SUBMODULE_DEFINE(rx),
986 D_SUBMODULE_DEFINE(sysfs),
762 D_SUBMODULE_DEFINE(tx), 987 D_SUBMODULE_DEFINE(tx),
763}; 988};
764size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); 989size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
@@ -767,7 +992,9 @@ size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
767static 992static
768int __init i2400m_driver_init(void) 993int __init i2400m_driver_init(void)
769{ 994{
770 return 0; 995 d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400m_debug_params,
996 "i2400m.debug");
997 return i2400m_barker_db_init(i2400m_barkers_params);
771} 998}
772module_init(i2400m_driver_init); 999module_init(i2400m_driver_init);
773 1000
@@ -776,6 +1003,7 @@ void __exit i2400m_driver_exit(void)
776{ 1003{
777 /* for scheds i2400m_dev_reset_handle() */ 1004 /* for scheds i2400m_dev_reset_handle() */
778 flush_scheduled_work(); 1005 flush_scheduled_work();
1006 i2400m_barker_db_exit();
779 return; 1007 return;
780} 1008}
781module_exit(i2400m_driver_exit); 1009module_exit(i2400m_driver_exit);
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e81750e54452..64cdfeb299ca 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -40,11 +40,9 @@
40 * 40 *
41 * THE PROCEDURE 41 * THE PROCEDURE
42 * 42 *
43 * (this is decribed for USB, but for SDIO is similar) 43 * The 2400m and derived devices work in two modes: boot-mode or
44 * 44 * normal mode. In boot mode we can execute only a handful of commands
45 * The 2400m works in two modes: boot-mode or normal mode. In boot 45 * targeted at uploading the firmware and launching it.
46 * mode we can execute only a handful of commands targeted at
47 * uploading the firmware and launching it.
48 * 46 *
49 * The 2400m enters boot mode when it is first connected to the 47 * The 2400m enters boot mode when it is first connected to the
50 * system, when it crashes and when you ask it to reboot. There are 48 * system, when it crashes and when you ask it to reboot. There are
@@ -52,18 +50,26 @@
52 * firmwares signed with a certain private key, non-signed takes any 50 * firmwares signed with a certain private key, non-signed takes any
53 * firmware. Normal hardware takes only signed firmware. 51 * firmware. Normal hardware takes only signed firmware.
54 * 52 *
55 * Upon entrance to boot mode, the device sends a few zero length 53 * On boot mode, in USB, we write to the device using the bulk out
56 * packets (ZLPs) on the notification endpoint, then a reboot barker 54 * endpoint and read from it in the notification endpoint. In SDIO we
57 * (4 le32 words with value I2400M_{S,N}BOOT_BARKER). We ack it by 55 * talk to it via the write address and read from the read address.
58 * sending the same barker on the bulk out endpoint. The device acks 56 *
59 * with a reboot ack barker (4 le32 words with value 0xfeedbabe) and 57 * Upon entrance to boot mode, the device sends (preceeded with a few
60 * then the device is fully rebooted. At this point we can upload the 58 * zero length packets (ZLPs) on the notification endpoint in USB) a
61 * firmware. 59 * reboot barker (4 le32 words with the same value). We ack it by
60 * sending the same barker to the device. The device acks with a
61 * reboot ack barker (4 le32 words with value I2400M_ACK_BARKER) and
62 * then is fully booted. At this point we can upload the firmware.
63 *
64 * Note that different iterations of the device and EEPROM
65 * configurations will send different [re]boot barkers; these are
66 * collected in i2400m_barker_db along with the firmware
67 * characteristics they require.
62 * 68 *
63 * This process is accomplished by the i2400m_bootrom_init() 69 * This process is accomplished by the i2400m_bootrom_init()
64 * function. All the device interaction happens through the 70 * function. All the device interaction happens through the
65 * i2400m_bm_cmd() [boot mode command]. Special return values will 71 * i2400m_bm_cmd() [boot mode command]. Special return values will
66 * indicate if the device resets. 72 * indicate if the device did reset during the process.
67 * 73 *
68 * After this, we read the MAC address and then (if needed) 74 * After this, we read the MAC address and then (if needed)
69 * reinitialize the device. We need to read it ahead of time because 75 * reinitialize the device. We need to read it ahead of time because
@@ -72,11 +78,11 @@
72 * 78 *
73 * We can then upload the firmware file. The file is composed of a BCF 79 * We can then upload the firmware file. The file is composed of a BCF
74 * header (basic data, keys and signatures) and a list of write 80 * header (basic data, keys and signatures) and a list of write
75 * commands and payloads. We first upload the header 81 * commands and payloads. Optionally more BCF headers might follow the
76 * [i2400m_dnload_init()] and then pass the commands and payloads 82 * main payload. We first upload the header [i2400m_dnload_init()] and
77 * verbatim to the i2400m_bm_cmd() function 83 * then pass the commands and payloads verbatim to the i2400m_bm_cmd()
78 * [i2400m_dnload_bcf()]. Then we tell the device to jump to the new 84 * function [i2400m_dnload_bcf()]. Then we tell the device to jump to
79 * firmware [i2400m_dnload_finalize()]. 85 * the new firmware [i2400m_dnload_finalize()].
80 * 86 *
81 * Once firmware is uploaded, we are good to go :) 87 * Once firmware is uploaded, we are good to go :)
82 * 88 *
@@ -99,18 +105,32 @@
99 * read an acknolwedgement from it (or an asynchronous notification) 105 * read an acknolwedgement from it (or an asynchronous notification)
100 * from it. 106 * from it.
101 * 107 *
108 * FIRMWARE LOADING
109 *
110 * Note that in some cases, we can't just load a firmware file (for
111 * example, when resuming). For that, we might cache the firmware
112 * file. Thus, when doing the bootstrap, if there is a cache firmware
113 * file, it is used; if not, loading from disk is attempted.
114 *
102 * ROADMAP 115 * ROADMAP
103 * 116 *
117 * i2400m_barker_db_init Called by i2400m_driver_init()
118 * i2400m_barker_db_add
119 *
120 * i2400m_barker_db_exit Called by i2400m_driver_exit()
121 *
104 * i2400m_dev_bootstrap Called by __i2400m_dev_start() 122 * i2400m_dev_bootstrap Called by __i2400m_dev_start()
105 * request_firmware 123 * request_firmware
106 * i2400m_fw_check 124 * i2400m_fw_bootstrap
107 * i2400m_fw_dnload 125 * i2400m_fw_check
126 * i2400m_fw_hdr_check
127 * i2400m_fw_dnload
108 * release_firmware 128 * release_firmware
109 * 129 *
110 * i2400m_fw_dnload 130 * i2400m_fw_dnload
111 * i2400m_bootrom_init 131 * i2400m_bootrom_init
112 * i2400m_bm_cmd 132 * i2400m_bm_cmd
113 * i2400m->bus_reset 133 * i2400m_reset
114 * i2400m_dnload_init 134 * i2400m_dnload_init
115 * i2400m_dnload_init_signed 135 * i2400m_dnload_init_signed
116 * i2400m_dnload_init_nonsigned 136 * i2400m_dnload_init_nonsigned
@@ -125,9 +145,14 @@
125 * i2400m->bus_bm_cmd_send() 145 * i2400m->bus_bm_cmd_send()
126 * i2400m->bus_bm_wait_for_ack 146 * i2400m->bus_bm_wait_for_ack
127 * __i2400m_bm_ack_verify 147 * __i2400m_bm_ack_verify
148 * i2400m_is_boot_barker
128 * 149 *
129 * i2400m_bm_cmd_prepare Used by bus-drivers to prep 150 * i2400m_bm_cmd_prepare Used by bus-drivers to prep
130 * commands before sending 151 * commands before sending
152 *
153 * i2400m_pm_notifier Called on Power Management events
154 * i2400m_fw_cache
155 * i2400m_fw_uncache
131 */ 156 */
132#include <linux/firmware.h> 157#include <linux/firmware.h>
133#include <linux/sched.h> 158#include <linux/sched.h>
@@ -175,6 +200,240 @@ EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
175 200
176 201
177/* 202/*
203 * Database of known barkers.
204 *
205 * A barker is what the device sends indicating he is ready to be
206 * bootloaded. Different versions of the device will send different
207 * barkers. Depending on the barker, it might mean the device wants
208 * some kind of firmware or the other.
209 */
210static struct i2400m_barker_db {
211 __le32 data[4];
212} *i2400m_barker_db;
213static size_t i2400m_barker_db_used, i2400m_barker_db_size;
214
215
/*
 * Grow-by-doubling reallocation that zeroes the newly added tail.
 *
 * @ptr: in/out pointer to the buffer (updated on success)
 * @_count: in/out element count (doubled on success; a count of 0
 *     grows to 2)
 * @el_size: size of one element in bytes
 * @gfp_flags: allocation flags passed to krealloc()
 *
 * Returns: 0 on success (*ptr and *_count updated), -ENOMEM on
 *     failure (*ptr and *_count left untouched; krealloc() keeps the
 *     old buffer valid on failure).
 */
static
int i2400m_zrealloc_2x(void **ptr, size_t *_count, size_t el_size,
		       gfp_t gfp_flags)
{
	size_t old_count = *_count,
		new_count = old_count ? 2 * old_count : 2,
		old_size = el_size * old_count,
		new_size = el_size * new_count;
	void *nptr = krealloc(*ptr, new_size, gfp_flags);
	if (nptr) {
		/* zero the other half or the whole thing if old_count
		 * was zero; the new tail is exactly old_size bytes
		 * because new_size == 2 * old_size here */
		if (old_size == 0)
			memset(nptr, 0, new_size);
		else
			memset(nptr + old_size, 0, old_size);
		*_count = new_count;
		*ptr = nptr;
		return 0;
	} else
		return -ENOMEM;
}
238
239
240/*
241 * Add a barker to the database
242 *
243 * This cannot used outside of this module and only at at module_init
244 * time. This is to avoid the need to do locking.
245 */
246static
247int i2400m_barker_db_add(u32 barker_id)
248{
249 int result;
250
251 struct i2400m_barker_db *barker;
252 if (i2400m_barker_db_used >= i2400m_barker_db_size) {
253 result = i2400m_zrealloc_2x(
254 (void **) &i2400m_barker_db, &i2400m_barker_db_size,
255 sizeof(i2400m_barker_db[0]), GFP_KERNEL);
256 if (result < 0)
257 return result;
258 }
259 barker = i2400m_barker_db + i2400m_barker_db_used++;
260 barker->data[0] = le32_to_cpu(barker_id);
261 barker->data[1] = le32_to_cpu(barker_id);
262 barker->data[2] = le32_to_cpu(barker_id);
263 barker->data[3] = le32_to_cpu(barker_id);
264 return 0;
265}
266
267
268void i2400m_barker_db_exit(void)
269{
270 kfree(i2400m_barker_db);
271 i2400m_barker_db = NULL;
272 i2400m_barker_db_size = 0;
273 i2400m_barker_db_used = 0;
274}
275
276
277/*
278 * Helper function to add all the known stable barkers to the barker
279 * database.
280 */
281static
282int i2400m_barker_db_known_barkers(void)
283{
284 int result;
285
286 result = i2400m_barker_db_add(I2400M_NBOOT_BARKER);
287 if (result < 0)
288 goto error_add;
289 result = i2400m_barker_db_add(I2400M_SBOOT_BARKER);
290 if (result < 0)
291 goto error_add;
292 result = i2400m_barker_db_add(I2400M_SBOOT_BARKER_6050);
293 if (result < 0)
294 goto error_add;
295error_add:
296 return result;
297}
298
299
300/*
301 * Initialize the barker database
302 *
303 * This can only be used from the module_init function for this
304 * module; this is to avoid the need to do locking.
305 *
306 * @options: command line argument with extra barkers to
307 * recognize. This is a comma-separated list of 32-bit hex
308 * numbers. They are appended to the existing list. Setting 0
309 * cleans the existing list and starts a new one.
310 */
311int i2400m_barker_db_init(const char *_options)
312{
313 int result;
314 char *options = NULL, *options_orig, *token;
315
316 i2400m_barker_db = NULL;
317 i2400m_barker_db_size = 0;
318 i2400m_barker_db_used = 0;
319
320 result = i2400m_barker_db_known_barkers();
321 if (result < 0)
322 goto error_add;
323 /* parse command line options from i2400m.barkers */
324 if (_options != NULL) {
325 unsigned barker;
326
327 options_orig = kstrdup(_options, GFP_KERNEL);
328 if (options_orig == NULL)
329 goto error_parse;
330 options = options_orig;
331
332 while ((token = strsep(&options, ",")) != NULL) {
333 if (*token == '\0') /* eat joint commas */
334 continue;
335 if (sscanf(token, "%x", &barker) != 1
336 || barker > 0xffffffff) {
337 printk(KERN_ERR "%s: can't recognize "
338 "i2400m.barkers value '%s' as "
339 "a 32-bit number\n",
340 __func__, token);
341 result = -EINVAL;
342 goto error_parse;
343 }
344 if (barker == 0) {
345 /* clean list and start new */
346 i2400m_barker_db_exit();
347 continue;
348 }
349 result = i2400m_barker_db_add(barker);
350 if (result < 0)
351 goto error_add;
352 }
353 kfree(options_orig);
354 }
355 return 0;
356
357error_parse:
358error_add:
359 kfree(i2400m_barker_db);
360 return result;
361}
362
363
364/*
365 * Recognize a boot barker
366 *
367 * @buf: buffer where the boot barker.
368 * @buf_size: size of the buffer (has to be 16 bytes). It is passed
369 * here so the function can check it for the caller.
370 *
371 * Note that as a side effect, upon identifying the obtained boot
372 * barker, this function will set i2400m->barker to point to the right
373 * barker database entry. Subsequent calls to the function will result
374 * in verifying that the same type of boot barker is returned when the
375 * device [re]boots (as long as the same device instance is used).
376 *
377 * Return: 0 if @buf matches a known boot barker. -ENOENT if the
378 * buffer in @buf doesn't match any boot barker in the database or
379 * -EILSEQ if the buffer doesn't have the right size.
380 */
381int i2400m_is_boot_barker(struct i2400m *i2400m,
382 const void *buf, size_t buf_size)
383{
384 int result;
385 struct device *dev = i2400m_dev(i2400m);
386 struct i2400m_barker_db *barker;
387 int i;
388
389 result = -ENOENT;
390 if (buf_size != sizeof(i2400m_barker_db[i].data))
391 return result;
392
393 /* Short circuit if we have already discovered the barker
394 * associated with the device. */
395 if (i2400m->barker
396 && !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) {
397 unsigned index = (i2400m->barker - i2400m_barker_db)
398 / sizeof(*i2400m->barker);
399 d_printf(2, dev, "boot barker cache-confirmed #%u/%08x\n",
400 index, le32_to_cpu(i2400m->barker->data[0]));
401 return 0;
402 }
403
404 for (i = 0; i < i2400m_barker_db_used; i++) {
405 barker = &i2400m_barker_db[i];
406 BUILD_BUG_ON(sizeof(barker->data) != 16);
407 if (memcmp(buf, barker->data, sizeof(barker->data)))
408 continue;
409
410 if (i2400m->barker == NULL) {
411 i2400m->barker = barker;
412 d_printf(1, dev, "boot barker set to #%u/%08x\n",
413 i, le32_to_cpu(barker->data[0]));
414 if (barker->data[0] == le32_to_cpu(I2400M_NBOOT_BARKER))
415 i2400m->sboot = 0;
416 else
417 i2400m->sboot = 1;
418 } else if (i2400m->barker != barker) {
419 dev_err(dev, "HW inconsistency: device "
420 "reports a different boot barker "
421 "than set (from %08x to %08x)\n",
422 le32_to_cpu(i2400m->barker->data[0]),
423 le32_to_cpu(barker->data[0]));
424 result = -EIO;
425 } else
426 d_printf(2, dev, "boot barker confirmed #%u/%08x\n",
427 i, le32_to_cpu(barker->data[0]));
428 result = 0;
429 break;
430 }
431 return result;
432}
433EXPORT_SYMBOL_GPL(i2400m_is_boot_barker);
434
435
436/*
178 * Verify the ack data received 437 * Verify the ack data received
179 * 438 *
180 * Given a reply to a boot mode command, chew it and verify everything 439 * Given a reply to a boot mode command, chew it and verify everything
@@ -204,20 +463,10 @@ ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode,
204 opcode, ack_size, sizeof(*ack)); 463 opcode, ack_size, sizeof(*ack));
205 goto error_ack_short; 464 goto error_ack_short;
206 } 465 }
207 if (ack_size == sizeof(i2400m_NBOOT_BARKER) 466 result = i2400m_is_boot_barker(i2400m, ack, ack_size);
208 && memcmp(ack, i2400m_NBOOT_BARKER, sizeof(*ack)) == 0) { 467 if (result >= 0) {
209 result = -ERESTARTSYS; 468 result = -ERESTARTSYS;
210 i2400m->sboot = 0; 469 d_printf(6, dev, "boot-mode cmd %d: HW boot barker\n", opcode);
211 d_printf(6, dev, "boot-mode cmd %d: "
212 "HW non-signed boot barker\n", opcode);
213 goto error_reboot;
214 }
215 if (ack_size == sizeof(i2400m_SBOOT_BARKER)
216 && memcmp(ack, i2400m_SBOOT_BARKER, sizeof(*ack)) == 0) {
217 result = -ERESTARTSYS;
218 i2400m->sboot = 1;
219 d_printf(6, dev, "boot-mode cmd %d: HW signed reboot barker\n",
220 opcode);
221 goto error_reboot; 470 goto error_reboot;
222 } 471 }
223 if (ack_size == sizeof(i2400m_ACK_BARKER) 472 if (ack_size == sizeof(i2400m_ACK_BARKER)
@@ -343,7 +592,6 @@ ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
343 BUG_ON(i2400m->boot_mode == 0); 592 BUG_ON(i2400m->boot_mode == 0);
344 593
345 if (cmd != NULL) { /* send the command */ 594 if (cmd != NULL) { /* send the command */
346 memcpy(i2400m->bm_cmd_buf, cmd, cmd_size);
347 result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags); 595 result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
348 if (result < 0) 596 if (result < 0)
349 goto error_cmd_send; 597 goto error_cmd_send;
@@ -432,8 +680,8 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
432 * Download a BCF file's sections to the device 680 * Download a BCF file's sections to the device
433 * 681 *
434 * @i2400m: device descriptor 682 * @i2400m: device descriptor
435 * @bcf: pointer to firmware data (followed by the payloads). Assumed 683 * @bcf: pointer to firmware data (first header followed by the
436 * verified and consistent. 684 * payloads). Assumed verified and consistent.
437 * @bcf_len: length (in bytes) of the @bcf buffer. 685 * @bcf_len: length (in bytes) of the @bcf buffer.
438 * 686 *
439 * Returns: < 0 errno code on error or the offset to the jump instruction. 687 * Returns: < 0 errno code on error or the offset to the jump instruction.
@@ -472,14 +720,17 @@ ssize_t i2400m_dnload_bcf(struct i2400m *i2400m,
472 "downloading section #%zu (@%zu %zu B) to 0x%08x\n", 720 "downloading section #%zu (@%zu %zu B) to 0x%08x\n",
473 section, offset, sizeof(*bh) + data_size, 721 section, offset, sizeof(*bh) + data_size,
474 le32_to_cpu(bh->target_addr)); 722 le32_to_cpu(bh->target_addr));
475 if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP) { 723 /*
476 /* Secure boot needs to stop here */ 724 * We look for JUMP cmd from the bootmode header,
477 d_printf(5, dev, "signed jump found @%zu\n", offset); 725 * either I2400M_BRH_SIGNED_JUMP for secure boot
726 * or I2400M_BRH_JUMP for unsecure boot, the last chunk
727 * should be the bootmode header with JUMP cmd.
728 */
729 if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP ||
730 i2400m_brh_get_opcode(bh) == I2400M_BRH_JUMP) {
731 d_printf(5, dev, "jump found @%zu\n", offset);
478 break; 732 break;
479 } 733 }
480 if (offset + section_size == bcf_len)
481 /* Non-secure boot stops here */
482 break;
483 if (offset + section_size > bcf_len) { 734 if (offset + section_size > bcf_len) {
484 dev_err(dev, "fw %s: bad section #%zu, " 735 dev_err(dev, "fw %s: bad section #%zu, "
485 "end (@%zu) beyond EOF (@%zu)\n", 736 "end (@%zu) beyond EOF (@%zu)\n",
@@ -510,13 +761,30 @@ error_send:
510 761
511 762
512/* 763/*
764 * Indicate if the device emitted a reboot barker that indicates
765 * "signed boot"
766 */
767static
768unsigned i2400m_boot_is_signed(struct i2400m *i2400m)
769{
770 return likely(i2400m->sboot);
771}
772
773
774/*
513 * Do the final steps of uploading firmware 775 * Do the final steps of uploading firmware
514 * 776 *
777 * @bcf_hdr: BCF header we are actually using
778 * @bcf: pointer to the firmware image (which matches the first header
779 * that is followed by the actual payloads).
780 * @offset: [byte] offset into @bcf for the command we need to send.
781 *
515 * Depending on the boot mode (signed vs non-signed), different 782 * Depending on the boot mode (signed vs non-signed), different
516 * actions need to be taken. 783 * actions need to be taken.
517 */ 784 */
518static 785static
519int i2400m_dnload_finalize(struct i2400m *i2400m, 786int i2400m_dnload_finalize(struct i2400m *i2400m,
787 const struct i2400m_bcf_hdr *bcf_hdr,
520 const struct i2400m_bcf_hdr *bcf, size_t offset) 788 const struct i2400m_bcf_hdr *bcf, size_t offset)
521{ 789{
522 int ret = 0; 790 int ret = 0;
@@ -530,10 +798,14 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
530 798
531 d_fnstart(3, dev, "offset %zu\n", offset); 799 d_fnstart(3, dev, "offset %zu\n", offset);
532 cmd = (void *) bcf + offset; 800 cmd = (void *) bcf + offset;
533 if (i2400m->sboot == 0) { 801 if (i2400m_boot_is_signed(i2400m) == 0) {
534 struct i2400m_bootrom_header jump_ack; 802 struct i2400m_bootrom_header jump_ack;
535 d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n", 803 d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n",
536 le32_to_cpu(cmd->target_addr)); 804 le32_to_cpu(cmd->target_addr));
805 cmd_buf = i2400m->bm_cmd_buf;
806 memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
807 cmd = &cmd_buf->cmd;
808 /* now cmd points to the actual bootrom_header in cmd_buf */
537 i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP); 809 i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
538 cmd->data_size = 0; 810 cmd->data_size = 0;
539 ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), 811 ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
@@ -544,12 +816,13 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
544 cmd_buf = i2400m->bm_cmd_buf; 816 cmd_buf = i2400m->bm_cmd_buf;
545 memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd)); 817 memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
546 signature_block_offset = 818 signature_block_offset =
547 sizeof(*bcf) 819 sizeof(*bcf_hdr)
548 + le32_to_cpu(bcf->key_size) * sizeof(u32) 820 + le32_to_cpu(bcf_hdr->key_size) * sizeof(u32)
549 + le32_to_cpu(bcf->exponent_size) * sizeof(u32); 821 + le32_to_cpu(bcf_hdr->exponent_size) * sizeof(u32);
550 signature_block_size = 822 signature_block_size =
551 le32_to_cpu(bcf->modulus_size) * sizeof(u32); 823 le32_to_cpu(bcf_hdr->modulus_size) * sizeof(u32);
552 memcpy(cmd_buf->cmd_pl, (void *) bcf + signature_block_offset, 824 memcpy(cmd_buf->cmd_pl,
825 (void *) bcf_hdr + signature_block_offset,
553 signature_block_size); 826 signature_block_size);
554 ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, 827 ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
555 sizeof(cmd_buf->cmd) + signature_block_size, 828 sizeof(cmd_buf->cmd) + signature_block_size,
@@ -565,7 +838,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
565 * 838 *
566 * @i2400m: device descriptor 839 * @i2400m: device descriptor
567 * @flags: 840 * @flags:
568 * I2400M_BRI_SOFT: a reboot notification has been seen 841 * I2400M_BRI_SOFT: a reboot barker has been seen
569 * already, so don't wait for it. 842 * already, so don't wait for it.
570 * 843 *
571 * I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait 844 * I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait
@@ -576,17 +849,15 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
576 * 849 *
577 * < 0 errno code on error, 0 if ok. 850 * < 0 errno code on error, 0 if ok.
578 * 851 *
579 * i2400m->sboot set to 0 for unsecure boot process, 1 for secure
580 * boot process.
581 *
582 * Description: 852 * Description:
583 * 853 *
584 * Tries hard enough to put the device in boot-mode. There are two 854 * Tries hard enough to put the device in boot-mode. There are two
585 * main phases to this: 855 * main phases to this:
586 * 856 *
587 * a. (1) send a reboot command and (2) get a reboot barker 857 * a. (1) send a reboot command and (2) get a reboot barker
588 * b. (1) ack the reboot sending a reboot barker and (2) getting an 858 *
589 * ack barker in return 859 * b. (1) echo/ack the reboot sending the reboot barker back and (2)
860 * getting an ack barker in return
590 * 861 *
591 * We want to skip (a) in some cases [soft]. The state machine is 862 * We want to skip (a) in some cases [soft]. The state machine is
592 * horrible, but it is basically: on each phase, send what has to be 863 * horrible, but it is basically: on each phase, send what has to be
@@ -594,6 +865,16 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
594 * have to backtrack and retry, so we keep a max tries counter for 865 * have to backtrack and retry, so we keep a max tries counter for
595 * that. 866 * that.
596 * 867 *
868 * It sucks because we don't know ahead of time which is going to be
869 * the reboot barker (the device might send different ones depending
870 * on its EEPROM config) and once the device reboots and waits for the
871 * echo/ack reboot barker being sent back, it doesn't understand
872 * anything else. So we can be left at the point where we don't know
873 * what to send to it -- cold reset and bus reset seem to have little
874 * effect. So the function iterates (in this case) through all the
875 * known barkers and tries them all until an ACK is
876 * received. Otherwise, it gives up.
877 *
597 * If we get a timeout after sending a warm reset, we do it again. 878 * If we get a timeout after sending a warm reset, we do it again.
598 */ 879 */
599int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags) 880int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
@@ -602,10 +883,11 @@ int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
602 struct device *dev = i2400m_dev(i2400m); 883 struct device *dev = i2400m_dev(i2400m);
603 struct i2400m_bootrom_header *cmd; 884 struct i2400m_bootrom_header *cmd;
604 struct i2400m_bootrom_header ack; 885 struct i2400m_bootrom_header ack;
605 int count = I2400M_BOOT_RETRIES; 886 int count = i2400m->bus_bm_retries;
606 int ack_timeout_cnt = 1; 887 int ack_timeout_cnt = 1;
888 unsigned i;
607 889
608 BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_NBOOT_BARKER)); 890 BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_barker_db[0].data));
609 BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER)); 891 BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER));
610 892
611 d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags); 893 d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags);
@@ -614,27 +896,59 @@ int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
614 if (flags & I2400M_BRI_SOFT) 896 if (flags & I2400M_BRI_SOFT)
615 goto do_reboot_ack; 897 goto do_reboot_ack;
616do_reboot: 898do_reboot:
899 ack_timeout_cnt = 1;
617 if (--count < 0) 900 if (--count < 0)
618 goto error_timeout; 901 goto error_timeout;
619 d_printf(4, dev, "device reboot: reboot command [%d # left]\n", 902 d_printf(4, dev, "device reboot: reboot command [%d # left]\n",
620 count); 903 count);
621 if ((flags & I2400M_BRI_NO_REBOOT) == 0) 904 if ((flags & I2400M_BRI_NO_REBOOT) == 0)
622 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 905 i2400m_reset(i2400m, I2400M_RT_WARM);
623 result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack), 906 result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack),
624 I2400M_BM_CMD_RAW); 907 I2400M_BM_CMD_RAW);
625 flags &= ~I2400M_BRI_NO_REBOOT; 908 flags &= ~I2400M_BRI_NO_REBOOT;
626 switch (result) { 909 switch (result) {
627 case -ERESTARTSYS: 910 case -ERESTARTSYS:
911 /*
912 * at this point, i2400m_bm_cmd(), through
913 * __i2400m_bm_ack_process(), has updated
914 * i2400m->barker and we are good to go.
915 */
628 d_printf(4, dev, "device reboot: got reboot barker\n"); 916 d_printf(4, dev, "device reboot: got reboot barker\n");
629 break; 917 break;
630 case -EISCONN: /* we don't know how it got here...but we follow it */ 918 case -EISCONN: /* we don't know how it got here...but we follow it */
631 d_printf(4, dev, "device reboot: got ack barker - whatever\n"); 919 d_printf(4, dev, "device reboot: got ack barker - whatever\n");
632 goto do_reboot; 920 goto do_reboot;
633 case -ETIMEDOUT: /* device has timed out, we might be in boot 921 case -ETIMEDOUT:
634 * mode already and expecting an ack, let's try 922 /*
635 * that */ 923 * Device has timed out, we might be in boot mode
636 dev_info(dev, "warm reset timed out, trying an ack\n"); 924 * already and expecting an ack; if we don't know what
637 goto do_reboot_ack; 925 * the barker is, we just send them all. Cold reset
926 * and bus reset don't work. Beats me.
927 */
928 if (i2400m->barker != NULL) {
929 dev_err(dev, "device boot: reboot barker timed out, "
930 "trying (set) %08x echo/ack\n",
931 le32_to_cpu(i2400m->barker->data[0]));
932 goto do_reboot_ack;
933 }
934 for (i = 0; i < i2400m_barker_db_used; i++) {
935 struct i2400m_barker_db *barker = &i2400m_barker_db[i];
936 memcpy(cmd, barker->data, sizeof(barker->data));
937 result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
938 &ack, sizeof(ack),
939 I2400M_BM_CMD_RAW);
940 if (result == -EISCONN) {
941 dev_warn(dev, "device boot: got ack barker "
942 "after sending echo/ack barker "
943 "#%d/%08x; rebooting j.i.c.\n",
944 i, le32_to_cpu(barker->data[0]));
945 flags &= ~I2400M_BRI_NO_REBOOT;
946 goto do_reboot;
947 }
948 }
949 dev_err(dev, "device boot: tried all the echo/acks, could "
950 "not get device to respond; giving up");
951 result = -ESHUTDOWN;
638 case -EPROTO: 952 case -EPROTO:
639 case -ESHUTDOWN: /* dev is gone */ 953 case -ESHUTDOWN: /* dev is gone */
640 case -EINTR: /* user cancelled */ 954 case -EINTR: /* user cancelled */
@@ -642,6 +956,7 @@ do_reboot:
642 default: 956 default:
643 dev_err(dev, "device reboot: error %d while waiting " 957 dev_err(dev, "device reboot: error %d while waiting "
644 "for reboot barker - rebooting\n", result); 958 "for reboot barker - rebooting\n", result);
959 d_dump(1, dev, &ack, result);
645 goto do_reboot; 960 goto do_reboot;
646 } 961 }
647 /* At this point we ack back with 4 REBOOT barkers and expect 962 /* At this point we ack back with 4 REBOOT barkers and expect
@@ -650,12 +965,7 @@ do_reboot:
650 * notification and report it as -EISCONN. */ 965 * notification and report it as -EISCONN. */
651do_reboot_ack: 966do_reboot_ack:
652 d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count); 967 d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count);
653 if (i2400m->sboot == 0) 968 memcpy(cmd, i2400m->barker->data, sizeof(i2400m->barker->data));
654 memcpy(cmd, i2400m_NBOOT_BARKER,
655 sizeof(i2400m_NBOOT_BARKER));
656 else
657 memcpy(cmd, i2400m_SBOOT_BARKER,
658 sizeof(i2400m_SBOOT_BARKER));
659 result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), 969 result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
660 &ack, sizeof(ack), I2400M_BM_CMD_RAW); 970 &ack, sizeof(ack), I2400M_BM_CMD_RAW);
661 switch (result) { 971 switch (result) {
@@ -668,10 +978,8 @@ do_reboot_ack:
668 d_printf(4, dev, "reboot ack: got ack barker - good\n"); 978 d_printf(4, dev, "reboot ack: got ack barker - good\n");
669 break; 979 break;
670 case -ETIMEDOUT: /* no response, maybe it is the other type? */ 980 case -ETIMEDOUT: /* no response, maybe it is the other type? */
671 if (ack_timeout_cnt-- >= 0) { 981 if (ack_timeout_cnt-- < 0) {
672 d_printf(4, dev, "reboot ack timedout: " 982 d_printf(4, dev, "reboot ack timedout: retrying\n");
673 "trying the other type?\n");
674 i2400m->sboot = !i2400m->sboot;
675 goto do_reboot_ack; 983 goto do_reboot_ack;
676 } else { 984 } else {
677 dev_err(dev, "reboot ack timedout too long: " 985 dev_err(dev, "reboot ack timedout too long: "
@@ -839,32 +1147,29 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m,
839 * (signed or non-signed). 1147 * (signed or non-signed).
840 */ 1148 */
841static 1149static
842int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf) 1150int i2400m_dnload_init(struct i2400m *i2400m,
1151 const struct i2400m_bcf_hdr *bcf_hdr)
843{ 1152{
844 int result; 1153 int result;
845 struct device *dev = i2400m_dev(i2400m); 1154 struct device *dev = i2400m_dev(i2400m);
846 u32 module_id = le32_to_cpu(bcf->module_id);
847 1155
848 if (i2400m->sboot == 0 1156 if (i2400m_boot_is_signed(i2400m)) {
849 && (module_id & I2400M_BCF_MOD_ID_POKES) == 0) { 1157 d_printf(1, dev, "signed boot\n");
850 /* non-signed boot process without pokes */ 1158 result = i2400m_dnload_init_signed(i2400m, bcf_hdr);
851 result = i2400m_dnload_init_nonsigned(i2400m);
852 if (result == -ERESTARTSYS) 1159 if (result == -ERESTARTSYS)
853 return result; 1160 return result;
854 if (result < 0) 1161 if (result < 0)
855 dev_err(dev, "fw %s: non-signed download " 1162 dev_err(dev, "firmware %s: signed boot download "
856 "initialization failed: %d\n", 1163 "initialization failed: %d\n",
857 i2400m->fw_name, result); 1164 i2400m->fw_name, result);
858 } else if (i2400m->sboot == 0 1165 } else {
859 && (module_id & I2400M_BCF_MOD_ID_POKES)) { 1166 /* non-signed boot process without pokes */
860 /* non-signed boot process with pokes, nothing to do */ 1167 d_printf(1, dev, "non-signed boot\n");
861 result = 0; 1168 result = i2400m_dnload_init_nonsigned(i2400m);
862 } else { /* signed boot process */
863 result = i2400m_dnload_init_signed(i2400m, bcf);
864 if (result == -ERESTARTSYS) 1169 if (result == -ERESTARTSYS)
865 return result; 1170 return result;
866 if (result < 0) 1171 if (result < 0)
867 dev_err(dev, "fw %s: signed boot download " 1172 dev_err(dev, "firmware %s: non-signed download "
868 "initialization failed: %d\n", 1173 "initialization failed: %d\n",
869 i2400m->fw_name, result); 1174 i2400m->fw_name, result);
870 } 1175 }
@@ -873,74 +1178,201 @@ int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf)
873 1178
874 1179
875/* 1180/*
876 * Run quick consistency tests on the firmware file 1181 * Run consistency tests on the firmware file and load up headers
877 * 1182 *
878 * Check for the firmware being made for the i2400m device, 1183 * Check for the firmware being made for the i2400m device,
879 * etc...These checks are mostly informative, as the device will make 1184 * etc...These checks are mostly informative, as the device will make
880 * them too; but the driver's response is more informative on what 1185 * them too; but the driver's response is more informative on what
881 * went wrong. 1186 * went wrong.
1187 *
1188 * This will also look at all the headers present on the firmware
1189 * file, and update i2400m->fw_bcf_hdr to point to them.
882 */ 1190 */
883static 1191static
884int i2400m_fw_check(struct i2400m *i2400m, 1192int i2400m_fw_hdr_check(struct i2400m *i2400m,
885 const struct i2400m_bcf_hdr *bcf, 1193 const struct i2400m_bcf_hdr *bcf_hdr,
886 size_t bcf_size) 1194 size_t index, size_t offset)
887{ 1195{
888 int result;
889 struct device *dev = i2400m_dev(i2400m); 1196 struct device *dev = i2400m_dev(i2400m);
1197
890 unsigned module_type, header_len, major_version, minor_version, 1198 unsigned module_type, header_len, major_version, minor_version,
891 module_id, module_vendor, date, size; 1199 module_id, module_vendor, date, size;
892 1200
893 /* Check hard errors */ 1201 module_type = bcf_hdr->module_type;
894 result = -EINVAL; 1202 header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
895 if (bcf_size < sizeof(*bcf)) { /* big enough header? */ 1203 major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
896 dev_err(dev, "firmware %s too short: " 1204 >> 16;
897 "%zu B vs %zu (at least) expected\n", 1205 minor_version = le32_to_cpu(bcf_hdr->header_version) & 0x0000ffff;
898 i2400m->fw_name, bcf_size, sizeof(*bcf)); 1206 module_id = le32_to_cpu(bcf_hdr->module_id);
899 goto error; 1207 module_vendor = le32_to_cpu(bcf_hdr->module_vendor);
900 } 1208 date = le32_to_cpu(bcf_hdr->date);
1209 size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
901 1210
902 module_type = bcf->module_type; 1211 d_printf(1, dev, "firmware %s #%zd@%08zx: BCF header "
903 header_len = sizeof(u32) * le32_to_cpu(bcf->header_len); 1212 "type:vendor:id 0x%x:%x:%x v%u.%u (%u/%u B) built %08x\n",
904 major_version = le32_to_cpu(bcf->header_version) & 0xffff0000 >> 16; 1213 i2400m->fw_name, index, offset,
905 minor_version = le32_to_cpu(bcf->header_version) & 0x0000ffff; 1214 module_type, module_vendor, module_id,
906 module_id = le32_to_cpu(bcf->module_id); 1215 major_version, minor_version, header_len, size, date);
907 module_vendor = le32_to_cpu(bcf->module_vendor);
908 date = le32_to_cpu(bcf->date);
909 size = sizeof(u32) * le32_to_cpu(bcf->size);
910
911 if (bcf_size != size) { /* annoyingly paranoid */
912 dev_err(dev, "firmware %s: bad size, got "
913 "%zu B vs %u expected\n",
914 i2400m->fw_name, bcf_size, size);
915 goto error;
916 }
917 1216
918 d_printf(2, dev, "type 0x%x id 0x%x vendor 0x%x; header v%u.%u (%zu B) " 1217 /* Hard errors */
919 "date %08x (%zu B)\n", 1218 if (major_version != 1) {
920 module_type, module_id, module_vendor, 1219 dev_err(dev, "firmware %s #%zd@%08zx: major header version "
921 major_version, minor_version, (size_t) header_len, 1220 "v%u.%u not supported\n",
922 date, (size_t) size); 1221 i2400m->fw_name, index, offset,
1222 major_version, minor_version);
1223 return -EBADF;
1224 }
923 1225
924 if (module_type != 6) { /* built for the right hardware? */ 1226 if (module_type != 6) { /* built for the right hardware? */
925 dev_err(dev, "bad fw %s: unexpected module type 0x%x; " 1227 dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
926 "aborting\n", i2400m->fw_name, module_type); 1228 "type 0x%x; aborting\n",
927 goto error; 1229 i2400m->fw_name, index, offset,
1230 module_type);
1231 return -EBADF;
1232 }
1233
1234 if (module_vendor != 0x8086) {
1235 dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
1236 "vendor 0x%x; aborting\n",
1237 i2400m->fw_name, index, offset, module_vendor);
1238 return -EBADF;
928 } 1239 }
929 1240
930 /* Check soft-er errors */
931 result = 0;
932 if (module_vendor != 0x8086)
933 dev_err(dev, "bad fw %s? unexpected vendor 0x%04x\n",
934 i2400m->fw_name, module_vendor);
935 if (date < 0x20080300) 1241 if (date < 0x20080300)
936 dev_err(dev, "bad fw %s? build date too old %08x\n", 1242 dev_warn(dev, "firmware %s #%zd@%08zx: build date %08x "
937 i2400m->fw_name, date); 1243 "too old; unsupported\n",
938error: 1244 i2400m->fw_name, index, offset, date);
1245 return 0;
1246}
1247
1248
1249/*
1250 * Run consistency tests on the firmware file and load up headers
1251 *
1252 * Check for the firmware being made for the i2400m device,
1253 * etc...These checks are mostly informative, as the device will make
1254 * them too; but the driver's response is more informative on what
1255 * went wrong.
1256 *
1257 * This will also look at all the headers present on the firmware
1258 * file, and update i2400m->fw_hdrs to point to them.
1259 */
1260static
1261int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
1262{
1263 int result;
1264 struct device *dev = i2400m_dev(i2400m);
1265 size_t headers = 0;
1266 const struct i2400m_bcf_hdr *bcf_hdr;
1267 const void *itr, *next, *top;
1268 size_t slots = 0, used_slots = 0;
1269
1270 for (itr = bcf, top = itr + bcf_size;
1271 itr < top;
1272 headers++, itr = next) {
1273 size_t leftover, offset, header_len, size;
1274
1275 leftover = top - itr;
1276 offset = itr - (const void *) bcf;
1277 if (leftover <= sizeof(*bcf_hdr)) {
1278 dev_err(dev, "firmware %s: %zu B left at @%zx, "
1279 "not enough for BCF header\n",
1280 i2400m->fw_name, leftover, offset);
1281 break;
1282 }
1283 bcf_hdr = itr;
1284 /* Only the first header is supposed to be followed by
1285 * payload */
1286 header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
1287 size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
1288 if (headers == 0)
1289 next = itr + size;
1290 else
1291 next = itr + header_len;
1292
1293 result = i2400m_fw_hdr_check(i2400m, bcf_hdr, headers, offset);
1294 if (result < 0)
1295 continue;
1296 if (used_slots + 1 >= slots) {
1297 /* +1 -> we need to account for the one we'll
1298 * occupy and at least an extra one for
1299 * always being NULL */
1300 result = i2400m_zrealloc_2x(
1301 (void **) &i2400m->fw_hdrs, &slots,
1302 sizeof(i2400m->fw_hdrs[0]),
1303 GFP_KERNEL);
1304 if (result < 0)
1305 goto error_zrealloc;
1306 }
1307 i2400m->fw_hdrs[used_slots] = bcf_hdr;
1308 used_slots++;
1309 }
1310 if (headers == 0) {
1311 dev_err(dev, "firmware %s: no usable headers found\n",
1312 i2400m->fw_name);
1313 result = -EBADF;
1314 } else
1315 result = 0;
1316error_zrealloc:
939 return result; 1317 return result;
940} 1318}
941 1319
942 1320
943/* 1321/*
1322 * Match a barker to a BCF header module ID
1323 *
1324 * The device sends a barker which tells the firmware loader which
1325 * header in the BCF file has to be used. This does the matching.
1326 */
1327static
1328unsigned i2400m_bcf_hdr_match(struct i2400m *i2400m,
1329 const struct i2400m_bcf_hdr *bcf_hdr)
1330{
1331 u32 barker = le32_to_cpu(i2400m->barker->data[0])
1332 & 0x7fffffff;
1333 u32 module_id = le32_to_cpu(bcf_hdr->module_id)
1334 & 0x7fffffff; /* high bit used for something else */
1335
1336 /* special case for 5x50 */
1337 if (barker == I2400M_SBOOT_BARKER && module_id == 0)
1338 return 1;
1339 if (module_id == barker)
1340 return 1;
1341 return 0;
1342}
1343
1344static
1345const struct i2400m_bcf_hdr *i2400m_bcf_hdr_find(struct i2400m *i2400m)
1346{
1347 struct device *dev = i2400m_dev(i2400m);
1348 const struct i2400m_bcf_hdr **bcf_itr, *bcf_hdr;
1349 unsigned i = 0;
1350 u32 barker = le32_to_cpu(i2400m->barker->data[0]);
1351
1352 d_printf(2, dev, "finding BCF header for barker %08x\n", barker);
1353 if (barker == I2400M_NBOOT_BARKER) {
1354 bcf_hdr = i2400m->fw_hdrs[0];
1355 d_printf(1, dev, "using BCF header #%u/%08x for non-signed "
1356 "barker\n", 0, le32_to_cpu(bcf_hdr->module_id));
1357 return bcf_hdr;
1358 }
1359 for (bcf_itr = i2400m->fw_hdrs; *bcf_itr != NULL; bcf_itr++, i++) {
1360 bcf_hdr = *bcf_itr;
1361 if (i2400m_bcf_hdr_match(i2400m, bcf_hdr)) {
1362 d_printf(1, dev, "hit on BCF hdr #%u/%08x\n",
1363 i, le32_to_cpu(bcf_hdr->module_id));
1364 return bcf_hdr;
1365 } else
1366 d_printf(1, dev, "miss on BCF hdr #%u/%08x\n",
1367 i, le32_to_cpu(bcf_hdr->module_id));
1368 }
1369 dev_err(dev, "cannot find a matching BCF header for barker %08x\n",
1370 barker);
1371 return NULL;
1372}
1373
1374
1375/*
944 * Download the firmware to the device 1376 * Download the firmware to the device
945 * 1377 *
946 * @i2400m: device descriptor 1378 * @i2400m: device descriptor
@@ -956,14 +1388,16 @@ error:
956 */ 1388 */
957static 1389static
958int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf, 1390int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
959 size_t bcf_size, enum i2400m_bri flags) 1391 size_t fw_size, enum i2400m_bri flags)
960{ 1392{
961 int ret = 0; 1393 int ret = 0;
962 struct device *dev = i2400m_dev(i2400m); 1394 struct device *dev = i2400m_dev(i2400m);
963 int count = i2400m->bus_bm_retries; 1395 int count = i2400m->bus_bm_retries;
1396 const struct i2400m_bcf_hdr *bcf_hdr;
1397 size_t bcf_size;
964 1398
965 d_fnstart(5, dev, "(i2400m %p bcf %p size %zu)\n", 1399 d_fnstart(5, dev, "(i2400m %p bcf %p fw size %zu)\n",
966 i2400m, bcf, bcf_size); 1400 i2400m, bcf, fw_size);
967 i2400m->boot_mode = 1; 1401 i2400m->boot_mode = 1;
968 wmb(); /* Make sure other readers see it */ 1402 wmb(); /* Make sure other readers see it */
969hw_reboot: 1403hw_reboot:
@@ -985,13 +1419,28 @@ hw_reboot:
985 * Initialize the download, push the bytes to the device and 1419 * Initialize the download, push the bytes to the device and
986 * then jump to the new firmware. Note @ret is passed with the 1420 * then jump to the new firmware. Note @ret is passed with the
987 * offset of the jump instruction to _dnload_finalize() 1421 * offset of the jump instruction to _dnload_finalize()
1422 *
1423 * Note we need to use the BCF header in the firmware image
1424 * that matches the barker that the device sent when it
1425 * rebooted, so it has to be passed along.
988 */ 1426 */
989 ret = i2400m_dnload_init(i2400m, bcf); /* Init device's dnload */ 1427 ret = -EBADF;
1428 bcf_hdr = i2400m_bcf_hdr_find(i2400m);
1429 if (bcf_hdr == NULL)
1430 goto error_bcf_hdr_find;
1431
1432 ret = i2400m_dnload_init(i2400m, bcf_hdr);
990 if (ret == -ERESTARTSYS) 1433 if (ret == -ERESTARTSYS)
991 goto error_dev_rebooted; 1434 goto error_dev_rebooted;
992 if (ret < 0) 1435 if (ret < 0)
993 goto error_dnload_init; 1436 goto error_dnload_init;
994 1437
1438 /*
1439 * bcf_size refers to one header size plus the fw sections size
1440 * indicated by the header,ie. if there are other extended headers
1441 * at the tail, they are not counted
1442 */
1443 bcf_size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
995 ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size); 1444 ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size);
996 if (ret == -ERESTARTSYS) 1445 if (ret == -ERESTARTSYS)
997 goto error_dev_rebooted; 1446 goto error_dev_rebooted;
@@ -1001,7 +1450,7 @@ hw_reboot:
1001 goto error_dnload_bcf; 1450 goto error_dnload_bcf;
1002 } 1451 }
1003 1452
1004 ret = i2400m_dnload_finalize(i2400m, bcf, ret); 1453 ret = i2400m_dnload_finalize(i2400m, bcf_hdr, bcf, ret);
1005 if (ret == -ERESTARTSYS) 1454 if (ret == -ERESTARTSYS)
1006 goto error_dev_rebooted; 1455 goto error_dev_rebooted;
1007 if (ret < 0) { 1456 if (ret < 0) {
@@ -1018,10 +1467,11 @@ hw_reboot:
1018error_dnload_finalize: 1467error_dnload_finalize:
1019error_dnload_bcf: 1468error_dnload_bcf:
1020error_dnload_init: 1469error_dnload_init:
1470error_bcf_hdr_find:
1021error_bootrom_init: 1471error_bootrom_init:
1022error_too_many_reboots: 1472error_too_many_reboots:
1023 d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n", 1473 d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n",
1024 i2400m, bcf, bcf_size, ret); 1474 i2400m, bcf, fw_size, ret);
1025 return ret; 1475 return ret;
1026 1476
1027error_dev_rebooted: 1477error_dev_rebooted:
@@ -1031,6 +1481,61 @@ error_dev_rebooted:
1031 goto hw_reboot; 1481 goto hw_reboot;
1032} 1482}
1033 1483
1484static
1485int i2400m_fw_bootstrap(struct i2400m *i2400m, const struct firmware *fw,
1486 enum i2400m_bri flags)
1487{
1488 int ret;
1489 struct device *dev = i2400m_dev(i2400m);
1490 const struct i2400m_bcf_hdr *bcf; /* Firmware data */
1491
1492 d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
1493 bcf = (void *) fw->data;
1494 ret = i2400m_fw_check(i2400m, bcf, fw->size);
1495 if (ret >= 0)
1496 ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
1497 if (ret < 0)
1498 dev_err(dev, "%s: cannot use: %d, skipping\n",
1499 i2400m->fw_name, ret);
1500 kfree(i2400m->fw_hdrs);
1501 i2400m->fw_hdrs = NULL;
1502 d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
1503 return ret;
1504}
1505
1506
1507/* Refcounted container for firmware data */
1508struct i2400m_fw {
1509 struct kref kref;
1510 const struct firmware *fw;
1511};
1512
1513
1514static
1515void i2400m_fw_destroy(struct kref *kref)
1516{
1517 struct i2400m_fw *i2400m_fw =
1518 container_of(kref, struct i2400m_fw, kref);
1519 release_firmware(i2400m_fw->fw);
1520 kfree(i2400m_fw);
1521}
1522
1523
1524static
1525struct i2400m_fw *i2400m_fw_get(struct i2400m_fw *i2400m_fw)
1526{
1527 if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
1528 kref_get(&i2400m_fw->kref);
1529 return i2400m_fw;
1530}
1531
1532
1533static
1534void i2400m_fw_put(struct i2400m_fw *i2400m_fw)
1535{
1536 kref_put(&i2400m_fw->kref, i2400m_fw_destroy);
1537}
1538
1034 1539
1035/** 1540/**
1036 * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware 1541 * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware
@@ -1049,42 +1554,109 @@ error_dev_rebooted:
1049 */ 1554 */
1050int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) 1555int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
1051{ 1556{
1052 int ret = 0, itr = 0; 1557 int ret, itr;
1053 struct device *dev = i2400m_dev(i2400m); 1558 struct device *dev = i2400m_dev(i2400m);
1054 const struct firmware *fw; 1559 struct i2400m_fw *i2400m_fw;
1055 const struct i2400m_bcf_hdr *bcf; /* Firmware data */ 1560 const struct i2400m_bcf_hdr *bcf; /* Firmware data */
1561 const struct firmware *fw;
1056 const char *fw_name; 1562 const char *fw_name;
1057 1563
1058 d_fnstart(5, dev, "(i2400m %p)\n", i2400m); 1564 d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
1059 1565
1566 ret = -ENODEV;
1567 spin_lock(&i2400m->rx_lock);
1568 i2400m_fw = i2400m_fw_get(i2400m->fw_cached);
1569 spin_unlock(&i2400m->rx_lock);
1570 if (i2400m_fw == (void *) ~0) {
1571 dev_err(dev, "can't load firmware now!");
1572 goto out;
1573 } else if (i2400m_fw != NULL) {
1574 dev_info(dev, "firmware %s: loading from cache\n",
1575 i2400m->fw_name);
1576 ret = i2400m_fw_bootstrap(i2400m, i2400m_fw->fw, flags);
1577 i2400m_fw_put(i2400m_fw);
1578 goto out;
1579 }
1580
1060 /* Load firmware files to memory. */ 1581 /* Load firmware files to memory. */
1061 itr = 0; 1582 for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) {
1062 while(1) {
1063 fw_name = i2400m->bus_fw_names[itr]; 1583 fw_name = i2400m->bus_fw_names[itr];
1064 if (fw_name == NULL) { 1584 if (fw_name == NULL) {
1065 dev_err(dev, "Could not find a usable firmware image\n"); 1585 dev_err(dev, "Could not find a usable firmware image\n");
1066 ret = -ENOENT; 1586 break;
1067 goto error_no_fw;
1068 } 1587 }
1588 d_printf(1, dev, "trying firmware %s (%d)\n", fw_name, itr);
1069 ret = request_firmware(&fw, fw_name, dev); 1589 ret = request_firmware(&fw, fw_name, dev);
1070 if (ret == 0) 1590 if (ret < 0) {
1071 break; /* got it */
1072 if (ret < 0)
1073 dev_err(dev, "fw %s: cannot load file: %d\n", 1591 dev_err(dev, "fw %s: cannot load file: %d\n",
1074 fw_name, ret); 1592 fw_name, ret);
1075 itr++; 1593 continue;
1594 }
1595 i2400m->fw_name = fw_name;
1596 ret = i2400m_fw_bootstrap(i2400m, fw, flags);
1597 release_firmware(fw);
1598 if (ret >= 0) /* firmware loaded succesfully */
1599 break;
1600 i2400m->fw_name = NULL;
1076 } 1601 }
1077 1602out:
1078 bcf = (void *) fw->data;
1079 i2400m->fw_name = fw_name;
1080 ret = i2400m_fw_check(i2400m, bcf, fw->size);
1081 if (ret < 0)
1082 goto error_fw_bad;
1083 ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
1084error_fw_bad:
1085 release_firmware(fw);
1086error_no_fw:
1087 d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret); 1603 d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
1088 return ret; 1604 return ret;
1089} 1605}
1090EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap); 1606EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap);
1607
1608
1609void i2400m_fw_cache(struct i2400m *i2400m)
1610{
1611 int result;
1612 struct i2400m_fw *i2400m_fw;
1613 struct device *dev = i2400m_dev(i2400m);
1614
1615 /* if there is anything there, free it -- now, this'd be weird */
1616 spin_lock(&i2400m->rx_lock);
1617 i2400m_fw = i2400m->fw_cached;
1618 spin_unlock(&i2400m->rx_lock);
1619 if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) {
1620 i2400m_fw_put(i2400m_fw);
1621 WARN(1, "%s:%u: still cached fw still present?\n",
1622 __func__, __LINE__);
1623 }
1624
1625 if (i2400m->fw_name == NULL) {
1626 dev_err(dev, "firmware n/a: can't cache\n");
1627 i2400m_fw = (void *) ~0;
1628 goto out;
1629 }
1630
1631 i2400m_fw = kzalloc(sizeof(*i2400m_fw), GFP_ATOMIC);
1632 if (i2400m_fw == NULL)
1633 goto out;
1634 kref_init(&i2400m_fw->kref);
1635 result = request_firmware(&i2400m_fw->fw, i2400m->fw_name, dev);
1636 if (result < 0) {
1637 dev_err(dev, "firmware %s: failed to cache: %d\n",
1638 i2400m->fw_name, result);
1639 kfree(i2400m_fw);
1640 i2400m_fw = (void *) ~0;
1641 } else
1642 dev_info(dev, "firmware %s: cached\n", i2400m->fw_name);
1643out:
1644 spin_lock(&i2400m->rx_lock);
1645 i2400m->fw_cached = i2400m_fw;
1646 spin_unlock(&i2400m->rx_lock);
1647}
1648
1649
1650void i2400m_fw_uncache(struct i2400m *i2400m)
1651{
1652 struct i2400m_fw *i2400m_fw;
1653
1654 spin_lock(&i2400m->rx_lock);
1655 i2400m_fw = i2400m->fw_cached;
1656 i2400m->fw_cached = NULL;
1657 spin_unlock(&i2400m->rx_lock);
1658
1659 if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
1660 i2400m_fw_put(i2400m_fw);
1661}
1662
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index 9c4e3189f7b5..b9c4bed3b457 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -67,6 +67,7 @@
67 67
68/* Host-Device interface for SDIO */ 68/* Host-Device interface for SDIO */
69enum { 69enum {
70 I2400M_SDIO_BOOT_RETRIES = 3,
70 I2400MS_BLK_SIZE = 256, 71 I2400MS_BLK_SIZE = 256,
71 I2400MS_PL_SIZE_MAX = 0x3E00, 72 I2400MS_PL_SIZE_MAX = 0x3E00,
72 73
@@ -77,9 +78,11 @@ enum {
77 I2400MS_INTR_GET_SIZE_ADDR = 0x2C, 78 I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
78 /* The number of ticks to wait for the device to signal that 79 /* The number of ticks to wait for the device to signal that
79 * it is ready */ 80 * it is ready */
80 I2400MS_INIT_SLEEP_INTERVAL = 10, 81 I2400MS_INIT_SLEEP_INTERVAL = 100,
81 /* How long to wait for the device to settle after reset */ 82 /* How long to wait for the device to settle after reset */
82 I2400MS_SETTLE_TIME = 40, 83 I2400MS_SETTLE_TIME = 40,
84 /* The number of msec to wait for IOR after sending IOE */
85 IWMC3200_IOR_TIMEOUT = 10,
83}; 86};
84 87
85 88
@@ -97,6 +100,14 @@ enum {
97 * @tx_workqueue: workqeueue used for data TX; we don't use the 100 * @tx_workqueue: workqeueue used for data TX; we don't use the
98 * system's workqueue as that might cause deadlocks with code in 101 * system's workqueue as that might cause deadlocks with code in
99 * the bus-generic driver. 102 * the bus-generic driver.
103 *
104 * @debugfs_dentry: dentry for the SDIO specific debugfs files
105 *
106 * Note this value is set to NULL upon destruction; this is
107 * because some routinges use it to determine if we are inside the
108 * probe() path or some other path. When debugfs is disabled,
109 * creation sets the dentry to '(void*) -ENODEV', which is valid
110 * for the test.
100 */ 111 */
101struct i2400ms { 112struct i2400ms {
102 struct i2400m i2400m; /* FIRST! See doc */ 113 struct i2400m i2400m; /* FIRST! See doc */
@@ -111,6 +122,9 @@ struct i2400ms {
111 wait_queue_head_t bm_wfa_wq; 122 wait_queue_head_t bm_wfa_wq;
112 int bm_wait_result; 123 int bm_wait_result;
113 size_t bm_ack_size; 124 size_t bm_ack_size;
125
126 /* Device is any of the iwmc3200 SKUs */
127 unsigned iwmc3200:1;
114}; 128};
115 129
116 130
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 6f76558b170f..5cc0f279417e 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -88,6 +88,13 @@ struct edc {
88 u16 errorcount; 88 u16 errorcount;
89}; 89};
90 90
91struct i2400m_endpoint_cfg {
92 unsigned char bulk_out;
93 unsigned char notification;
94 unsigned char reset_cold;
95 unsigned char bulk_in;
96};
97
91static inline void edc_init(struct edc *edc) 98static inline void edc_init(struct edc *edc)
92{ 99{
93 edc->timestart = jiffies; 100 edc->timestart = jiffies;
@@ -137,15 +144,13 @@ static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
137 144
138/* Host-Device interface for USB */ 145/* Host-Device interface for USB */
139enum { 146enum {
147 I2400M_USB_BOOT_RETRIES = 3,
140 I2400MU_MAX_NOTIFICATION_LEN = 256, 148 I2400MU_MAX_NOTIFICATION_LEN = 256,
141 I2400MU_BLK_SIZE = 16, 149 I2400MU_BLK_SIZE = 16,
142 I2400MU_PL_SIZE_MAX = 0x3EFF, 150 I2400MU_PL_SIZE_MAX = 0x3EFF,
143 151
144 /* Endpoints */ 152 /* Device IDs */
145 I2400MU_EP_BULK_OUT = 0, 153 USB_DEVICE_ID_I6050 = 0x0186,
146 I2400MU_EP_NOTIFICATION,
147 I2400MU_EP_RESET_COLD,
148 I2400MU_EP_BULK_IN,
149}; 154};
150 155
151 156
@@ -215,6 +220,7 @@ struct i2400mu {
215 struct usb_device *usb_dev; 220 struct usb_device *usb_dev;
216 struct usb_interface *usb_iface; 221 struct usb_interface *usb_iface;
217 struct edc urb_edc; /* Error density counter */ 222 struct edc urb_edc; /* Error density counter */
223 struct i2400m_endpoint_cfg endpoint_cfg;
218 224
219 struct urb *notif_urb; 225 struct urb *notif_urb;
220 struct task_struct *tx_kthread; 226 struct task_struct *tx_kthread;
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 60330f313f27..04df9bbe340f 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -117,16 +117,30 @@
117 * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The 117 * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The
118 * i2400m driver will only register with the WiMAX and network stacks; 118 * i2400m driver will only register with the WiMAX and network stacks;
119 * the only access done to the device is to read the MAC address so we 119 * the only access done to the device is to read the MAC address so we
120 * can register a network device. This calls i2400m_dev_start() to 120 * can register a network device.
121 * load firmware, setup communication with the device and configure it
122 * for operation.
123 * 121 *
124 * At this point, control and data communications are possible. 122 * The high-level call flow is:
123 *
124 * bus_probe()
125 * i2400m_setup()
126 * i2400m->bus_setup()
127 * boot rom initialization / read mac addr
128 * network / WiMAX stacks registration
129 * i2400m_dev_start()
130 * i2400m->bus_dev_start()
131 * i2400m_dev_initialize()
125 * 132 *
126 * On disconnect/driver unload, the bus-specific disconnect function 133 * The reverse applies for a disconnect() call:
127 * calls i2400m_release() to undo i2400m_setup(). i2400m_dev_stop() 134 *
128 * shuts the firmware down and releases resources uses to communicate 135 * bus_disconnect()
129 * with the device. 136 * i2400m_release()
137 * i2400m_dev_stop()
138 * i2400m_dev_shutdown()
139 * i2400m->bus_dev_stop()
140 * network / WiMAX stack unregistration
141 * i2400m->bus_release()
142 *
143 * At this point, control and data communications are possible.
130 * 144 *
131 * While the device is up, it might reset. The bus-specific driver has 145 * While the device is up, it might reset. The bus-specific driver has
132 * to catch that situation and call i2400m_dev_reset_handle() to deal 146 * to catch that situation and call i2400m_dev_reset_handle() to deal
@@ -148,9 +162,6 @@
148 162
149/* Misc constants */ 163/* Misc constants */
150enum { 164enum {
151 /* Firmware uploading */
152 I2400M_BOOT_RETRIES = 3,
153 I3200_BOOT_RETRIES = 3,
154 /* Size of the Boot Mode Command buffer */ 165 /* Size of the Boot Mode Command buffer */
155 I2400M_BM_CMD_BUF_SIZE = 16 * 1024, 166 I2400M_BM_CMD_BUF_SIZE = 16 * 1024,
156 I2400M_BM_ACK_BUF_SIZE = 256, 167 I2400M_BM_ACK_BUF_SIZE = 256,
@@ -197,6 +208,7 @@ enum i2400m_reset_type {
197 208
198struct i2400m_reset_ctx; 209struct i2400m_reset_ctx;
199struct i2400m_roq; 210struct i2400m_roq;
211struct i2400m_barker_db;
200 212
201/** 213/**
202 * struct i2400m - descriptor for an Intel 2400m 214 * struct i2400m - descriptor for an Intel 2400m
@@ -204,27 +216,50 @@ struct i2400m_roq;
204 * Members marked with [fill] must be filled out/initialized before 216 * Members marked with [fill] must be filled out/initialized before
205 * calling i2400m_setup(). 217 * calling i2400m_setup().
206 * 218 *
219 * Note the @bus_setup/@bus_release, @bus_dev_start/@bus_dev_release
220 * call pairs are very much doing almost the same, and depending on
221 * the underlying bus, some stuff has to be put in one or the
222 * other. The idea of setup/release is that they setup the minimal
223 * amount needed for loading firmware, where us dev_start/stop setup
224 * the rest needed to do full data/control traffic.
225 *
207 * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16, 226 * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16,
208 * so we have a tx_blk_size variable that the bus layer sets to 227 * so we have a tx_blk_size variable that the bus layer sets to
209 * tell the engine how much of that we need. 228 * tell the engine how much of that we need.
210 * 229 *
211 * @bus_pl_size_max: [fill] Maximum payload size. 230 * @bus_pl_size_max: [fill] Maximum payload size.
212 * 231 *
213 * @bus_dev_start: [fill] Function called by the bus-generic code 232 * @bus_setup: [optional fill] Function called by the bus-generic code
214 * [i2400m_dev_start()] to setup the bus-specific communications 233 * [i2400m_setup()] to setup the basic bus-specific communications
215 * to the the device. See LIFE CYCLE above. 234 * to the the device needed to load firmware. See LIFE CYCLE above.
216 * 235 *
217 * NOTE: Doesn't need to upload the firmware, as that is taken 236 * NOTE: Doesn't need to upload the firmware, as that is taken
218 * care of by the bus-generic code. 237 * care of by the bus-generic code.
219 * 238 *
220 * @bus_dev_stop: [fill] Function called by the bus-generic code 239 * @bus_release: [optional fill] Function called by the bus-generic
221 * [i2400m_dev_stop()] to shutdown the bus-specific communications 240 * code [i2400m_release()] to shutdown the basic bus-specific
222 * to the the device. See LIFE CYCLE above. 241 * communications to the the device needed to load firmware. See
242 * LIFE CYCLE above.
223 * 243 *
224 * This function does not need to reset the device, just tear down 244 * This function does not need to reset the device, just tear down
225 * all the host resources created to handle communication with 245 * all the host resources created to handle communication with
226 * the device. 246 * the device.
227 * 247 *
248 * @bus_dev_start: [optional fill] Function called by the bus-generic
249 * code [i2400m_dev_start()] to do things needed to start the
250 * device. See LIFE CYCLE above.
251 *
252 * NOTE: Doesn't need to upload the firmware, as that is taken
253 * care of by the bus-generic code.
254 *
255 * @bus_dev_stop: [optional fill] Function called by the bus-generic
256 * code [i2400m_dev_stop()] to do things needed for stopping the
257 * device. See LIFE CYCLE above.
258 *
259 * This function does not need to reset the device, just tear down
260 * all the host resources created to handle communication with
261 * the device.
262 *
228 * @bus_tx_kick: [fill] Function called by the bus-generic code to let 263 * @bus_tx_kick: [fill] Function called by the bus-generic code to let
229 * the bus-specific code know that there is data available in the 264 * the bus-specific code know that there is data available in the
230 * TX FIFO for transmission to the device. 265 * TX FIFO for transmission to the device.
@@ -246,6 +281,9 @@ struct i2400m_roq;
246 * process, so it cannot rely on common infrastructure being laid 281 * process, so it cannot rely on common infrastructure being laid
247 * out. 282 * out.
248 * 283 *
284 * IMPORTANT: don't call reset on RT_BUS with i2400m->init_mutex
285 * held, as the .pre/.post reset handlers will deadlock.
286 *
249 * @bus_bm_retries: [fill] How many times shall a firmware upload / 287 * @bus_bm_retries: [fill] How many times shall a firmware upload /
250 * device initialization be retried? Different models of the same 288 * device initialization be retried? Different models of the same
251 * device might need different values, hence it is set by the 289 * device might need different values, hence it is set by the
@@ -297,6 +335,27 @@ struct i2400m_roq;
297 * force this to be the first field so that we can get from 335 * force this to be the first field so that we can get from
298 * netdev_priv() the right pointer. 336 * netdev_priv() the right pointer.
299 * 337 *
338 * @updown: the device is up and ready for transmitting control and
339 * data packets. This implies @ready (communication infrastructure
340 * with the device is ready) and the device's firmware has been
341 * loaded and the device initialized.
342 *
343 * Write to it only inside a i2400m->init_mutex protected area
344 * followed with a wmb(); rmb() before accesing (unless locked
345 * inside i2400m->init_mutex). Read access can be loose like that
346 * [just using rmb()] because the paths that use this also do
347 * other error checks later on.
348 *
349 * @ready: Communication infrastructure with the device is ready, data
350 * frames can start to be passed around (this is lighter than
351 * using the WiMAX state for certain hot paths).
352 *
353 * Write to it only inside a i2400m->init_mutex protected area
354 * followed with a wmb(); rmb() before accesing (unless locked
355 * inside i2400m->init_mutex). Read access can be loose like that
356 * [just using rmb()] because the paths that use this also do
357 * other error checks later on.
358 *
300 * @rx_reorder: 1 if RX reordering is enabled; this can only be 359 * @rx_reorder: 1 if RX reordering is enabled; this can only be
301 * set at probe time. 360 * set at probe time.
302 * 361 *
@@ -362,6 +421,13 @@ struct i2400m_roq;
362 * delivered. Then the driver can release them to the host. See 421 * delivered. Then the driver can release them to the host. See
363 * drivers/net/i2400m/rx.c for details. 422 * drivers/net/i2400m/rx.c for details.
364 * 423 *
424 * @rx_reports: reports received from the device that couldn't be
425 * processed because the driver wasn't still ready; when ready,
426 * they are pulled from here and chewed.
427 *
428 * @rx_reports_ws: Work struct used to kick a scan of the RX reports
429 * list and to process each.
430 *
365 * @src_mac_addr: MAC address used to make ethernet packets be coming 431 * @src_mac_addr: MAC address used to make ethernet packets be coming
366 * from. This is generated at i2400m_setup() time and used during 432 * from. This is generated at i2400m_setup() time and used during
367 * the life cycle of the instance. See i2400m_fake_eth_header(). 433 * the life cycle of the instance. See i2400m_fake_eth_header().
@@ -422,6 +488,25 @@ struct i2400m_roq;
422 * 488 *
423 * @fw_version: version of the firmware interface, Major.minor, 489 * @fw_version: version of the firmware interface, Major.minor,
424 * encoded in the high word and low word (major << 16 | minor). 490 * encoded in the high word and low word (major << 16 | minor).
491 *
492 * @fw_hdrs: NULL terminated array of pointers to the firmware
493 * headers. This is only available during firmware load time.
494 *
495 * @fw_cached: Used to cache firmware when the system goes to
496 * suspend/standby/hibernation (as on resume we can't read it). If
497 * NULL, no firmware was cached, read it. If ~0, you can't read
498 * any firmware files (the system still didn't come out of suspend
499 * and failed to cache one), so abort; otherwise, a valid cached
500 * firmware to be used. Access to this variable is protected by
501 * the spinlock i2400m->rx_lock.
502 *
503 * @barker: barker type that the device uses; this is initialized by
504 * i2400m_is_boot_barker() the first time it is called. Then it
505 * won't change during the life cycle of the device and everytime
506 * a boot barker is received, it is just verified for it being the
507 * same.
508 *
509 * @pm_notifier: used to register for PM events
425 */ 510 */
426struct i2400m { 511struct i2400m {
427 struct wimax_dev wimax_dev; /* FIRST! See doc */ 512 struct wimax_dev wimax_dev; /* FIRST! See doc */
@@ -429,7 +514,7 @@ struct i2400m {
429 unsigned updown:1; /* Network device is up or down */ 514 unsigned updown:1; /* Network device is up or down */
430 unsigned boot_mode:1; /* is the device in boot mode? */ 515 unsigned boot_mode:1; /* is the device in boot mode? */
431 unsigned sboot:1; /* signed or unsigned fw boot */ 516 unsigned sboot:1; /* signed or unsigned fw boot */
432 unsigned ready:1; /* all probing steps done */ 517 unsigned ready:1; /* Device comm infrastructure ready */
433 unsigned rx_reorder:1; /* RX reorder is enabled */ 518 unsigned rx_reorder:1; /* RX reorder is enabled */
434 u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */ 519 u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */
435 /* typed u8 so /sys/kernel/debug/u8 can tweak */ 520 /* typed u8 so /sys/kernel/debug/u8 can tweak */
@@ -440,8 +525,10 @@ struct i2400m {
440 size_t bus_pl_size_max; 525 size_t bus_pl_size_max;
441 unsigned bus_bm_retries; 526 unsigned bus_bm_retries;
442 527
528 int (*bus_setup)(struct i2400m *);
443 int (*bus_dev_start)(struct i2400m *); 529 int (*bus_dev_start)(struct i2400m *);
444 void (*bus_dev_stop)(struct i2400m *); 530 void (*bus_dev_stop)(struct i2400m *);
531 void (*bus_release)(struct i2400m *);
445 void (*bus_tx_kick)(struct i2400m *); 532 void (*bus_tx_kick)(struct i2400m *);
446 int (*bus_reset)(struct i2400m *, enum i2400m_reset_type); 533 int (*bus_reset)(struct i2400m *, enum i2400m_reset_type);
447 ssize_t (*bus_bm_cmd_send)(struct i2400m *, 534 ssize_t (*bus_bm_cmd_send)(struct i2400m *,
@@ -468,6 +555,8 @@ struct i2400m {
468 rx_num, rx_size_acc, rx_size_min, rx_size_max; 555 rx_num, rx_size_acc, rx_size_min, rx_size_max;
469 struct i2400m_roq *rx_roq; /* not under rx_lock! */ 556 struct i2400m_roq *rx_roq; /* not under rx_lock! */
470 u8 src_mac_addr[ETH_HLEN]; 557 u8 src_mac_addr[ETH_HLEN];
558 struct list_head rx_reports; /* under rx_lock! */
559 struct work_struct rx_report_ws;
471 560
472 struct mutex msg_mutex; /* serialize command execution */ 561 struct mutex msg_mutex; /* serialize command execution */
473 struct completion msg_completion; 562 struct completion msg_completion;
@@ -487,37 +576,12 @@ struct i2400m {
487 struct dentry *debugfs_dentry; 576 struct dentry *debugfs_dentry;
488 const char *fw_name; /* name of the current firmware image */ 577 const char *fw_name; /* name of the current firmware image */
489 unsigned long fw_version; /* version of the firmware interface */ 578 unsigned long fw_version; /* version of the firmware interface */
490}; 579 const struct i2400m_bcf_hdr **fw_hdrs;
491 580 struct i2400m_fw *fw_cached; /* protected by rx_lock */
581 struct i2400m_barker_db *barker;
492 582
493/* 583 struct notifier_block pm_notifier;
494 * Initialize a 'struct i2400m' from all zeroes 584};
495 *
496 * This is a bus-generic API call.
497 */
498static inline
499void i2400m_init(struct i2400m *i2400m)
500{
501 wimax_dev_init(&i2400m->wimax_dev);
502
503 i2400m->boot_mode = 1;
504 i2400m->rx_reorder = 1;
505 init_waitqueue_head(&i2400m->state_wq);
506
507 spin_lock_init(&i2400m->tx_lock);
508 i2400m->tx_pl_min = UINT_MAX;
509 i2400m->tx_size_min = UINT_MAX;
510
511 spin_lock_init(&i2400m->rx_lock);
512 i2400m->rx_pl_min = UINT_MAX;
513 i2400m->rx_size_min = UINT_MAX;
514
515 mutex_init(&i2400m->msg_mutex);
516 init_completion(&i2400m->msg_completion);
517
518 mutex_init(&i2400m->init_mutex);
519 /* wake_tx_ws is initialized in i2400m_tx_setup() */
520}
521 585
522 586
523/* 587/*
@@ -577,6 +641,14 @@ extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
577extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri); 641extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
578extern int i2400m_read_mac_addr(struct i2400m *); 642extern int i2400m_read_mac_addr(struct i2400m *);
579extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri); 643extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
644extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
645static inline
646int i2400m_is_d2h_barker(const void *buf)
647{
648 const __le32 *barker = buf;
649 return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
650}
651extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
580 652
581/* Make/grok boot-rom header commands */ 653/* Make/grok boot-rom header commands */
582 654
@@ -644,6 +716,8 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
644/* 716/*
645 * Driver / device setup and internal functions 717 * Driver / device setup and internal functions
646 */ 718 */
719extern void i2400m_init(struct i2400m *);
720extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
647extern void i2400m_netdev_setup(struct net_device *net_dev); 721extern void i2400m_netdev_setup(struct net_device *net_dev);
648extern int i2400m_sysfs_setup(struct device_driver *); 722extern int i2400m_sysfs_setup(struct device_driver *);
649extern void i2400m_sysfs_release(struct device_driver *); 723extern void i2400m_sysfs_release(struct device_driver *);
@@ -654,10 +728,14 @@ extern void i2400m_tx_release(struct i2400m *);
654extern int i2400m_rx_setup(struct i2400m *); 728extern int i2400m_rx_setup(struct i2400m *);
655extern void i2400m_rx_release(struct i2400m *); 729extern void i2400m_rx_release(struct i2400m *);
656 730
731extern void i2400m_fw_cache(struct i2400m *);
732extern void i2400m_fw_uncache(struct i2400m *);
733
657extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, 734extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
658 const void *, int); 735 const void *, int);
659extern void i2400m_net_erx(struct i2400m *, struct sk_buff *, 736extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
660 enum i2400m_cs); 737 enum i2400m_cs);
738extern void i2400m_net_wake_stop(struct i2400m *);
661enum i2400m_pt; 739enum i2400m_pt;
662extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt); 740extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
663 741
@@ -672,14 +750,12 @@ static inline int i2400m_debugfs_add(struct i2400m *i2400m)
672static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {} 750static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
673#endif 751#endif
674 752
675/* Called by _dev_start()/_dev_stop() to initialize the device itself */ 753/* Initialize/shutdown the device */
676extern int i2400m_dev_initialize(struct i2400m *); 754extern int i2400m_dev_initialize(struct i2400m *);
677extern void i2400m_dev_shutdown(struct i2400m *); 755extern void i2400m_dev_shutdown(struct i2400m *);
678 756
679extern struct attribute_group i2400m_dev_attr_group; 757extern struct attribute_group i2400m_dev_attr_group;
680 758
681extern int i2400m_schedule_work(struct i2400m *,
682 void (*)(struct work_struct *), gfp_t);
683 759
684/* HDI message's payload description handling */ 760/* HDI message's payload description handling */
685 761
@@ -724,7 +800,9 @@ void i2400m_put(struct i2400m *i2400m)
724 dev_put(i2400m->wimax_dev.net_dev); 800 dev_put(i2400m->wimax_dev.net_dev);
725} 801}
726 802
727extern int i2400m_dev_reset_handle(struct i2400m *); 803extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
804extern int i2400m_pre_reset(struct i2400m *);
805extern int i2400m_post_reset(struct i2400m *);
728 806
729/* 807/*
730 * _setup()/_release() are called by the probe/disconnect functions of 808 * _setup()/_release() are called by the probe/disconnect functions of
@@ -737,20 +815,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
737extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *); 815extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
738extern void i2400m_tx_msg_sent(struct i2400m *); 816extern void i2400m_tx_msg_sent(struct i2400m *);
739 817
740static const __le32 i2400m_NBOOT_BARKER[4] = {
741 cpu_to_le32(I2400M_NBOOT_BARKER),
742 cpu_to_le32(I2400M_NBOOT_BARKER),
743 cpu_to_le32(I2400M_NBOOT_BARKER),
744 cpu_to_le32(I2400M_NBOOT_BARKER)
745};
746
747static const __le32 i2400m_SBOOT_BARKER[4] = {
748 cpu_to_le32(I2400M_SBOOT_BARKER),
749 cpu_to_le32(I2400M_SBOOT_BARKER),
750 cpu_to_le32(I2400M_SBOOT_BARKER),
751 cpu_to_le32(I2400M_SBOOT_BARKER)
752};
753
754extern int i2400m_power_save_disabled; 818extern int i2400m_power_save_disabled;
755 819
756/* 820/*
@@ -773,10 +837,12 @@ struct device *i2400m_dev(struct i2400m *i2400m)
773struct i2400m_work { 837struct i2400m_work {
774 struct work_struct ws; 838 struct work_struct ws;
775 struct i2400m *i2400m; 839 struct i2400m *i2400m;
840 size_t pl_size;
776 u8 pl[0]; 841 u8 pl[0];
777}; 842};
778extern int i2400m_queue_work(struct i2400m *, 843
779 void (*)(struct work_struct *), gfp_t, 844extern int i2400m_schedule_work(struct i2400m *,
845 void (*)(struct work_struct *), gfp_t,
780 const void *, size_t); 846 const void *, size_t);
781 847
782extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, 848extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
@@ -789,6 +855,7 @@ extern void i2400m_msg_ack_hook(struct i2400m *,
789 const struct i2400m_l3l4_hdr *, size_t); 855 const struct i2400m_l3l4_hdr *, size_t);
790extern void i2400m_report_hook(struct i2400m *, 856extern void i2400m_report_hook(struct i2400m *,
791 const struct i2400m_l3l4_hdr *, size_t); 857 const struct i2400m_l3l4_hdr *, size_t);
858extern void i2400m_report_hook_work(struct work_struct *);
792extern int i2400m_cmd_enter_powersave(struct i2400m *); 859extern int i2400m_cmd_enter_powersave(struct i2400m *);
793extern int i2400m_cmd_get_state(struct i2400m *); 860extern int i2400m_cmd_get_state(struct i2400m *);
794extern int i2400m_cmd_exit_idle(struct i2400m *); 861extern int i2400m_cmd_exit_idle(struct i2400m *);
@@ -849,6 +916,12 @@ void __i2400m_msleep(unsigned ms)
849#endif 916#endif
850} 917}
851 918
919
920/* module initialization helpers */
921extern int i2400m_barker_db_init(const char *);
922extern void i2400m_barker_db_exit(void);
923
924
852/* Module parameters */ 925/* Module parameters */
853 926
854extern int i2400m_idle_mode_disabled; 927extern int i2400m_idle_mode_disabled;
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 796396cb4c82..599aa4eb9baa 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -74,6 +74,7 @@
74 */ 74 */
75#include <linux/if_arp.h> 75#include <linux/if_arp.h>
76#include <linux/netdevice.h> 76#include <linux/netdevice.h>
77#include <linux/ethtool.h>
77#include "i2400m.h" 78#include "i2400m.h"
78 79
79 80
@@ -88,7 +89,10 @@ enum {
88 * The MTU is 1400 or less 89 * The MTU is 1400 or less
89 */ 90 */
90 I2400M_MAX_MTU = 1400, 91 I2400M_MAX_MTU = 1400,
91 I2400M_TX_TIMEOUT = HZ, 92 /* 20 secs? yep, this is the maximum timeout that the device
93 * might take to get out of IDLE / negotiate it with the base
94 * station. We add 1sec for good measure. */
95 I2400M_TX_TIMEOUT = 21 * HZ,
92 I2400M_TX_QLEN = 5, 96 I2400M_TX_QLEN = 5,
93}; 97};
94 98
@@ -101,22 +105,19 @@ int i2400m_open(struct net_device *net_dev)
101 struct device *dev = i2400m_dev(i2400m); 105 struct device *dev = i2400m_dev(i2400m);
102 106
103 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); 107 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
104 if (i2400m->ready == 0) { 108 /* Make sure we wait until init is complete... */
105 dev_err(dev, "Device is still initializing\n"); 109 mutex_lock(&i2400m->init_mutex);
106 result = -EBUSY; 110 if (i2400m->updown)
107 } else
108 result = 0; 111 result = 0;
112 else
113 result = -EBUSY;
114 mutex_unlock(&i2400m->init_mutex);
109 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", 115 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
110 net_dev, i2400m, result); 116 net_dev, i2400m, result);
111 return result; 117 return result;
112} 118}
113 119
114 120
115/*
116 *
117 * On kernel versions where cancel_work_sync() didn't return anything,
118 * we rely on wake_tx_skb() being non-NULL.
119 */
120static 121static
121int i2400m_stop(struct net_device *net_dev) 122int i2400m_stop(struct net_device *net_dev)
122{ 123{
@@ -124,21 +125,7 @@ int i2400m_stop(struct net_device *net_dev)
124 struct device *dev = i2400m_dev(i2400m); 125 struct device *dev = i2400m_dev(i2400m);
125 126
126 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); 127 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
127 /* See i2400m_hard_start_xmit(), references are taken there 128 i2400m_net_wake_stop(i2400m);
128 * and here we release them if the work was still
129 * pending. Note we can't differentiate work not pending vs
130 * never scheduled, so the NULL check does that. */
131 if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
132 && i2400m->wake_tx_skb != NULL) {
133 unsigned long flags;
134 struct sk_buff *wake_tx_skb;
135 spin_lock_irqsave(&i2400m->tx_lock, flags);
136 wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
137 i2400m->wake_tx_skb = NULL; /* compat help */
138 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
139 i2400m_put(i2400m);
140 kfree_skb(wake_tx_skb);
141 }
142 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m); 129 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
143 return 0; 130 return 0;
144} 131}
@@ -167,6 +154,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
167{ 154{
168 int result; 155 int result;
169 struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws); 156 struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
157 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
170 struct device *dev = i2400m_dev(i2400m); 158 struct device *dev = i2400m_dev(i2400m);
171 struct sk_buff *skb = i2400m->wake_tx_skb; 159 struct sk_buff *skb = i2400m->wake_tx_skb;
172 unsigned long flags; 160 unsigned long flags;
@@ -182,27 +170,36 @@ void i2400m_wake_tx_work(struct work_struct *ws)
182 dev_err(dev, "WAKE&TX: skb dissapeared!\n"); 170 dev_err(dev, "WAKE&TX: skb dissapeared!\n");
183 goto out_put; 171 goto out_put;
184 } 172 }
173 /* If we have, somehow, lost the connection after this was
174 * queued, don't do anything; this might be the device got
175 * reset or just disconnected. */
176 if (unlikely(!netif_carrier_ok(net_dev)))
177 goto out_kfree;
185 result = i2400m_cmd_exit_idle(i2400m); 178 result = i2400m_cmd_exit_idle(i2400m);
186 if (result == -EILSEQ) 179 if (result == -EILSEQ)
187 result = 0; 180 result = 0;
188 if (result < 0) { 181 if (result < 0) {
189 dev_err(dev, "WAKE&TX: device didn't get out of idle: " 182 dev_err(dev, "WAKE&TX: device didn't get out of idle: "
190 "%d\n", result); 183 "%d - resetting\n", result);
191 goto error; 184 i2400m_reset(i2400m, I2400M_RT_BUS);
185 goto error;
192 } 186 }
193 result = wait_event_timeout(i2400m->state_wq, 187 result = wait_event_timeout(i2400m->state_wq,
194 i2400m->state != I2400M_SS_IDLE, 5 * HZ); 188 i2400m->state != I2400M_SS_IDLE,
189 net_dev->watchdog_timeo - HZ/2);
195 if (result == 0) 190 if (result == 0)
196 result = -ETIMEDOUT; 191 result = -ETIMEDOUT;
197 if (result < 0) { 192 if (result < 0) {
198 dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: " 193 dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
199 "%d\n", result); 194 "%d - resetting\n", result);
195 i2400m_reset(i2400m, I2400M_RT_BUS);
200 goto error; 196 goto error;
201 } 197 }
202 msleep(20); /* device still needs some time or it drops it */ 198 msleep(20); /* device still needs some time or it drops it */
203 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); 199 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
204 netif_wake_queue(i2400m->wimax_dev.net_dev);
205error: 200error:
201 netif_wake_queue(net_dev);
202out_kfree:
206 kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ 203 kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */
207out_put: 204out_put:
208 i2400m_put(i2400m); 205 i2400m_put(i2400m);
@@ -229,6 +226,38 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
229} 226}
230 227
231 228
229
230/*
231 * Cleanup resources acquired during i2400m_net_wake_tx()
232 *
233 * This is called by __i2400m_dev_stop and means we have to make sure
234 * the workqueue is flushed from any pending work.
235 */
236void i2400m_net_wake_stop(struct i2400m *i2400m)
237{
238 struct device *dev = i2400m_dev(i2400m);
239
240 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
241 /* See i2400m_hard_start_xmit(), references are taken there
242 * and here we release them if the work was still
243 * pending. Note we can't differentiate work not pending vs
244 * never scheduled, so the NULL check does that. */
245 if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
246 && i2400m->wake_tx_skb != NULL) {
247 unsigned long flags;
248 struct sk_buff *wake_tx_skb;
249 spin_lock_irqsave(&i2400m->tx_lock, flags);
250 wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
251 i2400m->wake_tx_skb = NULL; /* compat help */
252 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
253 i2400m_put(i2400m);
254 kfree_skb(wake_tx_skb);
255 }
256 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
257 return;
258}
259
260
232/* 261/*
233 * TX an skb to an idle device 262 * TX an skb to an idle device
234 * 263 *
@@ -342,6 +371,20 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
342 int result; 371 int result;
343 372
344 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); 373 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
374 if (skb_header_cloned(skb)) {
375 /*
376 * Make tcpdump/wireshark happy -- if they are
377 * running, the skb is cloned and we will overwrite
378 * the mac fields in i2400m_tx_prep_header. Expand
379 * seems to fix this...
380 */
381 result = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
382 if (result) {
383 result = NETDEV_TX_BUSY;
384 goto error_expand;
385 }
386 }
387
345 if (i2400m->state == I2400M_SS_IDLE) 388 if (i2400m->state == I2400M_SS_IDLE)
346 result = i2400m_net_wake_tx(i2400m, net_dev, skb); 389 result = i2400m_net_wake_tx(i2400m, net_dev, skb);
347 else 390 else
@@ -352,10 +395,11 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
352 net_dev->stats.tx_packets++; 395 net_dev->stats.tx_packets++;
353 net_dev->stats.tx_bytes += skb->len; 396 net_dev->stats.tx_bytes += skb->len;
354 } 397 }
398 result = NETDEV_TX_OK;
399error_expand:
355 kfree_skb(skb); 400 kfree_skb(skb);
356 401 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
357 d_fnend(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); 402 return result;
358 return NETDEV_TX_OK;
359} 403}
360 404
361 405
@@ -559,6 +603,22 @@ static const struct net_device_ops i2400m_netdev_ops = {
559 .ndo_change_mtu = i2400m_change_mtu, 603 .ndo_change_mtu = i2400m_change_mtu,
560}; 604};
561 605
606static void i2400m_get_drvinfo(struct net_device *net_dev,
607 struct ethtool_drvinfo *info)
608{
609 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
610
611 strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
612 strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
613 if (net_dev->dev.parent)
614 strncpy(info->bus_info, dev_name(net_dev->dev.parent),
615 sizeof(info->bus_info) - 1);
616}
617
618static const struct ethtool_ops i2400m_ethtool_ops = {
619 .get_drvinfo = i2400m_get_drvinfo,
620 .get_link = ethtool_op_get_link,
621};
562 622
563/** 623/**
564 * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data 624 * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data
@@ -580,6 +640,7 @@ void i2400m_netdev_setup(struct net_device *net_dev)
580 & ~IFF_MULTICAST); 640 & ~IFF_MULTICAST);
581 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; 641 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
582 net_dev->netdev_ops = &i2400m_netdev_ops; 642 net_dev->netdev_ops = &i2400m_netdev_ops;
643 net_dev->ethtool_ops = &i2400m_ethtool_ops;
583 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); 644 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
584} 645}
585EXPORT_SYMBOL_GPL(i2400m_netdev_setup); 646EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 07c32e68909f..e3d2a9de023c 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -158,30 +158,104 @@ struct i2400m_report_hook_args {
158 struct sk_buff *skb_rx; 158 struct sk_buff *skb_rx;
159 const struct i2400m_l3l4_hdr *l3l4_hdr; 159 const struct i2400m_l3l4_hdr *l3l4_hdr;
160 size_t size; 160 size_t size;
161 struct list_head list_node;
161}; 162};
162 163
163 164
164/* 165/*
165 * Execute i2400m_report_hook in a workqueue 166 * Execute i2400m_report_hook in a workqueue
166 * 167 *
167 * Unpacks arguments from the deferred call, executes it and then 168 * Goes over the list of queued reports in i2400m->rx_reports and
168 * drops the references. 169 * processes them.
169 * 170 *
170 * Obvious NOTE: References are needed because we are a separate 171 * NOTE: refcounts on i2400m are not needed because we flush the
171 * thread; otherwise the buffer changes under us because it is 172 * workqueue this runs on (i2400m->work_queue) before destroying
172 * released by the original caller. 173 * i2400m.
173 */ 174 */
174static
175void i2400m_report_hook_work(struct work_struct *ws) 175void i2400m_report_hook_work(struct work_struct *ws)
176{ 176{
177 struct i2400m_work *iw = 177 struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
178 container_of(ws, struct i2400m_work, ws); 178 struct device *dev = i2400m_dev(i2400m);
179 struct i2400m_report_hook_args *args = (void *) iw->pl; 179 struct i2400m_report_hook_args *args, *args_next;
180 if (iw->i2400m->ready) 180 LIST_HEAD(list);
181 i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size); 181 unsigned long flags;
182 kfree_skb(args->skb_rx); 182
183 i2400m_put(iw->i2400m); 183 while (1) {
184 kfree(iw); 184 spin_lock_irqsave(&i2400m->rx_lock, flags);
185 list_splice_init(&i2400m->rx_reports, &list);
186 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
187 if (list_empty(&list))
188 break;
189 else
190 d_printf(1, dev, "processing queued reports\n");
191 list_for_each_entry_safe(args, args_next, &list, list_node) {
192 d_printf(2, dev, "processing queued report %p\n", args);
193 i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
194 kfree_skb(args->skb_rx);
195 list_del(&args->list_node);
196 kfree(args);
197 }
198 }
199}
200
201
202/*
203 * Flush the list of queued reports
204 */
205static
206void i2400m_report_hook_flush(struct i2400m *i2400m)
207{
208 struct device *dev = i2400m_dev(i2400m);
209 struct i2400m_report_hook_args *args, *args_next;
210 LIST_HEAD(list);
211 unsigned long flags;
212
213 d_printf(1, dev, "flushing queued reports\n");
214 spin_lock_irqsave(&i2400m->rx_lock, flags);
215 list_splice_init(&i2400m->rx_reports, &list);
216 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
217 list_for_each_entry_safe(args, args_next, &list, list_node) {
218 d_printf(2, dev, "flushing queued report %p\n", args);
219 kfree_skb(args->skb_rx);
220 list_del(&args->list_node);
221 kfree(args);
222 }
223}
224
225
226/*
227 * Queue a report for later processing
228 *
229 * @i2400m: device descriptor
230 * @skb_rx: skb that contains the payload (for reference counting)
231 * @l3l4_hdr: pointer to the control
232 * @size: size of the message
233 */
234static
235void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
236 const void *l3l4_hdr, size_t size)
237{
238 struct device *dev = i2400m_dev(i2400m);
239 unsigned long flags;
240 struct i2400m_report_hook_args *args;
241
242 args = kzalloc(sizeof(*args), GFP_NOIO);
243 if (args) {
244 args->skb_rx = skb_get(skb_rx);
245 args->l3l4_hdr = l3l4_hdr;
246 args->size = size;
247 spin_lock_irqsave(&i2400m->rx_lock, flags);
248 list_add_tail(&args->list_node, &i2400m->rx_reports);
249 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
250 d_printf(2, dev, "queued report %p\n", args);
251 rmb(); /* see i2400m->ready's documentation */
252 if (likely(i2400m->ready)) /* only send if up */
253 queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
254 } else {
255 if (printk_ratelimit())
256 dev_err(dev, "%s:%u: Can't allocate %zu B\n",
257 __func__, __LINE__, sizeof(*args));
258 }
185} 259}
186 260
187 261
@@ -295,21 +369,29 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
295 msg_type, size); 369 msg_type, size);
296 d_dump(2, dev, l3l4_hdr, size); 370 d_dump(2, dev, l3l4_hdr, size);
297 if (msg_type & I2400M_MT_REPORT_MASK) { 371 if (msg_type & I2400M_MT_REPORT_MASK) {
298 /* These hooks have to be ran serialized; as well, the 372 /*
299 * handling might force the execution of commands, and 373 * Process each report
300 * that might cause reentrancy issues with 374 *
301 * bus-specific subdrivers and workqueues. So we run 375 * - has to be run serialized as well
302 * it in a separate workqueue. */ 376 *
303 struct i2400m_report_hook_args args = { 377 * - the handling might force the execution of
304 .skb_rx = skb_rx, 378 * commands. That might cause reentrancy issues with
305 .l3l4_hdr = l3l4_hdr, 379 * bus-specific subdrivers and workqueues, so we
306 .size = size 380 * run it in a separate workqueue.
307 }; 381 *
308 if (unlikely(i2400m->ready == 0)) /* only send if up */ 382 * - when the driver is not yet ready to handle them,
309 return; 383 * they are queued and at some point the queue is
310 skb_get(skb_rx); 384 * restarted [NOTE: we can't queue SKBs directly, as
311 i2400m_queue_work(i2400m, i2400m_report_hook_work, 385 * this might be a piece of a SKB, not the whole
312 GFP_KERNEL, &args, sizeof(args)); 386 * thing, and this is cheaper than cloning the
387 * SKB].
388 *
389 * Note we don't do refcounting for the device
390 * structure; this is because before destroying
391 * 'i2400m', we make sure to flush the
392 * i2400m->work_queue, so there are no issues.
393 */
394 i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
313 if (unlikely(i2400m->trace_msg_from_user)) 395 if (unlikely(i2400m->trace_msg_from_user))
314 wimax_msg(&i2400m->wimax_dev, "echo", 396 wimax_msg(&i2400m->wimax_dev, "echo",
315 l3l4_hdr, size, GFP_KERNEL); 397 l3l4_hdr, size, GFP_KERNEL);
@@ -363,8 +445,6 @@ void i2400m_rx_trace(struct i2400m *i2400m,
363 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET", 445 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
364 msg_type, size); 446 msg_type, size);
365 d_dump(2, dev, l3l4_hdr, size); 447 d_dump(2, dev, l3l4_hdr, size);
366 if (unlikely(i2400m->ready == 0)) /* only send if up */
367 return;
368 result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL); 448 result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
369 if (result < 0) 449 if (result < 0)
370 dev_err(dev, "error sending trace to userspace: %d\n", 450 dev_err(dev, "error sending trace to userspace: %d\n",
@@ -748,7 +828,7 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
748 dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n", 828 dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
749 nsn, lbn, roq->ws); 829 nsn, lbn, roq->ws);
750 i2400m_roq_log_dump(i2400m, roq); 830 i2400m_roq_log_dump(i2400m, roq);
751 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 831 i2400m_reset(i2400m, I2400M_RT_WARM);
752 } else { 832 } else {
753 __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn); 833 __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
754 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET, 834 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
@@ -814,7 +894,7 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
814 dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n", 894 dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
815 nsn, sn, roq->ws); 895 nsn, sn, roq->ws);
816 i2400m_roq_log_dump(i2400m, roq); 896 i2400m_roq_log_dump(i2400m, roq);
817 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 897 i2400m_reset(i2400m, I2400M_RT_WARM);
818 } else { 898 } else {
819 /* if the queue is empty, don't bother as we'd queue 899 /* if the queue is empty, don't bother as we'd queue
820 * it and immediately unqueue it -- just deliver it */ 900 * it and immediately unqueue it -- just deliver it */
@@ -1194,6 +1274,28 @@ error_msg_hdr_check:
1194EXPORT_SYMBOL_GPL(i2400m_rx); 1274EXPORT_SYMBOL_GPL(i2400m_rx);
1195 1275
1196 1276
1277void i2400m_unknown_barker(struct i2400m *i2400m,
1278 const void *buf, size_t size)
1279{
1280 struct device *dev = i2400m_dev(i2400m);
1281 char prefix[64];
1282 const __le32 *barker = buf;
1283 dev_err(dev, "RX: HW BUG? unknown barker %08x, "
1284 "dropping %zu bytes\n", le32_to_cpu(*barker), size);
1285 snprintf(prefix, sizeof(prefix), "%s %s: ",
1286 dev_driver_string(dev), dev_name(dev));
1287 if (size > 64) {
1288 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
1289 8, 4, buf, 64, 0);
1290 printk(KERN_ERR "%s... (only first 64 bytes "
1291 "dumped)\n", prefix);
1292 } else
1293 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
1294 8, 4, buf, size, 0);
1295}
1296EXPORT_SYMBOL(i2400m_unknown_barker);
1297
1298
1197/* 1299/*
1198 * Initialize the RX queue and infrastructure 1300 * Initialize the RX queue and infrastructure
1199 * 1301 *
@@ -1261,4 +1363,6 @@ void i2400m_rx_release(struct i2400m *i2400m)
1261 kfree(i2400m->rx_roq[0].log); 1363 kfree(i2400m->rx_roq[0].log);
1262 kfree(i2400m->rx_roq); 1364 kfree(i2400m->rx_roq);
1263 } 1365 }
1366 /* at this point, nothing can be received... */
1367 i2400m_report_hook_flush(i2400m);
1264} 1368}
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
index 7d6ec0f475f8..8e025418f5be 100644
--- a/drivers/net/wimax/i2400m/sdio-fw.c
+++ b/drivers/net/wimax/i2400m/sdio-fw.c
@@ -118,7 +118,8 @@ ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
118 if (cmd_size > I2400M_BM_CMD_BUF_SIZE) 118 if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
119 goto error_too_big; 119 goto error_too_big;
120 120
121 memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size); /* Prep command */ 121 if (_cmd != i2400m->bm_cmd_buf)
122 memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
122 cmd = i2400m->bm_cmd_buf; 123 cmd = i2400m->bm_cmd_buf;
123 if (cmd_size_a > cmd_size) /* Zero pad space */ 124 if (cmd_size_a > cmd_size) /* Zero pad space */
124 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size); 125 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
@@ -177,10 +178,6 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
177 d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n", 178 d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
178 i2400m, ack, ack_size); 179 i2400m, ack, ack_size);
179 180
180 spin_lock(&i2400m->rx_lock);
181 i2400ms->bm_ack_size = -EINPROGRESS;
182 spin_unlock(&i2400m->rx_lock);
183
184 result = wait_event_timeout(i2400ms->bm_wfa_wq, 181 result = wait_event_timeout(i2400ms->bm_wfa_wq,
185 i2400ms->bm_ack_size != -EINPROGRESS, 182 i2400ms->bm_ack_size != -EINPROGRESS,
186 2 * HZ); 183 2 * HZ);
@@ -199,6 +196,10 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
199 size = min(ack_size, i2400ms->bm_ack_size); 196 size = min(ack_size, i2400ms->bm_ack_size);
200 memcpy(ack, i2400m->bm_ack_buf, size); 197 memcpy(ack, i2400m->bm_ack_buf, size);
201 } 198 }
199 /*
200 * Remember always to clear the bm_ack_size to -EINPROGRESS
201 * after the RX data is processed
202 */
202 i2400ms->bm_ack_size = -EINPROGRESS; 203 i2400ms->bm_ack_size = -EINPROGRESS;
203 spin_unlock(&i2400m->rx_lock); 204 spin_unlock(&i2400m->rx_lock);
204 205
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index 321beadf6e47..8adf6c9b6f8f 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -53,6 +53,7 @@
53 * i2400ms_irq() 53 * i2400ms_irq()
54 * i2400ms_rx() 54 * i2400ms_rx()
55 * __i2400ms_rx_get_size() 55 * __i2400ms_rx_get_size()
56 * i2400m_is_boot_barker()
56 * i2400m_rx() 57 * i2400m_rx()
57 * 58 *
58 * i2400ms_rx_setup() 59 * i2400ms_rx_setup()
@@ -138,6 +139,11 @@ void i2400ms_rx(struct i2400ms *i2400ms)
138 ret = rx_size; 139 ret = rx_size;
139 goto error_get_size; 140 goto error_get_size;
140 } 141 }
142 /*
143 * Hardware quirk: make sure to clear the INTR status register
144 * AFTER getting the data transfer size.
145 */
146 sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
141 147
142 ret = -ENOMEM; 148 ret = -ENOMEM;
143 skb = alloc_skb(rx_size, GFP_ATOMIC); 149 skb = alloc_skb(rx_size, GFP_ATOMIC);
@@ -153,25 +159,34 @@ void i2400ms_rx(struct i2400ms *i2400ms)
153 } 159 }
154 160
155 rmb(); /* make sure we get boot_mode from dev_reset_handle */ 161 rmb(); /* make sure we get boot_mode from dev_reset_handle */
156 if (i2400m->boot_mode == 1) { 162 if (unlikely(i2400m->boot_mode == 1)) {
157 spin_lock(&i2400m->rx_lock); 163 spin_lock(&i2400m->rx_lock);
158 i2400ms->bm_ack_size = rx_size; 164 i2400ms->bm_ack_size = rx_size;
159 spin_unlock(&i2400m->rx_lock); 165 spin_unlock(&i2400m->rx_lock);
160 memcpy(i2400m->bm_ack_buf, skb->data, rx_size); 166 memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
161 wake_up(&i2400ms->bm_wfa_wq); 167 wake_up(&i2400ms->bm_wfa_wq);
162 dev_err(dev, "RX: SDIO boot mode message\n"); 168 d_printf(5, dev, "RX: SDIO boot mode message\n");
163 kfree_skb(skb); 169 kfree_skb(skb);
164 } else if (unlikely(!memcmp(skb->data, i2400m_NBOOT_BARKER, 170 goto out;
165 sizeof(i2400m_NBOOT_BARKER)) 171 }
166 || !memcmp(skb->data, i2400m_SBOOT_BARKER, 172 ret = -EIO;
167 sizeof(i2400m_SBOOT_BARKER)))) { 173 if (unlikely(rx_size < sizeof(__le32))) {
168 ret = i2400m_dev_reset_handle(i2400m); 174 dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
175 goto error_bad_size;
176 }
177 if (likely(i2400m_is_d2h_barker(skb->data))) {
178 skb_put(skb, rx_size);
179 i2400m_rx(i2400m, skb);
180 } else if (unlikely(i2400m_is_boot_barker(i2400m,
181 skb->data, rx_size))) {
182 ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
169 dev_err(dev, "RX: SDIO reboot barker\n"); 183 dev_err(dev, "RX: SDIO reboot barker\n");
170 kfree_skb(skb); 184 kfree_skb(skb);
171 } else { 185 } else {
172 skb_put(skb, rx_size); 186 i2400m_unknown_barker(i2400m, skb->data, rx_size);
173 i2400m_rx(i2400m, skb); 187 kfree_skb(skb);
174 } 188 }
189out:
175 d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms); 190 d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
176 return; 191 return;
177 192
@@ -179,6 +194,7 @@ error_memcpy_fromio:
179 kfree_skb(skb); 194 kfree_skb(skb);
180error_alloc_skb: 195error_alloc_skb:
181error_get_size: 196error_get_size:
197error_bad_size:
182 d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret); 198 d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
183 return; 199 return;
184} 200}
@@ -209,7 +225,6 @@ void i2400ms_irq(struct sdio_func *func)
209 dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n"); 225 dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
210 goto error_no_irq; 226 goto error_no_irq;
211 } 227 }
212 sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
213 i2400ms_rx(i2400ms); 228 i2400ms_rx(i2400ms);
214error_no_irq: 229error_no_irq:
215 d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms); 230 d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
@@ -234,6 +249,13 @@ int i2400ms_rx_setup(struct i2400ms *i2400ms)
234 init_waitqueue_head(&i2400ms->bm_wfa_wq); 249 init_waitqueue_head(&i2400ms->bm_wfa_wq);
235 spin_lock(&i2400m->rx_lock); 250 spin_lock(&i2400m->rx_lock);
236 i2400ms->bm_wait_result = -EINPROGRESS; 251 i2400ms->bm_wait_result = -EINPROGRESS;
252 /*
253 * Before we are about to enable the RX interrupt, make sure
254 * bm_ack_size is cleared to -EINPROGRESS which indicates
255 * no RX interrupt happened yet or the previous interrupt
256 * has been handled, we are ready to take the new interrupt
257 */
258 i2400ms->bm_ack_size = -EINPROGRESS;
237 spin_unlock(&i2400m->rx_lock); 259 spin_unlock(&i2400m->rx_lock);
238 260
239 sdio_claim_host(func); 261 sdio_claim_host(func);
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index 5105a5ebc44f..de66d068c9cb 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -149,5 +149,8 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
149 149
150void i2400ms_tx_release(struct i2400ms *i2400ms) 150void i2400ms_tx_release(struct i2400ms *i2400ms)
151{ 151{
152 destroy_workqueue(i2400ms->tx_workqueue); 152 if (i2400ms->tx_workqueue) {
153 destroy_workqueue(i2400ms->tx_workqueue);
154 i2400ms->tx_workqueue = NULL;
155 }
153} 156}
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 2981e211e04f..76a50ac02ebb 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -43,18 +43,9 @@
43 * i2400m_release() 43 * i2400m_release()
44 * free_netdev(net_dev) 44 * free_netdev(net_dev)
45 * 45 *
46 * i2400ms_bus_reset() Called by i2400m->bus_reset 46 * i2400ms_bus_reset() Called by i2400m_reset
47 * __i2400ms_reset() 47 * __i2400ms_reset()
48 * __i2400ms_send_barker() 48 * __i2400ms_send_barker()
49 *
50 * i2400ms_bus_dev_start() Called by i2400m_dev_start() [who is
51 * i2400ms_tx_setup() called by i2400m_setup()]
52 * i2400ms_rx_setup()
53 *
54 * i2400ms_bus_dev_stop() Called by i2400m_dev_stop() [who is
55 * i2400ms_rx_release() is called by i2400m_release()]
56 * i2400ms_tx_release()
57 *
58 */ 49 */
59 50
60#include <linux/debugfs.h> 51#include <linux/debugfs.h>
@@ -71,6 +62,14 @@
71static int ioe_timeout = 2; 62static int ioe_timeout = 2;
72module_param(ioe_timeout, int, 0); 63module_param(ioe_timeout, int, 0);
73 64
65static char i2400ms_debug_params[128];
66module_param_string(debug, i2400ms_debug_params, sizeof(i2400ms_debug_params),
67 0644);
68MODULE_PARM_DESC(debug,
69 "String of space-separated NAME:VALUE pairs, where NAMEs "
70 "are the different debug submodules and VALUE are the "
71 "initial debug value to set.");
72
74/* Our firmware file name list */ 73/* Our firmware file name list */
75static const char *i2400ms_bus_fw_names[] = { 74static const char *i2400ms_bus_fw_names[] = {
76#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf" 75#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
@@ -95,17 +94,24 @@ static const struct i2400m_poke_table i2400ms_pokes[] = {
95 * when we ask it to explicitly doing). Tries until a timeout is 94 * when we ask it to explicitly doing). Tries until a timeout is
96 * reached. 95 * reached.
97 * 96 *
97 * The @maxtries argument indicates how many times (at most) it should
98 * be tried to enable the function. 0 means forever. This acts along
99 * with the timeout (ie: it'll stop trying as soon as the maximum
100 * number of tries is reached _or_ as soon as the timeout is reached).
101 *
98 * The reverse of this is...sdio_disable_function() 102 * The reverse of this is...sdio_disable_function()
99 * 103 *
100 * Returns: 0 if the SDIO function was enabled, < 0 errno code on 104 * Returns: 0 if the SDIO function was enabled, < 0 errno code on
101 * error (-ENODEV when it was unable to enable the function). 105 * error (-ENODEV when it was unable to enable the function).
102 */ 106 */
103static 107static
104int i2400ms_enable_function(struct sdio_func *func) 108int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries)
105{ 109{
110 struct sdio_func *func = i2400ms->func;
106 u64 timeout; 111 u64 timeout;
107 int err; 112 int err;
108 struct device *dev = &func->dev; 113 struct device *dev = &func->dev;
114 unsigned tries = 0;
109 115
110 d_fnstart(3, dev, "(func %p)\n", func); 116 d_fnstart(3, dev, "(func %p)\n", func);
111 /* Setup timeout (FIXME: This needs to read the CIS table to 117 /* Setup timeout (FIXME: This needs to read the CIS table to
@@ -115,6 +121,14 @@ int i2400ms_enable_function(struct sdio_func *func)
115 err = -ENODEV; 121 err = -ENODEV;
116 while (err != 0 && time_before64(get_jiffies_64(), timeout)) { 122 while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
117 sdio_claim_host(func); 123 sdio_claim_host(func);
124 /*
125 * There is a silicon bug on the IWMC3200, where the
126 * IOE timeout will cause problems on Moorestown
127 * platforms (system hang). We explicitly overwrite
128 * func->enable_timeout here to work around the issue.
129 */
130 if (i2400ms->iwmc3200)
131 func->enable_timeout = IWMC3200_IOR_TIMEOUT;
118 err = sdio_enable_func(func); 132 err = sdio_enable_func(func);
119 if (0 == err) { 133 if (0 == err) {
120 sdio_release_host(func); 134 sdio_release_host(func);
@@ -122,8 +136,11 @@ int i2400ms_enable_function(struct sdio_func *func)
122 goto function_enabled; 136 goto function_enabled;
123 } 137 }
124 d_printf(2, dev, "SDIO function failed to enable: %d\n", err); 138 d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
125 sdio_disable_func(func);
126 sdio_release_host(func); 139 sdio_release_host(func);
140 if (maxtries > 0 && ++tries >= maxtries) {
141 err = -ETIME;
142 break;
143 }
127 msleep(I2400MS_INIT_SLEEP_INTERVAL); 144 msleep(I2400MS_INIT_SLEEP_INTERVAL);
128 } 145 }
129 /* If timed out, device is not there yet -- get -ENODEV so 146 /* If timed out, device is not there yet -- get -ENODEV so
@@ -140,46 +157,99 @@ function_enabled:
140 157
141 158
142/* 159/*
143 * Setup driver resources needed to communicate with the device 160 * Setup minimal device communication infrastructure needed to at
161 * least be able to update the firmware.
144 * 162 *
145 * The fw needs some time to settle, and it was just uploaded, 163 * Note the ugly trick: if we are in the probe path
146 * so give it a break first. I'd prefer to just wait for the device to 164 * (i2400ms->debugfs_dentry == NULL), we only retry function
147 * send something, but seems the poking we do to enable SDIO stuff 165 * enablement one, to avoid racing with the iwmc3200 top controller.
148 * interferes with it, so just give it a break before starting...
149 */ 166 */
150static 167static
151int i2400ms_bus_dev_start(struct i2400m *i2400m) 168int i2400ms_bus_setup(struct i2400m *i2400m)
152{ 169{
153 int result; 170 int result;
154 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); 171 struct i2400ms *i2400ms =
172 container_of(i2400m, struct i2400ms, i2400m);
173 struct device *dev = i2400m_dev(i2400m);
155 struct sdio_func *func = i2400ms->func; 174 struct sdio_func *func = i2400ms->func;
156 struct device *dev = &func->dev; 175 int retries;
176
177 sdio_claim_host(func);
178 result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
179 sdio_release_host(func);
180 if (result < 0) {
181 dev_err(dev, "Failed to set block size: %d\n", result);
182 goto error_set_blk_size;
183 }
184
185 if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
186 retries = 1;
187 else
188 retries = 0;
189 result = i2400ms_enable_function(i2400ms, retries);
190 if (result < 0) {
191 dev_err(dev, "Cannot enable SDIO function: %d\n", result);
192 goto error_func_enable;
193 }
157 194
158 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
159 msleep(200);
160 result = i2400ms_tx_setup(i2400ms); 195 result = i2400ms_tx_setup(i2400ms);
161 if (result < 0) 196 if (result < 0)
162 goto error_tx_setup; 197 goto error_tx_setup;
163 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 198 result = i2400ms_rx_setup(i2400ms);
164 return result; 199 if (result < 0)
200 goto error_rx_setup;
201 return 0;
165 202
166error_tx_setup: 203error_rx_setup:
167 i2400ms_tx_release(i2400ms); 204 i2400ms_tx_release(i2400ms);
168 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 205error_tx_setup:
206 sdio_claim_host(func);
207 sdio_disable_func(func);
208 sdio_release_host(func);
209error_func_enable:
210error_set_blk_size:
169 return result; 211 return result;
170} 212}
171 213
172 214
215/*
216 * Tear down minimal device communication infrastructure needed to at
217 * least be able to update the firmware.
218 */
219static
220void i2400ms_bus_release(struct i2400m *i2400m)
221{
222 struct i2400ms *i2400ms =
223 container_of(i2400m, struct i2400ms, i2400m);
224 struct sdio_func *func = i2400ms->func;
225
226 i2400ms_rx_release(i2400ms);
227 i2400ms_tx_release(i2400ms);
228 sdio_claim_host(func);
229 sdio_disable_func(func);
230 sdio_release_host(func);
231}
232
233
234/*
235 * Setup driver resources needed to communicate with the device
236 *
237 * The fw needs some time to settle, and it was just uploaded,
238 * so give it a break first. I'd prefer to just wait for the device to
239 * send something, but seems the poking we do to enable SDIO stuff
240 * interferes with it, so just give it a break before starting...
241 */
173static 242static
174void i2400ms_bus_dev_stop(struct i2400m *i2400m) 243int i2400ms_bus_dev_start(struct i2400m *i2400m)
175{ 244{
176 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); 245 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
177 struct sdio_func *func = i2400ms->func; 246 struct sdio_func *func = i2400ms->func;
178 struct device *dev = &func->dev; 247 struct device *dev = &func->dev;
179 248
180 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 249 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
181 i2400ms_tx_release(i2400ms); 250 msleep(200);
182 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 251 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0);
252 return 0;
183} 253}
184 254
185 255
@@ -233,18 +303,17 @@ error_kzalloc:
233 * Warm reset: 303 * Warm reset:
234 * 304 *
235 * The device will be fully reset internally, but won't be 305 * The device will be fully reset internally, but won't be
236 * disconnected from the USB bus (so no reenumeration will 306 * disconnected from the bus (so no reenumeration will
237 * happen). Firmware upload will be necessary. 307 * happen). Firmware upload will be necessary.
238 * 308 *
239 * The device will send a reboot barker in the notification endpoint 309 * The device will send a reboot barker that will trigger the driver
240 * that will trigger the driver to reinitialize the state 310 * to reinitialize the state via __i2400m_dev_reset_handle.
241 * automatically from notif.c:i2400m_notification_grok() into
242 * i2400m_dev_bootstrap_delayed().
243 * 311 *
244 * Cold and bus (USB) reset: 312 *
313 * Cold and bus reset:
245 * 314 *
246 * The device will be fully reset internally, disconnected from the 315 * The device will be fully reset internally, disconnected from the
247 * USB bus and a reenumeration will happen. Firmware upload will be 316 * bus and a reenumeration will happen. Firmware upload will be
248 * necessary. Thus, we don't do any locking or struct 317 * necessary. Thus, we don't do any locking or struct
249 * reinitialization, as we are going to be fully disconnected and 318 * reinitialization, as we are going to be fully disconnected and
250 * reenumerated. 319 * reenumerated.
@@ -283,25 +352,13 @@ int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
283 sizeof(i2400m_COLD_BOOT_BARKER)); 352 sizeof(i2400m_COLD_BOOT_BARKER));
284 else if (rt == I2400M_RT_BUS) { 353 else if (rt == I2400M_RT_BUS) {
285do_bus_reset: 354do_bus_reset:
286 /* call netif_tx_disable() before sending IOE disable,
287 * so that all the tx from network layer are stopped
288 * while IOE is being reset. Make sure it is called
289 * only after register_netdev() was issued.
290 */
291 if (i2400m->wimax_dev.net_dev->reg_state == NETREG_REGISTERED)
292 netif_tx_disable(i2400m->wimax_dev.net_dev);
293 355
294 i2400ms_rx_release(i2400ms); 356 i2400ms_bus_release(i2400m);
295 sdio_claim_host(i2400ms->func);
296 sdio_disable_func(i2400ms->func);
297 sdio_release_host(i2400ms->func);
298 357
299 /* Wait for the device to settle */ 358 /* Wait for the device to settle */
300 msleep(40); 359 msleep(40);
301 360
302 result = i2400ms_enable_function(i2400ms->func); 361 result = i2400ms_bus_setup(i2400m);
303 if (result >= 0)
304 i2400ms_rx_setup(i2400ms);
305 } else 362 } else
306 BUG(); 363 BUG();
307 if (result < 0 && rt != I2400M_RT_BUS) { 364 if (result < 0 && rt != I2400M_RT_BUS) {
@@ -350,7 +407,7 @@ int i2400ms_debugfs_add(struct i2400ms *i2400ms)
350 int result; 407 int result;
351 struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry; 408 struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
352 409
353 dentry = debugfs_create_dir("i2400m-usb", dentry); 410 dentry = debugfs_create_dir("i2400m-sdio", dentry);
354 result = PTR_ERR(dentry); 411 result = PTR_ERR(dentry);
355 if (IS_ERR(dentry)) { 412 if (IS_ERR(dentry)) {
356 if (result == -ENODEV) 413 if (result == -ENODEV)
@@ -367,6 +424,7 @@ int i2400ms_debugfs_add(struct i2400ms *i2400ms)
367 424
368error: 425error:
369 debugfs_remove_recursive(i2400ms->debugfs_dentry); 426 debugfs_remove_recursive(i2400ms->debugfs_dentry);
427 i2400ms->debugfs_dentry = NULL;
370 return result; 428 return result;
371} 429}
372 430
@@ -425,37 +483,30 @@ int i2400ms_probe(struct sdio_func *func,
425 483
426 i2400m->bus_tx_block_size = I2400MS_BLK_SIZE; 484 i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
427 i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX; 485 i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
486 i2400m->bus_setup = i2400ms_bus_setup;
428 i2400m->bus_dev_start = i2400ms_bus_dev_start; 487 i2400m->bus_dev_start = i2400ms_bus_dev_start;
429 i2400m->bus_dev_stop = i2400ms_bus_dev_stop; 488 i2400m->bus_dev_stop = NULL;
489 i2400m->bus_release = i2400ms_bus_release;
430 i2400m->bus_tx_kick = i2400ms_bus_tx_kick; 490 i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
431 i2400m->bus_reset = i2400ms_bus_reset; 491 i2400m->bus_reset = i2400ms_bus_reset;
432 /* The iwmc3200-wimax sometimes requires the driver to try 492 /* The iwmc3200-wimax sometimes requires the driver to try
433 * hard when we paint it into a corner. */ 493 * hard when we paint it into a corner. */
434 i2400m->bus_bm_retries = I3200_BOOT_RETRIES; 494 i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
435 i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send; 495 i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
436 i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack; 496 i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
437 i2400m->bus_fw_names = i2400ms_bus_fw_names; 497 i2400m->bus_fw_names = i2400ms_bus_fw_names;
438 i2400m->bus_bm_mac_addr_impaired = 1; 498 i2400m->bus_bm_mac_addr_impaired = 1;
439 i2400m->bus_bm_pokes_table = &i2400ms_pokes[0]; 499 i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];
440 500
441 sdio_claim_host(func); 501 switch (func->device) {
442 result = sdio_set_block_size(func, I2400MS_BLK_SIZE); 502 case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
443 sdio_release_host(func); 503 case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
444 if (result < 0) { 504 i2400ms->iwmc3200 = 1;
445 dev_err(dev, "Failed to set block size: %d\n", result); 505 break;
446 goto error_set_blk_size; 506 default:
447 } 507 i2400ms->iwmc3200 = 0;
448
449 result = i2400ms_enable_function(i2400ms->func);
450 if (result < 0) {
451 dev_err(dev, "Cannot enable SDIO function: %d\n", result);
452 goto error_func_enable;
453 } 508 }
454 509
455 result = i2400ms_rx_setup(i2400ms);
456 if (result < 0)
457 goto error_rx_setup;
458
459 result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT); 510 result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
460 if (result < 0) { 511 if (result < 0) {
461 dev_err(dev, "cannot setup device: %d\n", result); 512 dev_err(dev, "cannot setup device: %d\n", result);
@@ -473,13 +524,6 @@ int i2400ms_probe(struct sdio_func *func,
473error_debugfs_add: 524error_debugfs_add:
474 i2400m_release(i2400m); 525 i2400m_release(i2400m);
475error_setup: 526error_setup:
476 i2400ms_rx_release(i2400ms);
477error_rx_setup:
478 sdio_claim_host(func);
479 sdio_disable_func(func);
480 sdio_release_host(func);
481error_func_enable:
482error_set_blk_size:
483 sdio_set_drvdata(func, NULL); 527 sdio_set_drvdata(func, NULL);
484 free_netdev(net_dev); 528 free_netdev(net_dev);
485error_alloc_netdev: 529error_alloc_netdev:
@@ -497,12 +541,9 @@ void i2400ms_remove(struct sdio_func *func)
497 541
498 d_fnstart(3, dev, "SDIO func %p\n", func); 542 d_fnstart(3, dev, "SDIO func %p\n", func);
499 debugfs_remove_recursive(i2400ms->debugfs_dentry); 543 debugfs_remove_recursive(i2400ms->debugfs_dentry);
500 i2400ms_rx_release(i2400ms); 544 i2400ms->debugfs_dentry = NULL;
501 i2400m_release(i2400m); 545 i2400m_release(i2400m);
502 sdio_set_drvdata(func, NULL); 546 sdio_set_drvdata(func, NULL);
503 sdio_claim_host(func);
504 sdio_disable_func(func);
505 sdio_release_host(func);
506 free_netdev(net_dev); 547 free_netdev(net_dev);
507 d_fnend(3, dev, "SDIO func %p\n", func); 548 d_fnend(3, dev, "SDIO func %p\n", func);
508} 549}
@@ -512,6 +553,8 @@ const struct sdio_device_id i2400ms_sdio_ids[] = {
512 /* Intel: i2400m WiMAX (iwmc3200) over SDIO */ 553 /* Intel: i2400m WiMAX (iwmc3200) over SDIO */
513 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 554 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
514 SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) }, 555 SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
556 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
557 SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
515 { /* end: all zeroes */ }, 558 { /* end: all zeroes */ },
516}; 559};
517MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids); 560MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
@@ -529,6 +572,8 @@ struct sdio_driver i2400m_sdio_driver = {
529static 572static
530int __init i2400ms_driver_init(void) 573int __init i2400ms_driver_init(void)
531{ 574{
575 d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
576 "i2400m_sdio.debug");
532 return sdio_register_driver(&i2400m_sdio_driver); 577 return sdio_register_driver(&i2400m_sdio_driver);
533} 578}
534module_init(i2400ms_driver_init); 579module_init(i2400ms_driver_init);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index fa16ccf8e26a..54480e8947f1 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -310,7 +310,7 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
310 size_t tail_room; 310 size_t tail_room;
311 size_t tx_in; 311 size_t tx_in;
312 312
313 if (unlikely(i2400m->tx_in) == 0) 313 if (unlikely(i2400m->tx_in == 0))
314 return I2400M_TX_BUF_SIZE; 314 return I2400M_TX_BUF_SIZE;
315 tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE; 315 tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
316 tail_room = I2400M_TX_BUF_SIZE - tx_in; 316 tail_room = I2400M_TX_BUF_SIZE - tx_in;
@@ -642,6 +642,9 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
642 * current one is out of payload slots or we have a singleton, 642 * current one is out of payload slots or we have a singleton,
643 * close it and start a new one */ 643 * close it and start a new one */
644 spin_lock_irqsave(&i2400m->tx_lock, flags); 644 spin_lock_irqsave(&i2400m->tx_lock, flags);
645 result = -ESHUTDOWN;
646 if (i2400m->tx_buf == NULL)
647 goto error_tx_new;
645try_new: 648try_new:
646 if (unlikely(i2400m->tx_msg == NULL)) 649 if (unlikely(i2400m->tx_msg == NULL))
647 i2400m_tx_new(i2400m); 650 i2400m_tx_new(i2400m);
@@ -697,7 +700,10 @@ try_new:
697 } 700 }
698error_tx_new: 701error_tx_new:
699 spin_unlock_irqrestore(&i2400m->tx_lock, flags); 702 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
700 i2400m->bus_tx_kick(i2400m); /* always kick, might free up space */ 703 /* kick in most cases, except when the TX subsys is down, as
704 * it might free space */
705 if (likely(result != -ESHUTDOWN))
706 i2400m->bus_tx_kick(i2400m);
701 d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n", 707 d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
702 i2400m, buf, buf_len, pl_type, result); 708 i2400m, buf, buf_len, pl_type, result);
703 return result; 709 return result;
@@ -740,6 +746,9 @@ struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
740 746
741 d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size); 747 d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
742 spin_lock_irqsave(&i2400m->tx_lock, flags); 748 spin_lock_irqsave(&i2400m->tx_lock, flags);
749 tx_msg_moved = NULL;
750 if (i2400m->tx_buf == NULL)
751 goto out_unlock;
743skip: 752skip:
744 tx_msg_moved = NULL; 753 tx_msg_moved = NULL;
745 if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */ 754 if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */
@@ -829,6 +838,8 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
829 838
830 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 839 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
831 spin_lock_irqsave(&i2400m->tx_lock, flags); 840 spin_lock_irqsave(&i2400m->tx_lock, flags);
841 if (i2400m->tx_buf == NULL)
842 goto out_unlock;
832 i2400m->tx_out += i2400m->tx_msg_size; 843 i2400m->tx_out += i2400m->tx_msg_size;
833 d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size); 844 d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
834 i2400m->tx_msg_size = 0; 845 i2400m->tx_msg_size = 0;
@@ -837,6 +848,7 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
837 n = i2400m->tx_out / I2400M_TX_BUF_SIZE; 848 n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
838 i2400m->tx_out %= I2400M_TX_BUF_SIZE; 849 i2400m->tx_out %= I2400M_TX_BUF_SIZE;
839 i2400m->tx_in -= n * I2400M_TX_BUF_SIZE; 850 i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
851out_unlock:
840 spin_unlock_irqrestore(&i2400m->tx_lock, flags); 852 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
841 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 853 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
842} 854}
@@ -876,5 +888,9 @@ int i2400m_tx_setup(struct i2400m *i2400m)
876 */ 888 */
877void i2400m_tx_release(struct i2400m *i2400m) 889void i2400m_tx_release(struct i2400m *i2400m)
878{ 890{
891 unsigned long flags;
892 spin_lock_irqsave(&i2400m->tx_lock, flags);
879 kfree(i2400m->tx_buf); 893 kfree(i2400m->tx_buf);
894 i2400m->tx_buf = NULL;
895 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
880} 896}
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 5ad287c228b8..ce6b9938fde0 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -99,10 +99,10 @@ ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
99 dev_err(dev, "BM-CMD: can't get autopm: %d\n", result); 99 dev_err(dev, "BM-CMD: can't get autopm: %d\n", result);
100 do_autopm = 0; 100 do_autopm = 0;
101 } 101 }
102 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT); 102 epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
103 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 103 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
104retry: 104retry:
105 result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, HZ); 105 result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, 200);
106 switch (result) { 106 switch (result) {
107 case 0: 107 case 0:
108 if (len != buf_size) { 108 if (len != buf_size) {
@@ -113,6 +113,28 @@ retry:
113 } 113 }
114 result = len; 114 result = len;
115 break; 115 break;
116 case -EPIPE:
117 /*
118 * Stall -- maybe the device is choking with our
119 * requests. Clear it and give it some time. If they
120 * happen to often, it might be another symptom, so we
121 * reset.
122 *
123 * No error handling for usb_clear_halt(0; if it
124 * works, the retry works; if it fails, this switch
125 * does the error handling for us.
126 */
127 if (edc_inc(&i2400mu->urb_edc,
128 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
129 dev_err(dev, "BM-CMD: too many stalls in "
130 "URB; resetting device\n");
131 usb_queue_reset_device(i2400mu->usb_iface);
132 /* fallthrough */
133 } else {
134 usb_clear_halt(i2400mu->usb_dev, pipe);
135 msleep(10); /* give the device some time */
136 goto retry;
137 }
116 case -EINVAL: /* while removing driver */ 138 case -EINVAL: /* while removing driver */
117 case -ENODEV: /* dev disconnect ... */ 139 case -ENODEV: /* dev disconnect ... */
118 case -ENOENT: /* just ignore it */ 140 case -ENOENT: /* just ignore it */
@@ -135,7 +157,6 @@ retry:
135 result); 157 result);
136 goto retry; 158 goto retry;
137 } 159 }
138 result = len;
139 if (do_autopm) 160 if (do_autopm)
140 usb_autopm_put_interface(i2400mu->usb_iface); 161 usb_autopm_put_interface(i2400mu->usb_iface);
141 return result; 162 return result;
@@ -172,7 +193,8 @@ ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
172 result = -E2BIG; 193 result = -E2BIG;
173 if (cmd_size > I2400M_BM_CMD_BUF_SIZE) 194 if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
174 goto error_too_big; 195 goto error_too_big;
175 memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size); 196 if (_cmd != i2400m->bm_cmd_buf)
197 memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
176 cmd = i2400m->bm_cmd_buf; 198 cmd = i2400m->bm_cmd_buf;
177 if (cmd_size_a > cmd_size) /* Zero pad space */ 199 if (cmd_size_a > cmd_size) /* Zero pad space */
178 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size); 200 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
@@ -226,7 +248,8 @@ int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb,
226 struct usb_endpoint_descriptor *epd; 248 struct usb_endpoint_descriptor *epd;
227 int pipe; 249 int pipe;
228 250
229 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION); 251 epd = usb_get_epd(i2400mu->usb_iface,
252 i2400mu->endpoint_cfg.notification);
230 pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); 253 pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
231 usb_fill_int_urb(urb, i2400mu->usb_dev, pipe, 254 usb_fill_int_urb(urb, i2400mu->usb_dev, pipe,
232 i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE, 255 i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE,
@@ -328,8 +351,8 @@ error_dev_gone:
328out: 351out:
329 if (do_autopm) 352 if (do_autopm)
330 usb_autopm_put_interface(i2400mu->usb_iface); 353 usb_autopm_put_interface(i2400mu->usb_iface);
331 d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %zd\n", 354 d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
332 i2400m, ack, ack_size, result); 355 i2400m, ack, ack_size, (long) result);
333 return result; 356 return result;
334 357
335error_exceeded: 358error_exceeded:
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 6add27c3f35c..f88d1c6e35cb 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -51,6 +51,7 @@
51 * 51 *
52 * i2400mu_usb_notification_cb() Called when a URB is ready 52 * i2400mu_usb_notification_cb() Called when a URB is ready
53 * i2400mu_notif_grok() 53 * i2400mu_notif_grok()
54 * i2400m_is_boot_barker()
54 * i2400m_dev_reset_handle() 55 * i2400m_dev_reset_handle()
55 * i2400mu_rx_kick() 56 * i2400mu_rx_kick()
56 */ 57 */
@@ -87,32 +88,21 @@ int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
87 d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n", 88 d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
88 i2400mu, buf, buf_len); 89 i2400mu, buf, buf_len);
89 ret = -EIO; 90 ret = -EIO;
90 if (buf_len < sizeof(i2400m_NBOOT_BARKER)) 91 if (buf_len < sizeof(i2400m_ZERO_BARKER))
91 /* Not a bug, just ignore */ 92 /* Not a bug, just ignore */
92 goto error_bad_size; 93 goto error_bad_size;
93 if (!memcmp(i2400m_NBOOT_BARKER, buf, sizeof(i2400m_NBOOT_BARKER)) 94 ret = 0;
94 || !memcmp(i2400m_SBOOT_BARKER, buf, sizeof(i2400m_SBOOT_BARKER))) 95 if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
95 ret = i2400m_dev_reset_handle(i2400m);
96 else if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
97 i2400mu_rx_kick(i2400mu); 96 i2400mu_rx_kick(i2400mu);
98 ret = 0; 97 goto out;
99 } else { /* Unknown or unexpected data in the notif message */
100 char prefix[64];
101 ret = -EIO;
102 dev_err(dev, "HW BUG? Unknown/unexpected data in notification "
103 "message (%zu bytes)\n", buf_len);
104 snprintf(prefix, sizeof(prefix), "%s %s: ",
105 dev_driver_string(dev), dev_name(dev));
106 if (buf_len > 64) {
107 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
108 8, 4, buf, 64, 0);
109 printk(KERN_ERR "%s... (only first 64 bytes "
110 "dumped)\n", prefix);
111 } else
112 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
113 8, 4, buf, buf_len, 0);
114 } 98 }
99 ret = i2400m_is_boot_barker(i2400m, buf, buf_len);
100 if (unlikely(ret >= 0))
101 ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
102 else /* Unknown or unexpected data in the notif message */
103 i2400m_unknown_barker(i2400m, buf, buf_len);
115error_bad_size: 104error_bad_size:
105out:
116 d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n", 106 d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
117 i2400mu, buf, buf_len, ret); 107 i2400mu, buf, buf_len, ret);
118 return ret; 108 return ret;
@@ -220,7 +210,8 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu)
220 dev_err(dev, "notification: cannot allocate URB\n"); 210 dev_err(dev, "notification: cannot allocate URB\n");
221 goto error_alloc_urb; 211 goto error_alloc_urb;
222 } 212 }
223 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION); 213 epd = usb_get_epd(i2400mu->usb_iface,
214 i2400mu->endpoint_cfg.notification);
224 usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); 215 usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
225 usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe, 216 usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
226 buf, I2400MU_MAX_NOTIFICATION_LEN, 217 buf, I2400MU_MAX_NOTIFICATION_LEN,
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index a314799967cf..ba1b02362dfc 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -204,7 +204,7 @@ struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
204 dev_err(dev, "RX: can't get autopm: %d\n", result); 204 dev_err(dev, "RX: can't get autopm: %d\n", result);
205 do_autopm = 0; 205 do_autopm = 0;
206 } 206 }
207 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_IN); 207 epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
208 usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 208 usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
209retry: 209retry:
210 rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len; 210 rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
@@ -214,7 +214,7 @@ retry:
214 } 214 }
215 result = usb_bulk_msg( 215 result = usb_bulk_msg(
216 i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len, 216 i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
217 rx_size, &read_size, HZ); 217 rx_size, &read_size, 200);
218 usb_mark_last_busy(i2400mu->usb_dev); 218 usb_mark_last_busy(i2400mu->usb_dev);
219 switch (result) { 219 switch (result) {
220 case 0: 220 case 0:
@@ -222,6 +222,26 @@ retry:
222 goto retry; /* ZLP, just resubmit */ 222 goto retry; /* ZLP, just resubmit */
223 skb_put(rx_skb, read_size); 223 skb_put(rx_skb, read_size);
224 break; 224 break;
225 case -EPIPE:
226 /*
227 * Stall -- maybe the device is choking with our
228 * requests. Clear it and give it some time. If they
229 * happen to often, it might be another symptom, so we
230 * reset.
231 *
232 * No error handling for usb_clear_halt(0; if it
233 * works, the retry works; if it fails, this switch
234 * does the error handling for us.
235 */
236 if (edc_inc(&i2400mu->urb_edc,
237 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
238 dev_err(dev, "BM-CMD: too many stalls in "
239 "URB; resetting device\n");
240 goto do_reset;
241 }
242 usb_clear_halt(i2400mu->usb_dev, usb_pipe);
243 msleep(10); /* give the device some time */
244 goto retry;
225 case -EINVAL: /* while removing driver */ 245 case -EINVAL: /* while removing driver */
226 case -ENODEV: /* dev disconnect ... */ 246 case -ENODEV: /* dev disconnect ... */
227 case -ENOENT: /* just ignore it */ 247 case -ENOENT: /* just ignore it */
@@ -283,6 +303,7 @@ out:
283error_reset: 303error_reset:
284 dev_err(dev, "RX: maximum errors in URB exceeded; " 304 dev_err(dev, "RX: maximum errors in URB exceeded; "
285 "resetting device\n"); 305 "resetting device\n");
306do_reset:
286 usb_queue_reset_device(i2400mu->usb_iface); 307 usb_queue_reset_device(i2400mu->usb_iface);
287 rx_skb = ERR_PTR(result); 308 rx_skb = ERR_PTR(result);
288 goto out; 309 goto out;
@@ -316,10 +337,15 @@ int i2400mu_rxd(void *_i2400mu)
316 size_t pending; 337 size_t pending;
317 int rx_size; 338 int rx_size;
318 struct sk_buff *rx_skb; 339 struct sk_buff *rx_skb;
340 unsigned long flags;
319 341
320 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); 342 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
343 spin_lock_irqsave(&i2400m->rx_lock, flags);
344 BUG_ON(i2400mu->rx_kthread != NULL);
345 i2400mu->rx_kthread = current;
346 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
321 while (1) { 347 while (1) {
322 d_printf(2, dev, "TX: waiting for messages\n"); 348 d_printf(2, dev, "RX: waiting for messages\n");
323 pending = 0; 349 pending = 0;
324 wait_event_interruptible( 350 wait_event_interruptible(
325 i2400mu->rx_wq, 351 i2400mu->rx_wq,
@@ -367,6 +393,9 @@ int i2400mu_rxd(void *_i2400mu)
367 } 393 }
368 result = 0; 394 result = 0;
369out: 395out:
396 spin_lock_irqsave(&i2400m->rx_lock, flags);
397 i2400mu->rx_kthread = NULL;
398 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
370 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result); 399 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
371 return result; 400 return result;
372 401
@@ -403,18 +432,33 @@ int i2400mu_rx_setup(struct i2400mu *i2400mu)
403 struct i2400m *i2400m = &i2400mu->i2400m; 432 struct i2400m *i2400m = &i2400mu->i2400m;
404 struct device *dev = &i2400mu->usb_iface->dev; 433 struct device *dev = &i2400mu->usb_iface->dev;
405 struct wimax_dev *wimax_dev = &i2400m->wimax_dev; 434 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
435 struct task_struct *kthread;
406 436
407 i2400mu->rx_kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx", 437 kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
408 wimax_dev->name); 438 wimax_dev->name);
409 if (IS_ERR(i2400mu->rx_kthread)) { 439 /* the kthread function sets i2400mu->rx_thread */
410 result = PTR_ERR(i2400mu->rx_kthread); 440 if (IS_ERR(kthread)) {
441 result = PTR_ERR(kthread);
411 dev_err(dev, "RX: cannot start thread: %d\n", result); 442 dev_err(dev, "RX: cannot start thread: %d\n", result);
412 } 443 }
413 return result; 444 return result;
414} 445}
415 446
447
416void i2400mu_rx_release(struct i2400mu *i2400mu) 448void i2400mu_rx_release(struct i2400mu *i2400mu)
417{ 449{
418 kthread_stop(i2400mu->rx_kthread); 450 unsigned long flags;
451 struct i2400m *i2400m = &i2400mu->i2400m;
452 struct device *dev = i2400m_dev(i2400m);
453 struct task_struct *kthread;
454
455 spin_lock_irqsave(&i2400m->rx_lock, flags);
456 kthread = i2400mu->rx_kthread;
457 i2400mu->rx_kthread = NULL;
458 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
459 if (kthread)
460 kthread_stop(kthread);
461 else
462 d_printf(1, dev, "RX: kthread had already exited\n");
419} 463}
420 464
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index dfd893356f49..c65b9979f87e 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -101,11 +101,11 @@ int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
101 dev_err(dev, "TX: can't get autopm: %d\n", result); 101 dev_err(dev, "TX: can't get autopm: %d\n", result);
102 do_autopm = 0; 102 do_autopm = 0;
103 } 103 }
104 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT); 104 epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
105 usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 105 usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
106retry: 106retry:
107 result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe, 107 result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
108 tx_msg, tx_msg_size, &sent_size, HZ); 108 tx_msg, tx_msg_size, &sent_size, 200);
109 usb_mark_last_busy(i2400mu->usb_dev); 109 usb_mark_last_busy(i2400mu->usb_dev);
110 switch (result) { 110 switch (result) {
111 case 0: 111 case 0:
@@ -115,6 +115,28 @@ retry:
115 result = -EIO; 115 result = -EIO;
116 } 116 }
117 break; 117 break;
118 case -EPIPE:
119 /*
120 * Stall -- maybe the device is choking with our
121 * requests. Clear it and give it some time. If they
122 * happen to often, it might be another symptom, so we
123 * reset.
124 *
125 * No error handling for usb_clear_halt(0; if it
126 * works, the retry works; if it fails, this switch
127 * does the error handling for us.
128 */
129 if (edc_inc(&i2400mu->urb_edc,
130 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
131 dev_err(dev, "BM-CMD: too many stalls in "
132 "URB; resetting device\n");
133 usb_queue_reset_device(i2400mu->usb_iface);
134 /* fallthrough */
135 } else {
136 usb_clear_halt(i2400mu->usb_dev, usb_pipe);
137 msleep(10); /* give the device some time */
138 goto retry;
139 }
118 case -EINVAL: /* while removing driver */ 140 case -EINVAL: /* while removing driver */
119 case -ENODEV: /* dev disconnect ... */ 141 case -ENODEV: /* dev disconnect ... */
120 case -ENOENT: /* just ignore it */ 142 case -ENOENT: /* just ignore it */
@@ -161,9 +183,15 @@ int i2400mu_txd(void *_i2400mu)
161 struct device *dev = &i2400mu->usb_iface->dev; 183 struct device *dev = &i2400mu->usb_iface->dev;
162 struct i2400m_msg_hdr *tx_msg; 184 struct i2400m_msg_hdr *tx_msg;
163 size_t tx_msg_size; 185 size_t tx_msg_size;
186 unsigned long flags;
164 187
165 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); 188 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
166 189
190 spin_lock_irqsave(&i2400m->tx_lock, flags);
191 BUG_ON(i2400mu->tx_kthread != NULL);
192 i2400mu->tx_kthread = current;
193 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
194
167 while (1) { 195 while (1) {
168 d_printf(2, dev, "TX: waiting for messages\n"); 196 d_printf(2, dev, "TX: waiting for messages\n");
169 tx_msg = NULL; 197 tx_msg = NULL;
@@ -183,6 +211,11 @@ int i2400mu_txd(void *_i2400mu)
183 if (result < 0) 211 if (result < 0)
184 break; 212 break;
185 } 213 }
214
215 spin_lock_irqsave(&i2400m->tx_lock, flags);
216 i2400mu->tx_kthread = NULL;
217 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
218
186 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result); 219 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
187 return result; 220 return result;
188} 221}
@@ -213,11 +246,13 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
213 struct i2400m *i2400m = &i2400mu->i2400m; 246 struct i2400m *i2400m = &i2400mu->i2400m;
214 struct device *dev = &i2400mu->usb_iface->dev; 247 struct device *dev = &i2400mu->usb_iface->dev;
215 struct wimax_dev *wimax_dev = &i2400m->wimax_dev; 248 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
249 struct task_struct *kthread;
216 250
217 i2400mu->tx_kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx", 251 kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
218 wimax_dev->name); 252 wimax_dev->name);
219 if (IS_ERR(i2400mu->tx_kthread)) { 253 /* the kthread function sets i2400mu->tx_thread */
220 result = PTR_ERR(i2400mu->tx_kthread); 254 if (IS_ERR(kthread)) {
255 result = PTR_ERR(kthread);
221 dev_err(dev, "TX: cannot start thread: %d\n", result); 256 dev_err(dev, "TX: cannot start thread: %d\n", result);
222 } 257 }
223 return result; 258 return result;
@@ -225,5 +260,17 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
225 260
226void i2400mu_tx_release(struct i2400mu *i2400mu) 261void i2400mu_tx_release(struct i2400mu *i2400mu)
227{ 262{
228 kthread_stop(i2400mu->tx_kthread); 263 unsigned long flags;
264 struct i2400m *i2400m = &i2400mu->i2400m;
265 struct device *dev = i2400m_dev(i2400m);
266 struct task_struct *kthread;
267
268 spin_lock_irqsave(&i2400m->tx_lock, flags);
269 kthread = i2400mu->tx_kthread;
270 i2400mu->tx_kthread = NULL;
271 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
272 if (kthread)
273 kthread_stop(kthread);
274 else
275 d_printf(1, dev, "TX: kthread had already exited\n");
229} 276}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 7eadd11c815b..47e84ef355c5 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -58,7 +58,7 @@
58 * i2400mu_rx_release() 58 * i2400mu_rx_release()
59 * i2400mu_tx_release() 59 * i2400mu_tx_release()
60 * 60 *
61 * i2400mu_bus_reset() Called by i2400m->bus_reset 61 * i2400mu_bus_reset() Called by i2400m_reset
62 * __i2400mu_reset() 62 * __i2400mu_reset()
63 * __i2400mu_send_barker() 63 * __i2400mu_send_barker()
64 * usb_reset_device() 64 * usb_reset_device()
@@ -71,13 +71,25 @@
71#define D_SUBMODULE usb 71#define D_SUBMODULE usb
72#include "usb-debug-levels.h" 72#include "usb-debug-levels.h"
73 73
74static char i2400mu_debug_params[128];
75module_param_string(debug, i2400mu_debug_params, sizeof(i2400mu_debug_params),
76 0644);
77MODULE_PARM_DESC(debug,
78 "String of space-separated NAME:VALUE pairs, where NAMEs "
79 "are the different debug submodules and VALUE are the "
80 "initial debug value to set.");
74 81
75/* Our firmware file name */ 82/* Our firmware file name */
76static const char *i2400mu_bus_fw_names[] = { 83static const char *i2400mu_bus_fw_names_5x50[] = {
77#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf" 84#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
78 I2400MU_FW_FILE_NAME_v1_4, 85 I2400MU_FW_FILE_NAME_v1_4,
79#define I2400MU_FW_FILE_NAME_v1_3 "i2400m-fw-usb-1.3.sbcf" 86 NULL,
80 I2400MU_FW_FILE_NAME_v1_3, 87};
88
89
90static const char *i2400mu_bus_fw_names_6050[] = {
91#define I6050U_FW_FILE_NAME_v1_5 "i6050-fw-usb-1.5.sbcf"
92 I6050U_FW_FILE_NAME_v1_5,
81 NULL, 93 NULL,
82}; 94};
83 95
@@ -160,14 +172,59 @@ int __i2400mu_send_barker(struct i2400mu *i2400mu,
160 epd = usb_get_epd(i2400mu->usb_iface, endpoint); 172 epd = usb_get_epd(i2400mu->usb_iface, endpoint);
161 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 173 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
162 memcpy(buffer, barker, barker_size); 174 memcpy(buffer, barker, barker_size);
175retry:
163 ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size, 176 ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size,
164 &actual_len, HZ); 177 &actual_len, 200);
165 if (ret < 0) { 178 switch (ret) {
166 if (ret != -EINVAL) 179 case 0:
167 dev_err(dev, "E: barker error: %d\n", ret); 180 if (actual_len != barker_size) { /* Too short? drop it */
168 } else if (actual_len != barker_size) { 181 dev_err(dev, "E: %s: short write (%d B vs %zu "
169 dev_err(dev, "E: only %d bytes transmitted\n", actual_len); 182 "expected)\n",
170 ret = -EIO; 183 __func__, actual_len, barker_size);
184 ret = -EIO;
185 }
186 break;
187 case -EPIPE:
188 /*
189 * Stall -- maybe the device is choking with our
190 * requests. Clear it and give it some time. If they
191 * happen to often, it might be another symptom, so we
192 * reset.
193 *
194 * No error handling for usb_clear_halt(0; if it
195 * works, the retry works; if it fails, this switch
196 * does the error handling for us.
197 */
198 if (edc_inc(&i2400mu->urb_edc,
199 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
200 dev_err(dev, "E: %s: too many stalls in "
201 "URB; resetting device\n", __func__);
202 usb_queue_reset_device(i2400mu->usb_iface);
203 /* fallthrough */
204 } else {
205 usb_clear_halt(i2400mu->usb_dev, pipe);
206 msleep(10); /* give the device some time */
207 goto retry;
208 }
209 case -EINVAL: /* while removing driver */
210 case -ENODEV: /* dev disconnect ... */
211 case -ENOENT: /* just ignore it */
212 case -ESHUTDOWN: /* and exit */
213 case -ECONNRESET:
214 ret = -ESHUTDOWN;
215 break;
216 default: /* Some error? */
217 if (edc_inc(&i2400mu->urb_edc,
218 EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
219 dev_err(dev, "E: %s: maximum errors in URB "
220 "exceeded; resetting device\n",
221 __func__);
222 usb_queue_reset_device(i2400mu->usb_iface);
223 } else {
224 dev_warn(dev, "W: %s: cannot send URB: %d\n",
225 __func__, ret);
226 goto retry;
227 }
171 } 228 }
172 kfree(buffer); 229 kfree(buffer);
173error_kzalloc: 230error_kzalloc:
@@ -232,15 +289,16 @@ int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
232 289
233 d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); 290 d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
234 if (rt == I2400M_RT_WARM) 291 if (rt == I2400M_RT_WARM)
235 result = __i2400mu_send_barker(i2400mu, i2400m_WARM_BOOT_BARKER, 292 result = __i2400mu_send_barker(
236 sizeof(i2400m_WARM_BOOT_BARKER), 293 i2400mu, i2400m_WARM_BOOT_BARKER,
237 I2400MU_EP_BULK_OUT); 294 sizeof(i2400m_WARM_BOOT_BARKER),
295 i2400mu->endpoint_cfg.bulk_out);
238 else if (rt == I2400M_RT_COLD) 296 else if (rt == I2400M_RT_COLD)
239 result = __i2400mu_send_barker(i2400mu, i2400m_COLD_BOOT_BARKER, 297 result = __i2400mu_send_barker(
240 sizeof(i2400m_COLD_BOOT_BARKER), 298 i2400mu, i2400m_COLD_BOOT_BARKER,
241 I2400MU_EP_RESET_COLD); 299 sizeof(i2400m_COLD_BOOT_BARKER),
300 i2400mu->endpoint_cfg.reset_cold);
242 else if (rt == I2400M_RT_BUS) { 301 else if (rt == I2400M_RT_BUS) {
243do_bus_reset:
244 result = usb_reset_device(i2400mu->usb_dev); 302 result = usb_reset_device(i2400mu->usb_dev);
245 switch (result) { 303 switch (result) {
246 case 0: 304 case 0:
@@ -248,7 +306,7 @@ do_bus_reset:
248 case -ENODEV: 306 case -ENODEV:
249 case -ENOENT: 307 case -ENOENT:
250 case -ESHUTDOWN: 308 case -ESHUTDOWN:
251 result = rt == I2400M_RT_WARM ? -ENODEV : 0; 309 result = 0;
252 break; /* We assume the device is disconnected */ 310 break; /* We assume the device is disconnected */
253 default: 311 default:
254 dev_err(dev, "USB reset failed (%d), giving up!\n", 312 dev_err(dev, "USB reset failed (%d), giving up!\n",
@@ -261,10 +319,17 @@ do_bus_reset:
261 if (result < 0 319 if (result < 0
262 && result != -EINVAL /* device is gone */ 320 && result != -EINVAL /* device is gone */
263 && rt != I2400M_RT_BUS) { 321 && rt != I2400M_RT_BUS) {
322 /*
323 * Things failed -- resort to lower level reset, that
324 * we queue in another context; the reason for this is
325 * that the pre and post reset functionality requires
326 * the i2400m->init_mutex; RT_WARM and RT_COLD can
327 * come from areas where i2400m->init_mutex is taken.
328 */
264 dev_err(dev, "%s reset failed (%d); trying USB reset\n", 329 dev_err(dev, "%s reset failed (%d); trying USB reset\n",
265 rt == I2400M_RT_WARM ? "warm" : "cold", result); 330 rt == I2400M_RT_WARM ? "warm" : "cold", result);
266 rt = I2400M_RT_BUS; 331 usb_queue_reset_device(i2400mu->usb_iface);
267 goto do_bus_reset; 332 result = -ENODEV;
268 } 333 }
269 d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result); 334 d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
270 return result; 335 return result;
@@ -402,20 +467,33 @@ int i2400mu_probe(struct usb_interface *iface,
402 467
403 i2400m->bus_tx_block_size = I2400MU_BLK_SIZE; 468 i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
404 i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX; 469 i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
470 i2400m->bus_setup = NULL;
405 i2400m->bus_dev_start = i2400mu_bus_dev_start; 471 i2400m->bus_dev_start = i2400mu_bus_dev_start;
406 i2400m->bus_dev_stop = i2400mu_bus_dev_stop; 472 i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
473 i2400m->bus_release = NULL;
407 i2400m->bus_tx_kick = i2400mu_bus_tx_kick; 474 i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
408 i2400m->bus_reset = i2400mu_bus_reset; 475 i2400m->bus_reset = i2400mu_bus_reset;
409 i2400m->bus_bm_retries = I2400M_BOOT_RETRIES; 476 i2400m->bus_bm_retries = I2400M_USB_BOOT_RETRIES;
410 i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send; 477 i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
411 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; 478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
412 i2400m->bus_fw_names = i2400mu_bus_fw_names;
413 i2400m->bus_bm_mac_addr_impaired = 0; 479 i2400m->bus_bm_mac_addr_impaired = 0;
414 480
481 if (id->idProduct == USB_DEVICE_ID_I6050) {
482 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
483 i2400mu->endpoint_cfg.bulk_out = 0;
484 i2400mu->endpoint_cfg.notification = 3;
485 i2400mu->endpoint_cfg.reset_cold = 2;
486 i2400mu->endpoint_cfg.bulk_in = 1;
487 } else {
488 i2400m->bus_fw_names = i2400mu_bus_fw_names_5x50;
489 i2400mu->endpoint_cfg.bulk_out = 0;
490 i2400mu->endpoint_cfg.notification = 1;
491 i2400mu->endpoint_cfg.reset_cold = 2;
492 i2400mu->endpoint_cfg.bulk_in = 3;
493 }
415#ifdef CONFIG_PM 494#ifdef CONFIG_PM
416 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */ 495 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
417 device_init_wakeup(dev, 1); 496 device_init_wakeup(dev, 1);
418 usb_autopm_enable(i2400mu->usb_iface);
419 usb_dev->autosuspend_delay = 15 * HZ; 497 usb_dev->autosuspend_delay = 15 * HZ;
420 usb_dev->autosuspend_disabled = 0; 498 usb_dev->autosuspend_disabled = 0;
421#endif 499#endif
@@ -483,7 +561,10 @@ void i2400mu_disconnect(struct usb_interface *iface)
483 * So at the end, the three cases require common handling. 561 * So at the end, the three cases require common handling.
484 * 562 *
485 * If at the time of this call the device's firmware is not loaded, 563 * If at the time of this call the device's firmware is not loaded,
486 * nothing has to be done. 564 * nothing has to be done. Note we can be "loose" about not reading
565 * i2400m->updown under i2400m->init_mutex. If it happens to change
566 * inmediately, other parts of the call flow will fail and effectively
567 * catch it.
487 * 568 *
488 * If the firmware is loaded, we need to: 569 * If the firmware is loaded, we need to:
489 * 570 *
@@ -522,6 +603,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
522#endif 603#endif
523 604
524 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); 605 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
606 rmb(); /* see i2400m->updown's documentation */
525 if (i2400m->updown == 0) 607 if (i2400m->updown == 0)
526 goto no_firmware; 608 goto no_firmware;
527 if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) { 609 if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
@@ -575,6 +657,7 @@ int i2400mu_resume(struct usb_interface *iface)
575 struct i2400m *i2400m = &i2400mu->i2400m; 657 struct i2400m *i2400m = &i2400mu->i2400m;
576 658
577 d_fnstart(3, dev, "(iface %p)\n", iface); 659 d_fnstart(3, dev, "(iface %p)\n", iface);
660 rmb(); /* see i2400m->updown's documentation */
578 if (i2400m->updown == 0) { 661 if (i2400m->updown == 0) {
579 d_printf(1, dev, "fw was down, no resume neeed\n"); 662 d_printf(1, dev, "fw was down, no resume neeed\n");
580 goto out; 663 goto out;
@@ -591,7 +674,54 @@ out:
591 674
592 675
593static 676static
677int i2400mu_reset_resume(struct usb_interface *iface)
678{
679 int result;
680 struct device *dev = &iface->dev;
681 struct i2400mu *i2400mu = usb_get_intfdata(iface);
682 struct i2400m *i2400m = &i2400mu->i2400m;
683
684 d_fnstart(3, dev, "(iface %p)\n", iface);
685 result = i2400m_dev_reset_handle(i2400m, "device reset on resume");
686 d_fnend(3, dev, "(iface %p) = %d\n", iface, result);
687 return result < 0 ? result : 0;
688}
689
690
691/*
692 * Another driver or user space is triggering a reset on the device
693 * which contains the interface passed as an argument. Cease IO and
694 * save any device state you need to restore.
695 *
696 * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if
697 * you are in atomic context.
698 */
699static
700int i2400mu_pre_reset(struct usb_interface *iface)
701{
702 struct i2400mu *i2400mu = usb_get_intfdata(iface);
703 return i2400m_pre_reset(&i2400mu->i2400m);
704}
705
706
707/*
708 * The reset has completed. Restore any saved device state and begin
709 * using the device again.
710 *
711 * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if
712 * you are in atomic context.
713 */
714static
715int i2400mu_post_reset(struct usb_interface *iface)
716{
717 struct i2400mu *i2400mu = usb_get_intfdata(iface);
718 return i2400m_post_reset(&i2400mu->i2400m);
719}
720
721
722static
594struct usb_device_id i2400mu_id_table[] = { 723struct usb_device_id i2400mu_id_table[] = {
724 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
595 { USB_DEVICE(0x8086, 0x0181) }, 725 { USB_DEVICE(0x8086, 0x0181) },
596 { USB_DEVICE(0x8086, 0x1403) }, 726 { USB_DEVICE(0x8086, 0x1403) },
597 { USB_DEVICE(0x8086, 0x1405) }, 727 { USB_DEVICE(0x8086, 0x1405) },
@@ -609,8 +739,11 @@ struct usb_driver i2400mu_driver = {
609 .name = KBUILD_MODNAME, 739 .name = KBUILD_MODNAME,
610 .suspend = i2400mu_suspend, 740 .suspend = i2400mu_suspend,
611 .resume = i2400mu_resume, 741 .resume = i2400mu_resume,
742 .reset_resume = i2400mu_reset_resume,
612 .probe = i2400mu_probe, 743 .probe = i2400mu_probe,
613 .disconnect = i2400mu_disconnect, 744 .disconnect = i2400mu_disconnect,
745 .pre_reset = i2400mu_pre_reset,
746 .post_reset = i2400mu_post_reset,
614 .id_table = i2400mu_id_table, 747 .id_table = i2400mu_id_table,
615 .supports_autosuspend = 1, 748 .supports_autosuspend = 1,
616}; 749};
@@ -618,6 +751,8 @@ struct usb_driver i2400mu_driver = {
618static 751static
619int __init i2400mu_driver_init(void) 752int __init i2400mu_driver_init(void)
620{ 753{
754 d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400mu_debug_params,
755 "i2400m_usb.debug");
621 return usb_register(&i2400mu_driver); 756 return usb_register(&i2400mu_driver);
622} 757}
623module_init(i2400mu_driver_init); 758module_init(i2400mu_driver_init);
@@ -632,7 +767,7 @@ void __exit i2400mu_driver_exit(void)
632module_exit(i2400mu_driver_exit); 767module_exit(i2400mu_driver_exit);
633 768
634MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>"); 769MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
635MODULE_DESCRIPTION("Intel 2400M WiMAX networking for USB"); 770MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
771 "(5x50 & 6050)");
636MODULE_LICENSE("GPL"); 772MODULE_LICENSE("GPL");
637MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4); 773MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
638MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_3);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index d7a764a2fc1a..56dd6650c97a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
5menuconfig WLAN 5menuconfig WLAN
6 bool "Wireless LAN" 6 bool "Wireless LAN"
7 depends on !S390 7 depends on !S390
8 select WIRELESS
8 default y 9 default y
9 ---help--- 10 ---help---
10 This section contains all the pre 802.11 and 802.11 wireless 11 This section contains all the pre 802.11 and 802.11 wireless
@@ -15,114 +16,12 @@ menuconfig WLAN
15 16
16if WLAN 17if WLAN
17 18
18menuconfig WLAN_PRE80211
19 bool "Wireless LAN (pre-802.11)"
20 depends on NETDEVICES
21 ---help---
22 Say Y if you have any pre-802.11 wireless LAN hardware.
23
24 This option does not affect the kernel build, it only
25 lets you choose drivers.
26
27config STRIP
28 tristate "STRIP (Metricom starmode radio IP)"
29 depends on INET && WLAN_PRE80211
30 select WIRELESS_EXT
31 ---help---
32 Say Y if you have a Metricom radio and intend to use Starmode Radio
33 IP. STRIP is a radio protocol developed for the MosquitoNet project
34 to send Internet traffic using Metricom radios. Metricom radios are
35 small, battery powered, 100kbit/sec packet radio transceivers, about
36 the size and weight of a cellular telephone. (You may also have heard
37 them called "Metricom modems" but we avoid the term "modem" because
38 it misleads many people into thinking that you can plug a Metricom
39 modem into a phone line and use it as a modem.)
40
41 You can use STRIP on any Linux machine with a serial port, although
42 it is obviously most useful for people with laptop computers. If you
43 think you might get a Metricom radio in the future, there is no harm
44 in saying Y to STRIP now, except that it makes the kernel a bit
45 bigger.
46
47 To compile this as a module, choose M here: the module will be
48 called strip.
49
50config ARLAN
51 tristate "Aironet Arlan 655 & IC2200 DS support"
52 depends on ISA && !64BIT && WLAN_PRE80211
53 select WIRELESS_EXT
54 ---help---
55 Aironet makes Arlan, a class of wireless LAN adapters. These use the
56 www.Telxon.com chip, which is also used on several similar cards.
57 This driver is tested on the 655 and IC2200 series cards. Look at
58 <http://www.ylenurme.ee/~elmer/655/> for the latest information.
59
60 The driver is built as two modules, arlan and arlan-proc. The latter
61 is the /proc interface and is not needed most of time.
62
63 On some computers the card ends up in non-valid state after some
64 time. Use a ping-reset script to clear it.
65
66config WAVELAN
67 tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
68 depends on ISA && WLAN_PRE80211
69 select WIRELESS_EXT
70 ---help---
71 The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
72 a Radio LAN (wireless Ethernet-like Local Area Network) using the
73 radio frequencies 900 MHz and 2.4 GHz.
74
75 If you want to use an ISA WaveLAN card under Linux, say Y and read
76 the Ethernet-HOWTO, available from
77 <http://www.tldp.org/docs.html#howto>. Some more specific
78 information is contained in
79 <file:Documentation/networking/wavelan.txt> and in the source code
80 <file:drivers/net/wireless/wavelan.p.h>.
81
82 You will also need the wireless tools package available from
83 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
84 Please read the man pages contained therein.
85
86 To compile this driver as a module, choose M here: the module will be
87 called wavelan.
88
89config PCMCIA_WAVELAN
90 tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
91 depends on PCMCIA && WLAN_PRE80211
92 select WIRELESS_EXT
93 help
94 Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
95 (PC-card) wireless Ethernet networking card to your computer. This
96 driver is for the non-IEEE-802.11 Wavelan cards.
97
98 To compile this driver as a module, choose M here: the module will be
99 called wavelan_cs. If unsure, say N.
100
101config PCMCIA_NETWAVE
102 tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
103 depends on PCMCIA && WLAN_PRE80211
104 select WIRELESS_EXT
105 help
106 Say Y here if you intend to attach this type of PCMCIA (PC-card)
107 wireless Ethernet networking card to your computer.
108
109 To compile this driver as a module, choose M here: the module will be
110 called netwave_cs. If unsure, say N.
111
112
113menuconfig WLAN_80211
114 bool "Wireless LAN (IEEE 802.11)"
115 depends on NETDEVICES
116 ---help---
117 Say Y if you have any 802.11 wireless LAN hardware.
118
119 This option does not affect the kernel build, it only
120 lets you choose drivers.
121
122config PCMCIA_RAYCS 19config PCMCIA_RAYCS
123 tristate "Aviator/Raytheon 2.4GHz wireless support" 20 tristate "Aviator/Raytheon 2.4GHz wireless support"
124 depends on PCMCIA && WLAN_80211 21 depends on PCMCIA
125 select WIRELESS_EXT 22 select WIRELESS_EXT
23 select WEXT_SPY
24 select WEXT_PRIV
126 ---help--- 25 ---help---
127 Say Y here if you intend to attach an Aviator/Raytheon PCMCIA 26 Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
128 (PC-card) wireless Ethernet networking card to your computer. 27 (PC-card) wireless Ethernet networking card to your computer.
@@ -132,49 +31,9 @@ config PCMCIA_RAYCS
132 To compile this driver as a module, choose M here: the module will be 31 To compile this driver as a module, choose M here: the module will be
133 called ray_cs. If unsure, say N. 32 called ray_cs. If unsure, say N.
134 33
135config LIBERTAS
136 tristate "Marvell 8xxx Libertas WLAN driver support"
137 depends on WLAN_80211
138 select WIRELESS_EXT
139 select LIB80211
140 select FW_LOADER
141 ---help---
142 A library for Marvell Libertas 8xxx devices.
143
144config LIBERTAS_USB
145 tristate "Marvell Libertas 8388 USB 802.11b/g cards"
146 depends on LIBERTAS && USB
147 ---help---
148 A driver for Marvell Libertas 8388 USB devices.
149
150config LIBERTAS_CS
151 tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
152 depends on LIBERTAS && PCMCIA
153 select FW_LOADER
154 ---help---
155 A driver for Marvell Libertas 8385 CompactFlash devices.
156
157config LIBERTAS_SDIO
158 tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
159 depends on LIBERTAS && MMC
160 ---help---
161 A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
162
163config LIBERTAS_SPI
164 tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
165 depends on LIBERTAS && SPI
166 ---help---
167 A driver for Marvell Libertas 8686 SPI devices.
168
169config LIBERTAS_DEBUG
170 bool "Enable full debugging output in the Libertas module."
171 depends on LIBERTAS
172 ---help---
173 Debugging support.
174
175config LIBERTAS_THINFIRM 34config LIBERTAS_THINFIRM
176 tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware" 35 tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
177 depends on WLAN_80211 && MAC80211 36 depends on MAC80211
178 select FW_LOADER 37 select FW_LOADER
179 ---help--- 38 ---help---
180 A library for Marvell Libertas 8xxx devices using thinfirm. 39 A library for Marvell Libertas 8xxx devices using thinfirm.
@@ -187,9 +46,11 @@ config LIBERTAS_THINFIRM_USB
187 46
188config AIRO 47config AIRO
189 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 48 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
190 depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN) 49 depends on ISA_DMA_API && (PCI || BROKEN)
191 select WIRELESS_EXT 50 select WIRELESS_EXT
192 select CRYPTO 51 select CRYPTO
52 select WEXT_SPY
53 select WEXT_PRIV
193 ---help--- 54 ---help---
194 This is the standard Linux driver to support Cisco/Aironet ISA and 55 This is the standard Linux driver to support Cisco/Aironet ISA and
195 PCI 802.11 wireless cards. 56 PCI 802.11 wireless cards.
@@ -205,8 +66,9 @@ config AIRO
205 66
206config ATMEL 67config ATMEL
207 tristate "Atmel at76c50x chipset 802.11b support" 68 tristate "Atmel at76c50x chipset 802.11b support"
208 depends on (PCI || PCMCIA) && WLAN_80211 69 depends on (PCI || PCMCIA)
209 select WIRELESS_EXT 70 select WIRELESS_EXT
71 select WEXT_PRIV
210 select FW_LOADER 72 select FW_LOADER
211 select CRC32 73 select CRC32
212 ---help--- 74 ---help---
@@ -239,7 +101,7 @@ config PCMCIA_ATMEL
239 101
240config AT76C50X_USB 102config AT76C50X_USB
241 tristate "Atmel at76c503/at76c505/at76c505a USB cards" 103 tristate "Atmel at76c503/at76c505/at76c505a USB cards"
242 depends on MAC80211 && WLAN_80211 && USB 104 depends on MAC80211 && USB
243 select FW_LOADER 105 select FW_LOADER
244 ---help--- 106 ---help---
245 Enable support for USB Wireless devices using Atmel at76c503, 107 Enable support for USB Wireless devices using Atmel at76c503,
@@ -247,8 +109,9 @@ config AT76C50X_USB
247 109
248config AIRO_CS 110config AIRO_CS
249 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 111 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
250 depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211 112 depends on PCMCIA && (BROKEN || !M32R)
251 select WIRELESS_EXT 113 select WIRELESS_EXT
114 select WEXT_SPY
252 select CRYPTO 115 select CRYPTO
253 select CRYPTO_AES 116 select CRYPTO_AES
254 ---help--- 117 ---help---
@@ -266,18 +129,21 @@ config AIRO_CS
266 Cisco Linux utilities can be used to configure the card. 129 Cisco Linux utilities can be used to configure the card.
267 130
268config PCMCIA_WL3501 131config PCMCIA_WL3501
269 tristate "Planet WL3501 PCMCIA cards" 132 tristate "Planet WL3501 PCMCIA cards"
270 depends on EXPERIMENTAL && PCMCIA && WLAN_80211 133 depends on EXPERIMENTAL && PCMCIA
271 select WIRELESS_EXT 134 select WIRELESS_EXT
272 ---help--- 135 select WEXT_SPY
273 A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. 136 help
274 It has basic support for Linux wireless extensions and initial 137 A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
275 micro support for ethtool. 138 It has basic support for Linux wireless extensions and initial
139 micro support for ethtool.
276 140
277config PRISM54 141config PRISM54
278 tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)' 142 tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)'
279 depends on PCI && EXPERIMENTAL && WLAN_80211 143 depends on PCI && EXPERIMENTAL
280 select WIRELESS_EXT 144 select WIRELESS_EXT
145 select WEXT_SPY
146 select WEXT_PRIV
281 select FW_LOADER 147 select FW_LOADER
282 ---help--- 148 ---help---
283 This enables support for FullMAC PCI/Cardbus prism54 devices. This 149 This enables support for FullMAC PCI/Cardbus prism54 devices. This
@@ -298,8 +164,9 @@ config PRISM54
298 164
299config USB_ZD1201 165config USB_ZD1201
300 tristate "USB ZD1201 based Wireless device support" 166 tristate "USB ZD1201 based Wireless device support"
301 depends on USB && WLAN_80211 167 depends on USB
302 select WIRELESS_EXT 168 select WIRELESS_EXT
169 select WEXT_PRIV
303 select FW_LOADER 170 select FW_LOADER
304 ---help--- 171 ---help---
305 Say Y if you want to use wireless LAN adapters based on the ZyDAS 172 Say Y if you want to use wireless LAN adapters based on the ZyDAS
@@ -316,7 +183,7 @@ config USB_ZD1201
316 183
317config USB_NET_RNDIS_WLAN 184config USB_NET_RNDIS_WLAN
318 tristate "Wireless RNDIS USB support" 185 tristate "Wireless RNDIS USB support"
319 depends on USB && WLAN_80211 && EXPERIMENTAL 186 depends on USB && EXPERIMENTAL
320 depends on CFG80211 187 depends on CFG80211
321 select USB_USBNET 188 select USB_USBNET
322 select USB_NET_CDCETHER 189 select USB_NET_CDCETHER
@@ -344,7 +211,7 @@ config USB_NET_RNDIS_WLAN
344 211
345config RTL8180 212config RTL8180
346 tristate "Realtek 8180/8185 PCI support" 213 tristate "Realtek 8180/8185 PCI support"
347 depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL 214 depends on MAC80211 && PCI && EXPERIMENTAL
348 select EEPROM_93CX6 215 select EEPROM_93CX6
349 ---help--- 216 ---help---
350 This is a driver for RTL8180 and RTL8185 based cards. 217 This is a driver for RTL8180 and RTL8185 based cards.
@@ -400,7 +267,7 @@ config RTL8180
400 267
401config RTL8187 268config RTL8187
402 tristate "Realtek 8187 and 8187B USB support" 269 tristate "Realtek 8187 and 8187B USB support"
403 depends on MAC80211 && USB && WLAN_80211 270 depends on MAC80211 && USB
404 select EEPROM_93CX6 271 select EEPROM_93CX6
405 ---help--- 272 ---help---
406 This is a driver for RTL8187 and RTL8187B based cards. 273 This is a driver for RTL8187 and RTL8187B based cards.
@@ -429,7 +296,7 @@ config RTL8187_LEDS
429 296
430config ADM8211 297config ADM8211
431 tristate "ADMtek ADM8211 support" 298 tristate "ADMtek ADM8211 support"
432 depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL 299 depends on MAC80211 && PCI && EXPERIMENTAL
433 select CRC32 300 select CRC32
434 select EEPROM_93CX6 301 select EEPROM_93CX6
435 ---help--- 302 ---help---
@@ -456,7 +323,7 @@ config ADM8211
456 323
457config MAC80211_HWSIM 324config MAC80211_HWSIM
458 tristate "Simulated radio testing tool for mac80211" 325 tristate "Simulated radio testing tool for mac80211"
459 depends on MAC80211 && WLAN_80211 326 depends on MAC80211
460 ---help--- 327 ---help---
461 This driver is a developer testing tool that can be used to test 328 This driver is a developer testing tool that can be used to test
462 IEEE 802.11 networking stack (mac80211) functionality. This is not 329 IEEE 802.11 networking stack (mac80211) functionality. This is not
@@ -469,24 +336,25 @@ config MAC80211_HWSIM
469 336
470config MWL8K 337config MWL8K
471 tristate "Marvell 88W8xxx PCI/PCIe Wireless support" 338 tristate "Marvell 88W8xxx PCI/PCIe Wireless support"
472 depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL 339 depends on MAC80211 && PCI && EXPERIMENTAL
473 ---help--- 340 ---help---
474 This driver supports Marvell TOPDOG 802.11 wireless cards. 341 This driver supports Marvell TOPDOG 802.11 wireless cards.
475 342
476 To compile this driver as a module, choose M here: the module 343 To compile this driver as a module, choose M here: the module
477 will be called mwl8k. If unsure, say N. 344 will be called mwl8k. If unsure, say N.
478 345
479source "drivers/net/wireless/p54/Kconfig"
480source "drivers/net/wireless/ath/Kconfig" 346source "drivers/net/wireless/ath/Kconfig"
481source "drivers/net/wireless/ipw2x00/Kconfig"
482source "drivers/net/wireless/iwlwifi/Kconfig"
483source "drivers/net/wireless/hostap/Kconfig"
484source "drivers/net/wireless/b43/Kconfig" 347source "drivers/net/wireless/b43/Kconfig"
485source "drivers/net/wireless/b43legacy/Kconfig" 348source "drivers/net/wireless/b43legacy/Kconfig"
486source "drivers/net/wireless/zd1211rw/Kconfig" 349source "drivers/net/wireless/hostap/Kconfig"
487source "drivers/net/wireless/rt2x00/Kconfig" 350source "drivers/net/wireless/ipw2x00/Kconfig"
351source "drivers/net/wireless/iwlwifi/Kconfig"
352source "drivers/net/wireless/iwmc3200wifi/Kconfig"
353source "drivers/net/wireless/libertas/Kconfig"
488source "drivers/net/wireless/orinoco/Kconfig" 354source "drivers/net/wireless/orinoco/Kconfig"
355source "drivers/net/wireless/p54/Kconfig"
356source "drivers/net/wireless/rt2x00/Kconfig"
489source "drivers/net/wireless/wl12xx/Kconfig" 357source "drivers/net/wireless/wl12xx/Kconfig"
490source "drivers/net/wireless/iwmc3200wifi/Kconfig" 358source "drivers/net/wireless/zd1211rw/Kconfig"
491 359
492endif # WLAN 360endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7a4647e78fd3..5d4ce4d2b32b 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -5,16 +5,6 @@
5obj-$(CONFIG_IPW2100) += ipw2x00/ 5obj-$(CONFIG_IPW2100) += ipw2x00/
6obj-$(CONFIG_IPW2200) += ipw2x00/ 6obj-$(CONFIG_IPW2200) += ipw2x00/
7 7
8obj-$(CONFIG_STRIP) += strip.o
9obj-$(CONFIG_ARLAN) += arlan.o
10
11arlan-objs := arlan-main.o arlan-proc.o
12
13# Obsolete cards
14obj-$(CONFIG_WAVELAN) += wavelan.o
15obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
16obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
17
18obj-$(CONFIG_HERMES) += orinoco/ 8obj-$(CONFIG_HERMES) += orinoco/
19 9
20obj-$(CONFIG_AIRO) += airo.o 10obj-$(CONFIG_AIRO) += airo.o
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index b80f514877d8..39410016b4ff 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1538,7 +1538,7 @@ static int adm8211_start(struct ieee80211_hw *dev)
1538 adm8211_hw_init(dev); 1538 adm8211_hw_init(dev);
1539 adm8211_rf_set_channel(dev, priv->channel); 1539 adm8211_rf_set_channel(dev, priv->channel);
1540 1540
1541 retval = request_irq(priv->pdev->irq, &adm8211_interrupt, 1541 retval = request_irq(priv->pdev->irq, adm8211_interrupt,
1542 IRQF_SHARED, "adm8211", dev); 1542 IRQF_SHARED, "adm8211", dev);
1543 if (retval) { 1543 if (retval) {
1544 printk(KERN_ERR "%s: failed to register IRQ handler\n", 1544 printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index abf896a7390e..4eec87c3be2b 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5659,7 +5659,8 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5659 5659
5660 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 5660 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
5661 pci_save_state(pdev); 5661 pci_save_state(pdev);
5662 return pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5662 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5663 return 0;
5663} 5664}
5664 5665
5665static int airo_pci_resume(struct pci_dev *pdev) 5666static int airo_pci_resume(struct pci_dev *pdev)
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 8e1a55dec351..2517364d3ebe 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -121,6 +121,14 @@ static struct fwentry firmwares[] = {
121 [BOARD_505A] = { "atmel_at76c505a-rfmd2958.bin" }, 121 [BOARD_505A] = { "atmel_at76c505a-rfmd2958.bin" },
122 [BOARD_505AMX] = { "atmel_at76c505amx-rfmd.bin" }, 122 [BOARD_505AMX] = { "atmel_at76c505amx-rfmd.bin" },
123}; 123};
124MODULE_FIRMWARE("atmel_at76c503-i3861.bin");
125MODULE_FIRMWARE("atmel_at76c503-i3863.bin");
126MODULE_FIRMWARE("atmel_at76c503-rfmd.bin");
127MODULE_FIRMWARE("atmel_at76c503-rfmd-acc.bin");
128MODULE_FIRMWARE("atmel_at76c505-rfmd.bin");
129MODULE_FIRMWARE("atmel_at76c505-rfmd2958.bin");
130MODULE_FIRMWARE("atmel_at76c505a-rfmd2958.bin");
131MODULE_FIRMWARE("atmel_at76c505amx-rfmd.bin");
124 132
125#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) 133#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops)
126 134
@@ -524,20 +532,6 @@ static char *hex2str(void *buf, int len)
524 return ret; 532 return ret;
525} 533}
526 534
527#define MAC2STR_BUFFERS 4
528
529static inline char *mac2str(u8 *mac)
530{
531 static atomic_t a = ATOMIC_INIT(0);
532 static char bufs[MAC2STR_BUFFERS][6 * 3];
533 char *str;
534
535 str = bufs[atomic_inc_return(&a) & (MAC2STR_BUFFERS - 1)];
536 sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x",
537 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
538 return str;
539}
540
541/* LED trigger */ 535/* LED trigger */
542static int tx_activity; 536static int tx_activity;
543static void at76_ledtrig_tx_timerfunc(unsigned long data); 537static void at76_ledtrig_tx_timerfunc(unsigned long data);
@@ -973,13 +967,13 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
973 goto exit; 967 goto exit;
974 } 968 }
975 969
976 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %s res 0x%x 0x%x", 970 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %pM res 0x%x 0x%x",
977 wiphy_name(priv->hw->wiphy), 971 wiphy_name(priv->hw->wiphy),
978 mac2str(m->mac_addr), m->res[0], m->res[1]); 972 m->mac_addr, m->res[0], m->res[1]);
979 for (i = 0; i < ARRAY_SIZE(m->group_addr); i++) 973 for (i = 0; i < ARRAY_SIZE(m->group_addr); i++)
980 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %s, " 974 at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %pM, "
981 "status %d", wiphy_name(priv->hw->wiphy), i, 975 "status %d", wiphy_name(priv->hw->wiphy), i,
982 mac2str(m->group_addr[i]), m->group_addr_status[i]); 976 m->group_addr[i], m->group_addr_status[i]);
983exit: 977exit:
984 kfree(m); 978 kfree(m);
985} 979}
@@ -1042,7 +1036,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1042 at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration " 1036 at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
1043 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d " 1037 "%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
1044 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d " 1038 "CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
1045 "current_bssid %s current_essid %s current_bss_type %d " 1039 "current_bssid %pM current_essid %s current_bss_type %d "
1046 "pm_mode %d ibss_change %d res %d " 1040 "pm_mode %d ibss_change %d res %d "
1047 "multi_domain_capability_implemented %d " 1041 "multi_domain_capability_implemented %d "
1048 "international_roaming %d country_string %.3s", 1042 "international_roaming %d country_string %.3s",
@@ -1051,7 +1045,7 @@ static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
1051 le16_to_cpu(m->medium_occupancy_limit), 1045 le16_to_cpu(m->medium_occupancy_limit),
1052 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window), 1046 le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
1053 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period, 1047 m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
1054 m->CFP_period, mac2str(m->current_bssid), 1048 m->CFP_period, m->current_bssid,
1055 hex2str(m->current_essid, IW_ESSID_MAX_SIZE), 1049 hex2str(m->current_essid, IW_ESSID_MAX_SIZE),
1056 m->current_bss_type, m->power_mgmt_mode, m->ibss_change, 1050 m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
1057 m->res, m->multi_domain_capability_implemented, 1051 m->res, m->multi_domain_capability_implemented,
@@ -1080,7 +1074,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1080 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d " 1074 "cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
1081 "scan_type %d scan_channel %d probe_delay %u " 1075 "scan_type %d scan_channel %d probe_delay %u "
1082 "min_channel_time %d max_channel_time %d listen_int %d " 1076 "min_channel_time %d max_channel_time %d listen_int %d "
1083 "desired_ssid %s desired_bssid %s desired_bsstype %d", 1077 "desired_ssid %s desired_bssid %pM desired_bsstype %d",
1084 wiphy_name(priv->hw->wiphy), 1078 wiphy_name(priv->hw->wiphy),
1085 le32_to_cpu(m->max_tx_msdu_lifetime), 1079 le32_to_cpu(m->max_tx_msdu_lifetime),
1086 le32_to_cpu(m->max_rx_lifetime), 1080 le32_to_cpu(m->max_rx_lifetime),
@@ -1092,7 +1086,7 @@ static void at76_dump_mib_mac(struct at76_priv *priv)
1092 le16_to_cpu(m->max_channel_time), 1086 le16_to_cpu(m->max_channel_time),
1093 le16_to_cpu(m->listen_interval), 1087 le16_to_cpu(m->listen_interval),
1094 hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE), 1088 hex2str(m->desired_ssid, IW_ESSID_MAX_SIZE),
1095 mac2str(m->desired_bssid), m->desired_bsstype); 1089 m->desired_bssid, m->desired_bsstype);
1096exit: 1090exit:
1097 kfree(m); 1091 kfree(m);
1098} 1092}
@@ -1194,6 +1188,9 @@ static int at76_start_monitor(struct at76_priv *priv)
1194 scan.channel = priv->channel; 1188 scan.channel = priv->channel;
1195 scan.scan_type = SCAN_TYPE_PASSIVE; 1189 scan.scan_type = SCAN_TYPE_PASSIVE;
1196 scan.international_scan = 0; 1190 scan.international_scan = 0;
1191 scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
1192 scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
1193 scan.probe_delay = cpu_to_le16(0);
1197 1194
1198 ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan)); 1195 ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan));
1199 if (ret >= 0) 1196 if (ret >= 0)
@@ -2217,6 +2214,8 @@ static struct ieee80211_supported_band at76_supported_band = {
2217static int at76_init_new_device(struct at76_priv *priv, 2214static int at76_init_new_device(struct at76_priv *priv,
2218 struct usb_interface *interface) 2215 struct usb_interface *interface)
2219{ 2216{
2217 struct wiphy *wiphy;
2218 size_t len;
2220 int ret; 2219 int ret;
2221 2220
2222 /* set up the endpoint information */ 2221 /* set up the endpoint information */
@@ -2254,6 +2253,7 @@ static int at76_init_new_device(struct at76_priv *priv,
2254 priv->device_unplugged = 0; 2253 priv->device_unplugged = 0;
2255 2254
2256 /* mac80211 initialisation */ 2255 /* mac80211 initialisation */
2256 wiphy = priv->hw->wiphy;
2257 priv->hw->wiphy->max_scan_ssids = 1; 2257 priv->hw->wiphy->max_scan_ssids = 1;
2258 priv->hw->wiphy->max_scan_ie_len = 0; 2258 priv->hw->wiphy->max_scan_ie_len = 0;
2259 priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 2259 priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
@@ -2265,6 +2265,13 @@ static int at76_init_new_device(struct at76_priv *priv,
2265 SET_IEEE80211_DEV(priv->hw, &interface->dev); 2265 SET_IEEE80211_DEV(priv->hw, &interface->dev);
2266 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 2266 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
2267 2267
2268 len = sizeof(wiphy->fw_version);
2269 snprintf(wiphy->fw_version, len, "%d.%d.%d-%d",
2270 priv->fw_version.major, priv->fw_version.minor,
2271 priv->fw_version.patch, priv->fw_version.build);
2272
2273 wiphy->hw_version = priv->board_type;
2274
2268 ret = ieee80211_register_hw(priv->hw); 2275 ret = ieee80211_register_hw(priv->hw);
2269 if (ret) { 2276 if (ret) {
2270 printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n", 2277 printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n",
@@ -2274,9 +2281,9 @@ static int at76_init_new_device(struct at76_priv *priv,
2274 2281
2275 priv->mac80211_registered = 1; 2282 priv->mac80211_registered = 1;
2276 2283
2277 printk(KERN_INFO "%s: USB %s, MAC %s, firmware %d.%d.%d-%d\n", 2284 printk(KERN_INFO "%s: USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
2278 wiphy_name(priv->hw->wiphy), 2285 wiphy_name(priv->hw->wiphy),
2279 dev_name(&interface->dev), mac2str(priv->mac_addr), 2286 dev_name(&interface->dev), priv->mac_addr,
2280 priv->fw_version.major, priv->fw_version.minor, 2287 priv->fw_version.major, priv->fw_version.minor,
2281 priv->fw_version.patch, priv->fw_version.build); 2288 priv->fw_version.patch, priv->fw_version.build);
2282 printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n", 2289 printk(KERN_INFO "%s: regulatory domain 0x%02x: %s\n",
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 11ded150b932..4e7a7fd695c8 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,6 +1,5 @@
1menuconfig ATH_COMMON 1menuconfig ATH_COMMON
2 tristate "Atheros Wireless Cards" 2 tristate "Atheros Wireless Cards"
3 depends on WLAN_80211
4 depends on CFG80211 3 depends on CFG80211
5 ---help--- 4 ---help---
6 This will enable the support for the Atheros wireless drivers. 5 This will enable the support for the Atheros wireless drivers.
@@ -16,7 +15,15 @@ menuconfig ATH_COMMON
16 http://wireless.kernel.org/en/users/Drivers/Atheros 15 http://wireless.kernel.org/en/users/Drivers/Atheros
17 16
18if ATH_COMMON 17if ATH_COMMON
18
19config ATH_DEBUG
20 bool "Atheros wireless debugging"
21 ---help---
22 Say Y, if you want to debug atheros wireless drivers.
23 Right now only ath9k makes use of this.
24
19source "drivers/net/wireless/ath/ath5k/Kconfig" 25source "drivers/net/wireless/ath/ath5k/Kconfig"
20source "drivers/net/wireless/ath/ath9k/Kconfig" 26source "drivers/net/wireless/ath/ath9k/Kconfig"
21source "drivers/net/wireless/ath/ar9170/Kconfig" 27source "drivers/net/wireless/ath/ar9170/Kconfig"
28
22endif 29endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 4bb0132ada37..8113a5042afa 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,11 @@
1obj-$(CONFIG_ATH5K) += ath5k/ 1obj-$(CONFIG_ATH5K) += ath5k/
2obj-$(CONFIG_ATH9K) += ath9k/ 2obj-$(CONFIG_ATH9K_HW) += ath9k/
3obj-$(CONFIG_AR9170_USB) += ar9170/ 3obj-$(CONFIG_AR9170_USB) += ar9170/
4 4
5obj-$(CONFIG_ATH_COMMON) += ath.o 5obj-$(CONFIG_ATH_COMMON) += ath.o
6ath-objs := main.o regd.o 6
7ath-objs := main.o \
8 regd.o \
9 hw.o
10
11ath-$(CONFIG_ATH_DEBUG) += debug.o
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index 05918f1e685a..d7a4799d20fb 100644
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -1,6 +1,6 @@
1config AR9170_USB 1config AR9170_USB
2 tristate "Atheros AR9170 802.11n USB support" 2 tristate "Atheros AR9170 802.11n USB support"
3 depends on USB && MAC80211 && WLAN_80211 3 depends on USB && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 help 5 help
6 This is a driver for the Atheros "otus" 802.11n USB devices. 6 This is a driver for the Atheros "otus" 802.11n USB devices.
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 914e4718a9a8..9f9459860d82 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -172,8 +172,6 @@ struct ar9170 {
172 172
173 /* interface mode settings */ 173 /* interface mode settings */
174 struct ieee80211_vif *vif; 174 struct ieee80211_vif *vif;
175 u8 mac_addr[ETH_ALEN];
176 u8 bssid[ETH_ALEN];
177 175
178 /* beaconing */ 176 /* beaconing */
179 struct sk_buff *beacon; 177 struct sk_buff *beacon;
@@ -204,6 +202,8 @@ struct ar9170 {
204 u8 power_2G_ht20[8]; 202 u8 power_2G_ht20[8];
205 u8 power_2G_ht40[8]; 203 u8 power_2G_ht40[8];
206 204
205 u8 phy_heavy_clip;
206
207#ifdef CONFIG_AR9170_LEDS 207#ifdef CONFIG_AR9170_LEDS
208 struct delayed_work led_work; 208 struct delayed_work led_work;
209 struct ar9170_led leds[AR9170_NUM_LEDS]; 209 struct ar9170_led leds[AR9170_NUM_LEDS];
@@ -231,7 +231,7 @@ struct ar9170 {
231 struct sk_buff_head tx_status_ampdu; 231 struct sk_buff_head tx_status_ampdu;
232 spinlock_t tx_ampdu_list_lock; 232 spinlock_t tx_ampdu_list_lock;
233 struct list_head tx_ampdu_list; 233 struct list_head tx_ampdu_list;
234 unsigned int tx_ampdu_pending; 234 atomic_t tx_ampdu_pending;
235 235
236 /* rxstream mpdu merge */ 236 /* rxstream mpdu merge */
237 struct ar9170_rxstream_mpdu_merge rx_mpdu; 237 struct ar9170_rxstream_mpdu_merge rx_mpdu;
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index f57a6200167b..cf6f5c4174a6 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
@@ -72,8 +72,7 @@ int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
72 return err; 72 return err;
73} 73}
74 74
75static int ar9170_read_mreg(struct ar9170 *ar, int nregs, 75int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out)
76 const u32 *regs, u32 *out)
77{ 76{
78 int i, err; 77 int i, err;
79 __le32 *offs, *res; 78 __le32 *offs, *res;
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index a4f0e50e52b4..826c45e6b274 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -44,6 +44,7 @@
44int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len); 44int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
45int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val); 45int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val); 46int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
47int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out);
47int ar9170_echo_test(struct ar9170 *ar, u32 v); 48int ar9170_echo_test(struct ar9170 *ar, u32 v);
48 49
49/* 50/*
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 6cbfb2f83391..701ddb7d8400 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -152,14 +152,14 @@ enum ar9170_cmd {
152#define AR9170_MAC_REG_FTF_BIT14 BIT(14) 152#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
153#define AR9170_MAC_REG_FTF_BIT15 BIT(15) 153#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
154#define AR9170_MAC_REG_FTF_BAR BIT(24) 154#define AR9170_MAC_REG_FTF_BAR BIT(24)
155#define AR9170_MAC_REG_FTF_BIT25 BIT(25) 155#define AR9170_MAC_REG_FTF_BA BIT(25)
156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26) 156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
157#define AR9170_MAC_REG_FTF_RTS BIT(27) 157#define AR9170_MAC_REG_FTF_RTS BIT(27)
158#define AR9170_MAC_REG_FTF_CTS BIT(28) 158#define AR9170_MAC_REG_FTF_CTS BIT(28)
159#define AR9170_MAC_REG_FTF_ACK BIT(29) 159#define AR9170_MAC_REG_FTF_ACK BIT(29)
160#define AR9170_MAC_REG_FTF_CFE BIT(30) 160#define AR9170_MAC_REG_FTF_CFE BIT(30)
161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31) 161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff 162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0700ffff
163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff 163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
164 164
165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0) 165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
@@ -311,6 +311,8 @@ struct ar9170_tx_control {
311 311
312#define AR9170_TX_PHY_SHORT_GI 0x80000000 312#define AR9170_TX_PHY_SHORT_GI 0x80000000
313 313
314#define AR5416_MAX_RATE_POWER 63
315
314struct ar9170_rx_head { 316struct ar9170_rx_head {
315 u8 plcp[12]; 317 u8 plcp[12];
316} __packed; 318} __packed;
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index 614e3218a2bc..ddc8c09dc79e 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -35,6 +35,9 @@
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */ 37 */
38
39#include <asm/unaligned.h>
40
38#include "ar9170.h" 41#include "ar9170.h"
39#include "cmd.h" 42#include "cmd.h"
40 43
@@ -227,11 +230,8 @@ static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
227 230
228 ar9170_regwrite_begin(ar); 231 ar9170_regwrite_begin(ar);
229 232
230 ar9170_regwrite(reg, 233 ar9170_regwrite(reg, get_unaligned_le32(mac));
231 (mac[3] << 24) | (mac[2] << 16) | 234 ar9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));
232 (mac[1] << 8) | mac[0]);
233
234 ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]);
235 235
236 ar9170_regwrite_finish(); 236 ar9170_regwrite_finish();
237 237
@@ -311,13 +311,14 @@ static int ar9170_set_promiscouous(struct ar9170 *ar)
311 311
312int ar9170_set_operating_mode(struct ar9170 *ar) 312int ar9170_set_operating_mode(struct ar9170 *ar)
313{ 313{
314 struct ath_common *common = &ar->common;
314 u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS; 315 u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
315 u8 *mac_addr, *bssid; 316 u8 *mac_addr, *bssid;
316 int err; 317 int err;
317 318
318 if (ar->vif) { 319 if (ar->vif) {
319 mac_addr = ar->mac_addr; 320 mac_addr = common->macaddr;
320 bssid = ar->bssid; 321 bssid = common->curbssid;
321 322
322 switch (ar->vif->type) { 323 switch (ar->vif->type) {
323 case NL80211_IFTYPE_MESH_POINT: 324 case NL80211_IFTYPE_MESH_POINT:
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c1f8c69db165..f9d6db8d013e 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -414,9 +414,9 @@ static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
414 414
415 skb_queue_tail(&ar->tx_status_ampdu, skb); 415 skb_queue_tail(&ar->tx_status_ampdu, skb);
416 ar9170_tx_fake_ampdu_status(ar); 416 ar9170_tx_fake_ampdu_status(ar);
417 ar->tx_ampdu_pending--;
418 417
419 if (!list_empty(&ar->tx_ampdu_list) && !ar->tx_ampdu_pending) 418 if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
419 !list_empty(&ar->tx_ampdu_list))
420 ar9170_tx_ampdu(ar); 420 ar9170_tx_ampdu(ar);
421} 421}
422 422
@@ -850,6 +850,7 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
850 } 850 }
851 break; 851 break;
852 852
853 case AR9170_RX_STATUS_MODULATION_DUPOFDM:
853 case AR9170_RX_STATUS_MODULATION_OFDM: 854 case AR9170_RX_STATUS_MODULATION_OFDM:
854 switch (head->plcp[0] & 0xf) { 855 switch (head->plcp[0] & 0xf) {
855 case 0xb: 856 case 0xb:
@@ -897,8 +898,7 @@ static int ar9170_rx_mac_status(struct ar9170 *ar,
897 status->flag |= RX_FLAG_HT; 898 status->flag |= RX_FLAG_HT;
898 break; 899 break;
899 900
900 case AR9170_RX_STATUS_MODULATION_DUPOFDM: 901 default:
901 /* XXX */
902 if (ar9170_nag_limiter(ar)) 902 if (ar9170_nag_limiter(ar))
903 printk(KERN_ERR "%s: invalid modulation\n", 903 printk(KERN_ERR "%s: invalid modulation\n",
904 wiphy_name(ar->hw->wiphy)); 904 wiphy_name(ar->hw->wiphy));
@@ -1248,6 +1248,7 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
1248 ar->global_ampdu_density = 6; 1248 ar->global_ampdu_density = 6;
1249 ar->global_ampdu_factor = 3; 1249 ar->global_ampdu_factor = 3;
1250 1250
1251 atomic_set(&ar->tx_ampdu_pending, 0);
1251 ar->bad_hw_nagger = jiffies; 1252 ar->bad_hw_nagger = jiffies;
1252 1253
1253 err = ar->open(ar); 1254 err = ar->open(ar);
@@ -1773,7 +1774,7 @@ static void ar9170_tx(struct ar9170 *ar)
1773 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1774 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1774 1775
1775 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1776 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
1776 ar->tx_ampdu_pending++; 1777 atomic_inc(&ar->tx_ampdu_pending);
1777 1778
1778#ifdef AR9170_QUEUE_DEBUG 1779#ifdef AR9170_QUEUE_DEBUG
1779 printk(KERN_DEBUG "%s: send frame q:%d =>\n", 1780 printk(KERN_DEBUG "%s: send frame q:%d =>\n",
@@ -1784,7 +1785,7 @@ static void ar9170_tx(struct ar9170 *ar)
1784 err = ar->tx(ar, skb); 1785 err = ar->tx(ar, skb);
1785 if (unlikely(err)) { 1786 if (unlikely(err)) {
1786 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1787 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
1787 ar->tx_ampdu_pending--; 1788 atomic_dec(&ar->tx_ampdu_pending);
1788 1789
1789 frames_failed++; 1790 frames_failed++;
1790 dev_kfree_skb_any(skb); 1791 dev_kfree_skb_any(skb);
@@ -1931,7 +1932,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1931 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1932 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1932 bool run = ar9170_tx_ampdu_queue(ar, skb); 1933 bool run = ar9170_tx_ampdu_queue(ar, skb);
1933 1934
1934 if (run || !ar->tx_ampdu_pending) 1935 if (run || !atomic_read(&ar->tx_ampdu_pending))
1935 ar9170_tx_ampdu(ar); 1936 ar9170_tx_ampdu(ar);
1936 } else { 1937 } else {
1937 unsigned int queue = skb_get_queue_mapping(skb); 1938 unsigned int queue = skb_get_queue_mapping(skb);
@@ -1952,6 +1953,7 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1952 struct ieee80211_if_init_conf *conf) 1953 struct ieee80211_if_init_conf *conf)
1953{ 1954{
1954 struct ar9170 *ar = hw->priv; 1955 struct ar9170 *ar = hw->priv;
1956 struct ath_common *common = &ar->common;
1955 int err = 0; 1957 int err = 0;
1956 1958
1957 mutex_lock(&ar->mutex); 1959 mutex_lock(&ar->mutex);
@@ -1962,7 +1964,7 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1962 } 1964 }
1963 1965
1964 ar->vif = conf->vif; 1966 ar->vif = conf->vif;
1965 memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN); 1967 memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
1966 1968
1967 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) { 1969 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1968 ar->rx_software_decryption = true; 1970 ar->rx_software_decryption = true;
@@ -2131,12 +2133,13 @@ static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
2131 u32 changed) 2133 u32 changed)
2132{ 2134{
2133 struct ar9170 *ar = hw->priv; 2135 struct ar9170 *ar = hw->priv;
2136 struct ath_common *common = &ar->common;
2134 int err = 0; 2137 int err = 0;
2135 2138
2136 mutex_lock(&ar->mutex); 2139 mutex_lock(&ar->mutex);
2137 2140
2138 if (changed & BSS_CHANGED_BSSID) { 2141 if (changed & BSS_CHANGED_BSSID) {
2139 memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN); 2142 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
2140 err = ar9170_set_operating_mode(ar); 2143 err = ar9170_set_operating_mode(ar);
2141 if (err) 2144 if (err)
2142 goto out; 2145 goto out;
@@ -2190,22 +2193,30 @@ static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
2190{ 2193{
2191 struct ar9170 *ar = hw->priv; 2194 struct ar9170 *ar = hw->priv;
2192 int err; 2195 int err;
2193 u32 tsf_low;
2194 u32 tsf_high;
2195 u64 tsf; 2196 u64 tsf;
2197#define NR 3
2198 static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H,
2199 AR9170_MAC_REG_TSF_L,
2200 AR9170_MAC_REG_TSF_H };
2201 u32 val[NR];
2202 int loops = 0;
2196 2203
2197 mutex_lock(&ar->mutex); 2204 mutex_lock(&ar->mutex);
2198 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low); 2205
2199 if (!err) 2206 while (loops++ < 10) {
2200 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high); 2207 err = ar9170_read_mreg(ar, NR, addr, val);
2208 if (err || val[0] == val[2])
2209 break;
2210 }
2211
2201 mutex_unlock(&ar->mutex); 2212 mutex_unlock(&ar->mutex);
2202 2213
2203 if (WARN_ON(err)) 2214 if (WARN_ON(err))
2204 return 0; 2215 return 0;
2205 2216 tsf = val[0];
2206 tsf = tsf_high; 2217 tsf = (tsf << 32) | val[1];
2207 tsf = (tsf << 32) | tsf_low;
2208 return tsf; 2218 return tsf;
2219#undef NR
2209} 2220}
2210 2221
2211static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2222static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -2430,6 +2441,7 @@ static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
2430} 2441}
2431 2442
2432static int ar9170_ampdu_action(struct ieee80211_hw *hw, 2443static int ar9170_ampdu_action(struct ieee80211_hw *hw,
2444 struct ieee80211_vif *vif,
2433 enum ieee80211_ampdu_mlme_action action, 2445 enum ieee80211_ampdu_mlme_action action,
2434 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2446 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2435{ 2447{
@@ -2459,7 +2471,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
2459 tid_info->state = AR9170_TID_STATE_PROGRESS; 2471 tid_info->state = AR9170_TID_STATE_PROGRESS;
2460 tid_info->active = false; 2472 tid_info->active = false;
2461 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); 2473 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
2462 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2474 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2463 break; 2475 break;
2464 2476
2465 case IEEE80211_AMPDU_TX_STOP: 2477 case IEEE80211_AMPDU_TX_STOP:
@@ -2469,7 +2481,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
2469 tid_info->active = false; 2481 tid_info->active = false;
2470 skb_queue_purge(&tid_info->queue); 2482 skb_queue_purge(&tid_info->queue);
2471 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags); 2483 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
2472 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); 2484 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2473 break; 2485 break;
2474 2486
2475 case IEEE80211_AMPDU_TX_OPERATIONAL: 2487 case IEEE80211_AMPDU_TX_OPERATIONAL:
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index dbd488da18b1..45a415ea809a 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1239,9 +1239,6 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
1239 struct ar9170_calctl_edges edges[], 1239 struct ar9170_calctl_edges edges[],
1240 u32 freq) 1240 u32 freq)
1241{ 1241{
1242/* TODO: move somewhere else */
1243#define AR5416_MAX_RATE_POWER 63
1244
1245 int i; 1242 int i;
1246 u8 rc = AR5416_MAX_RATE_POWER; 1243 u8 rc = AR5416_MAX_RATE_POWER;
1247 u8 f; 1244 u8 f;
@@ -1259,10 +1256,11 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
1259 break; 1256 break;
1260 } 1257 }
1261 if (i > 0 && f < edges[i].channel) { 1258 if (i > 0 && f < edges[i].channel) {
1262 if (f > edges[i-1].channel && 1259 if (f > edges[i - 1].channel &&
1263 edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) { 1260 edges[i - 1].power_flags &
1261 AR9170_CALCTL_EDGE_FLAGS) {
1264 /* lower channel has the inband flag set */ 1262 /* lower channel has the inband flag set */
1265 rc = edges[i-1].power_flags & 1263 rc = edges[i - 1].power_flags &
1266 ~AR9170_CALCTL_EDGE_FLAGS; 1264 ~AR9170_CALCTL_EDGE_FLAGS;
1267 } 1265 }
1268 break; 1266 break;
@@ -1270,18 +1268,48 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
1270 } 1268 }
1271 1269
1272 if (i == AR5416_NUM_BAND_EDGES) { 1270 if (i == AR5416_NUM_BAND_EDGES) {
1273 if (f > edges[i-1].channel && 1271 if (f > edges[i - 1].channel &&
1274 edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) { 1272 edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
1275 /* lower channel has the inband flag set */ 1273 /* lower channel has the inband flag set */
1276 rc = edges[i-1].power_flags & 1274 rc = edges[i - 1].power_flags &
1277 ~AR9170_CALCTL_EDGE_FLAGS; 1275 ~AR9170_CALCTL_EDGE_FLAGS;
1278 } 1276 }
1279 } 1277 }
1280 return rc; 1278 return rc;
1281} 1279}
1282 1280
1283/* calculate the conformance test limits and apply them to ar->power* 1281static u8 ar9170_get_heavy_clip(struct ar9170 *ar,
1284 * (derived from otus hal/hpmain.c, line 3706 ff.) 1282 struct ar9170_calctl_edges edges[],
1283 u32 freq, enum ar9170_bw bw)
1284{
1285 u8 f;
1286 int i;
1287 u8 rc = 0;
1288
1289 if (freq < 3000)
1290 f = freq - 2300;
1291 else
1292 f = (freq - 4800) / 5;
1293
1294 if (bw == AR9170_BW_40_BELOW || bw == AR9170_BW_40_ABOVE)
1295 rc |= 0xf0;
1296
1297 for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
1298 if (edges[i].channel == 0xff)
1299 break;
1300 if (f == edges[i].channel) {
1301 if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
1302 rc |= 0x0f;
1303 break;
1304 }
1305 }
1306
1307 return rc;
1308}
1309
1310/*
1311 * calculate the conformance test limits and the heavy clip parameter
1312 * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
1285 */ 1313 */
1286static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw) 1314static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1287{ 1315{
@@ -1295,7 +1323,8 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1295 int pwr_cal_len; 1323 int pwr_cal_len;
1296 } *modes; 1324 } *modes;
1297 1325
1298 /* order is relevant in the mode_list_*: we fall back to the 1326 /*
1327 * order is relevant in the mode_list_*: we fall back to the
1299 * lower indices if any mode is missed in the EEPROM. 1328 * lower indices if any mode is missed in the EEPROM.
1300 */ 1329 */
1301 struct ctl_modes mode_list_2ghz[] = { 1330 struct ctl_modes mode_list_2ghz[] = {
@@ -1313,7 +1342,10 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1313 1342
1314#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n]) 1343#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
1315 1344
1316 /* TODO: investigate the differences between OTUS' 1345 ar->phy_heavy_clip = 0;
1346
1347 /*
1348 * TODO: investigate the differences between OTUS'
1317 * hpreg.c::zfHpGetRegulatoryDomain() and 1349 * hpreg.c::zfHpGetRegulatoryDomain() and
1318 * ath/regd.c::ath_regd_get_band_ctl() - 1350 * ath/regd.c::ath_regd_get_band_ctl() -
1319 * e.g. for FCC3_WORLD the OTUS procedure 1351 * e.g. for FCC3_WORLD the OTUS procedure
@@ -1347,6 +1379,15 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1347 if (ctl_idx < AR5416_NUM_CTLS) { 1379 if (ctl_idx < AR5416_NUM_CTLS) {
1348 int f_off = 0; 1380 int f_off = 0;
1349 1381
1382 /* determine heav clip parameter from
1383 the 11G edges array */
1384 if (modes[i].ctl_mode == CTL_11G) {
1385 ar->phy_heavy_clip =
1386 ar9170_get_heavy_clip(ar,
1387 EDGES(ctl_idx, 1),
1388 freq, bw);
1389 }
1390
1350 /* adjust freq for 40MHz */ 1391 /* adjust freq for 40MHz */
1351 if (modes[i].ctl_mode == CTL_2GHT40 || 1392 if (modes[i].ctl_mode == CTL_2GHT40 ||
1352 modes[i].ctl_mode == CTL_5GHT40) { 1393 modes[i].ctl_mode == CTL_5GHT40) {
@@ -1360,13 +1401,15 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1360 ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1), 1401 ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1),
1361 freq+f_off); 1402 freq+f_off);
1362 1403
1363 /* TODO: check if the regulatory max. power is 1404 /*
1405 * TODO: check if the regulatory max. power is
1364 * controlled by cfg80211 for DFS 1406 * controlled by cfg80211 for DFS
1365 * (hpmain applies it to max_power itself for DFS freq) 1407 * (hpmain applies it to max_power itself for DFS freq)
1366 */ 1408 */
1367 1409
1368 } else { 1410 } else {
1369 /* Workaround in otus driver, hpmain.c, line 3906: 1411 /*
1412 * Workaround in otus driver, hpmain.c, line 3906:
1370 * if no data for 5GHT20 are found, take the 1413 * if no data for 5GHT20 are found, take the
1371 * legacy 5G value. 1414 * legacy 5G value.
1372 * We extend this here to fallback from any other *HT or 1415 * We extend this here to fallback from any other *HT or
@@ -1390,6 +1433,19 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1390 modes[i].max_power); 1433 modes[i].max_power);
1391 } 1434 }
1392 } 1435 }
1436
1437 if (ar->phy_heavy_clip & 0xf0) {
1438 ar->power_2G_ht40[0]--;
1439 ar->power_2G_ht40[1]--;
1440 ar->power_2G_ht40[2]--;
1441 }
1442 if (ar->phy_heavy_clip & 0xf) {
1443 ar->power_2G_ht20[0]++;
1444 ar->power_2G_ht20[1]++;
1445 ar->power_2G_ht20[2]++;
1446 }
1447
1448
1393#undef EDGES 1449#undef EDGES
1394} 1450}
1395 1451
@@ -1499,8 +1555,6 @@ static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1499 /* calc. conformance test limits and apply to ar->power*[] */ 1555 /* calc. conformance test limits and apply to ar->power*[] */
1500 ar9170_calc_ctl(ar, freq, bw); 1556 ar9170_calc_ctl(ar, freq, bw);
1501 1557
1502 /* TODO: (heavy clip) regulatory domain power level fine-tuning. */
1503
1504 /* set ACK/CTS TX power */ 1558 /* set ACK/CTS TX power */
1505 ar9170_regwrite_begin(ar); 1559 ar9170_regwrite_begin(ar);
1506 1560
@@ -1643,6 +1697,17 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1643 if (err) 1697 if (err)
1644 return err; 1698 return err;
1645 1699
1700 if (ar->phy_heavy_clip) {
1701 err = ar9170_write_reg(ar, 0x1c59e0,
1702 0x200 | ar->phy_heavy_clip);
1703 if (err) {
1704 if (ar9170_nag_limiter(ar))
1705 printk(KERN_ERR "%s: failed to set "
1706 "heavy clip\n",
1707 wiphy_name(ar->hw->wiphy));
1708 }
1709 }
1710
1646 for (i = 0; i < 2; i++) { 1711 for (i = 0; i < 2; i++) {
1647 ar->noise[i] = ar9170_calc_noise_dbm( 1712 ar->noise[i] = ar9170_calc_noise_dbm(
1648 (le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff); 1713 (le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e974e5829e1a..e0799d924057 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -68,8 +68,10 @@ static struct usb_device_id ar9170_usb_ids[] = {
68 { USB_DEVICE(0x0cf3, 0x1002) }, 68 { USB_DEVICE(0x0cf3, 0x1002) },
69 /* Cace Airpcap NX */ 69 /* Cace Airpcap NX */
70 { USB_DEVICE(0xcace, 0x0300) }, 70 { USB_DEVICE(0xcace, 0x0300) },
71 /* D-Link DWA 160A */ 71 /* D-Link DWA 160 A1 */
72 { USB_DEVICE(0x07d1, 0x3c10) }, 72 { USB_DEVICE(0x07d1, 0x3c10) },
73 /* D-Link DWA 160 A2 */
74 { USB_DEVICE(0x07d1, 0x3a09) },
73 /* Netgear WNDA3100 */ 75 /* Netgear WNDA3100 */
74 { USB_DEVICE(0x0846, 0x9010) }, 76 { USB_DEVICE(0x0846, 0x9010) },
75 /* Netgear WN111 v2 */ 77 /* Netgear WN111 v2 */
@@ -108,15 +110,15 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
108 return ; 110 return ;
109 111
110 spin_lock_irqsave(&aru->tx_urb_lock, flags); 112 spin_lock_irqsave(&aru->tx_urb_lock, flags);
111 if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) { 113 if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
112 spin_unlock_irqrestore(&aru->tx_urb_lock, flags); 114 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
113 return ; 115 return ;
114 } 116 }
115 aru->tx_submitted_urbs++; 117 atomic_inc(&aru->tx_submitted_urbs);
116 118
117 urb = usb_get_from_anchor(&aru->tx_pending); 119 urb = usb_get_from_anchor(&aru->tx_pending);
118 if (!urb) { 120 if (!urb) {
119 aru->tx_submitted_urbs--; 121 atomic_dec(&aru->tx_submitted_urbs);
120 spin_unlock_irqrestore(&aru->tx_urb_lock, flags); 122 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
121 123
122 return ; 124 return ;
@@ -133,7 +135,7 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
133 err); 135 err);
134 136
135 usb_unanchor_urb(urb); 137 usb_unanchor_urb(urb);
136 aru->tx_submitted_urbs--; 138 atomic_dec(&aru->tx_submitted_urbs);
137 ar9170_tx_callback(&aru->common, urb->context); 139 ar9170_tx_callback(&aru->common, urb->context);
138 } 140 }
139 141
@@ -151,7 +153,7 @@ static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
151 return ; 153 return ;
152 } 154 }
153 155
154 aru->tx_submitted_urbs--; 156 atomic_dec(&aru->tx_submitted_urbs);
155 157
156 ar9170_tx_callback(&aru->common, skb); 158 ar9170_tx_callback(&aru->common, skb);
157 159
@@ -794,7 +796,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
794 spin_lock_init(&aru->tx_urb_lock); 796 spin_lock_init(&aru->tx_urb_lock);
795 797
796 aru->tx_pending_urbs = 0; 798 aru->tx_pending_urbs = 0;
797 aru->tx_submitted_urbs = 0; 799 atomic_set(&aru->tx_submitted_urbs, 0);
798 800
799 aru->common.stop = ar9170_usb_stop; 801 aru->common.stop = ar9170_usb_stop;
800 aru->common.flush = ar9170_usb_flush; 802 aru->common.flush = ar9170_usb_flush;
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index d098f4d5d2f2..a2ce3b169ceb 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -67,7 +67,7 @@ struct ar9170_usb {
67 bool req_one_stage_fw; 67 bool req_one_stage_fw;
68 68
69 spinlock_t tx_urb_lock; 69 spinlock_t tx_urb_lock;
70 unsigned int tx_submitted_urbs; 70 atomic_t tx_submitted_urbs;
71 unsigned int tx_pending_urbs; 71 unsigned int tx_pending_urbs;
72 72
73 struct completion cmd_wait; 73 struct completion cmd_wait;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index a63e90cbf9e5..9e05648356fe 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -18,6 +18,35 @@
18#define ATH_H 18#define ATH_H
19 19
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/if_ether.h>
22#include <net/mac80211.h>
23
24/*
25 * The key cache is used for h/w cipher state and also for
26 * tracking station state such as the current tx antenna.
27 * We also setup a mapping table between key cache slot indices
28 * and station state to short-circuit node lookups on rx.
29 * Different parts have different size key caches. We handle
30 * up to ATH_KEYMAX entries (could dynamically allocate state).
31 */
32#define ATH_KEYMAX 128 /* max key cache size we handle */
33
34static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
35
36struct ath_ani {
37 bool caldone;
38 int16_t noise_floor;
39 unsigned int longcal_timer;
40 unsigned int shortcal_timer;
41 unsigned int resetcal_timer;
42 unsigned int checkani_timer;
43 struct timer_list timer;
44};
45
46enum ath_device_state {
47 ATH_HW_UNAVAILABLE,
48 ATH_HW_INITIALIZED,
49};
21 50
22struct reg_dmn_pair_mapping { 51struct reg_dmn_pair_mapping {
23 u16 regDmnEnum; 52 u16 regDmnEnum;
@@ -36,13 +65,53 @@ struct ath_regulatory {
36 struct reg_dmn_pair_mapping *regpair; 65 struct reg_dmn_pair_mapping *regpair;
37}; 66};
38 67
68struct ath_ops {
69 unsigned int (*read)(void *, u32 reg_offset);
70 void (*write)(void *, u32 val, u32 reg_offset);
71};
72
73struct ath_common;
74
75struct ath_bus_ops {
76 void (*read_cachesize)(struct ath_common *common, int *csz);
77 void (*cleanup)(struct ath_common *common);
78 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
79 void (*bt_coex_prep)(struct ath_common *common);
80};
81
39struct ath_common { 82struct ath_common {
83 void *ah;
84 void *priv;
85 struct ieee80211_hw *hw;
86 int debug_mask;
87 enum ath_device_state state;
88
89 struct ath_ani ani;
90
40 u16 cachelsz; 91 u16 cachelsz;
92 u16 curaid;
93 u8 macaddr[ETH_ALEN];
94 u8 curbssid[ETH_ALEN];
95 u8 bssidmask[ETH_ALEN];
96
97 u8 tx_chainmask;
98 u8 rx_chainmask;
99
100 u32 rx_bufsize;
101
102 u32 keymax;
103 DECLARE_BITMAP(keymap, ATH_KEYMAX);
104 u8 splitmic;
105
41 struct ath_regulatory regulatory; 106 struct ath_regulatory regulatory;
107 const struct ath_ops *ops;
108 const struct ath_bus_ops *bus_ops;
42}; 109};
43 110
44struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, 111struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
45 u32 len, 112 u32 len,
46 gfp_t gfp_mask); 113 gfp_t gfp_mask);
47 114
115void ath_hw_setbssidmask(struct ath_common *common);
116
48#endif /* ATH_H */ 117#endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 06d006675d7d..eb83b7b4d0e3 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,6 +1,6 @@
1config ATH5K 1config ATH5K
2 tristate "Atheros 5xxx wireless cards support" 2 tristate "Atheros 5xxx wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 3 depends on PCI && MAC80211
4 select MAC80211_LEDS 4 select MAC80211_LEDS
5 select LEDS_CLASS 5 select LEDS_CLASS
6 select NEW_LEDS 6 select NEW_LEDS
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6cd5efcec417..6a2a96761111 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -35,6 +35,7 @@
35 * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities) 35 * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
36 * and clean up common bits, then introduce set/get functions in eeprom.c */ 36 * and clean up common bits, then introduce set/get functions in eeprom.c */
37#include "eeprom.h" 37#include "eeprom.h"
38#include "../ath.h"
38 39
39/* PCI IDs */ 40/* PCI IDs */
40#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */ 41#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
@@ -165,13 +166,6 @@
165#define AR5K_INI_VAL_XR 0 166#define AR5K_INI_VAL_XR 0
166#define AR5K_INI_VAL_MAX 5 167#define AR5K_INI_VAL_MAX 5
167 168
168/* Used for BSSID etc manipulation */
169#define AR5K_LOW_ID(_a)( \
170(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \
171)
172
173#define AR5K_HIGH_ID(_a) ((_a)[4] | (_a)[5] << 8)
174
175/* 169/*
176 * Some tuneable values (these should be changeable by the user) 170 * Some tuneable values (these should be changeable by the user)
177 * TODO: Make use of them and add more options OR use debug/configfs 171 * TODO: Make use of them and add more options OR use debug/configfs
@@ -204,6 +198,7 @@
204#define AR5K_TUNE_CWMAX_11B 1023 198#define AR5K_TUNE_CWMAX_11B 1023
205#define AR5K_TUNE_CWMAX_XR 7 199#define AR5K_TUNE_CWMAX_XR 7
206#define AR5K_TUNE_NOISE_FLOOR -72 200#define AR5K_TUNE_NOISE_FLOOR -72
201#define AR5K_TUNE_CCA_MAX_GOOD_VALUE -95
207#define AR5K_TUNE_MAX_TXPOWER 63 202#define AR5K_TUNE_MAX_TXPOWER 63
208#define AR5K_TUNE_DEFAULT_TXPOWER 25 203#define AR5K_TUNE_DEFAULT_TXPOWER 25
209#define AR5K_TUNE_TPC_TXPOWER false 204#define AR5K_TUNE_TPC_TXPOWER false
@@ -1012,6 +1007,14 @@ struct ath5k_capabilities {
1012 } cap_queues; 1007 } cap_queues;
1013}; 1008};
1014 1009
1010/* size of noise floor history (keep it a power of two) */
1011#define ATH5K_NF_CAL_HIST_MAX 8
1012struct ath5k_nfcal_hist
1013{
1014 s16 index; /* current index into nfval */
1015 s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
1016};
1017
1015 1018
1016/***************************************\ 1019/***************************************\
1017 HARDWARE ABSTRACTION LAYER STRUCTURE 1020 HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1027,6 +1030,7 @@ struct ath5k_capabilities {
1027/* TODO: Clean up and merge with ath5k_softc */ 1030/* TODO: Clean up and merge with ath5k_softc */
1028struct ath5k_hw { 1031struct ath5k_hw {
1029 u32 ah_magic; 1032 u32 ah_magic;
1033 struct ath_common common;
1030 1034
1031 struct ath5k_softc *ah_sc; 1035 struct ath5k_softc *ah_sc;
1032 void __iomem *ah_iobase; 1036 void __iomem *ah_iobase;
@@ -1067,14 +1071,6 @@ struct ath5k_hw {
1067 u8 ah_def_ant; 1071 u8 ah_def_ant;
1068 bool ah_software_retry; 1072 bool ah_software_retry;
1069 1073
1070 u8 ah_sta_id[ETH_ALEN];
1071
1072 /* Current BSSID we are trying to assoc to / create.
1073 * This is passed by mac80211 on config_interface() and cached here for
1074 * use in resets */
1075 u8 ah_bssid[ETH_ALEN];
1076 u8 ah_bssid_mask[ETH_ALEN];
1077
1078 int ah_gpio_npins; 1074 int ah_gpio_npins;
1079 1075
1080 struct ath5k_capabilities ah_capabilities; 1076 struct ath5k_capabilities ah_capabilities;
@@ -1125,6 +1121,8 @@ struct ath5k_hw {
1125 struct ieee80211_channel r_last_channel; 1121 struct ieee80211_channel r_last_channel;
1126 } ah_radar; 1122 } ah_radar;
1127 1123
1124 struct ath5k_nfcal_hist ah_nfcal_hist;
1125
1128 /* noise floor from last periodic calibration */ 1126 /* noise floor from last periodic calibration */
1129 s32 ah_noise_floor; 1127 s32 ah_noise_floor;
1130 1128
@@ -1160,7 +1158,7 @@ struct ath5k_hw {
1160 */ 1158 */
1161 1159
1162/* Attach/Detach Functions */ 1160/* Attach/Detach Functions */
1163extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc); 1161extern int ath5k_hw_attach(struct ath5k_softc *sc);
1164extern void ath5k_hw_detach(struct ath5k_hw *ah); 1162extern void ath5k_hw_detach(struct ath5k_hw *ah);
1165 1163
1166/* LED functions */ 1164/* LED functions */
@@ -1203,10 +1201,9 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1203/* Protocol Control Unit Functions */ 1201/* Protocol Control Unit Functions */
1204extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
1205/* BSSID Functions */ 1203/* BSSID Functions */
1206extern void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac);
1207extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1204extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1208extern void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id); 1205extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
1209extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1206extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1210/* Receive start/stop functions */ 1207/* Receive start/stop functions */
1211extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1208extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1212extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah); 1209extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
@@ -1288,8 +1285,10 @@ extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
1288extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1285extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1289extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1286extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1290/* PHY calibration */ 1287/* PHY calibration */
1288void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
1291extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1289extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1292extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); 1290extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
1291extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
1293extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah); 1292extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
1294/* Spur mitigation */ 1293/* Spur mitigation */
1295bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1294bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
@@ -1329,17 +1328,21 @@ static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1329 return turbo ? (clock / 80) : (clock / 40); 1328 return turbo ? (clock / 80) : (clock / 40);
1330} 1329}
1331 1330
1332/* 1331static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
1333 * Read from a register 1332{
1334 */ 1333 return &ah->common;
1334}
1335
1336static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
1337{
1338 return &(ath5k_hw_common(ah)->regulatory);
1339}
1340
1335static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) 1341static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1336{ 1342{
1337 return ioread32(ah->ah_iobase + reg); 1343 return ioread32(ah->ah_iobase + reg);
1338} 1344}
1339 1345
1340/*
1341 * Write to a register
1342 */
1343static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) 1346static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1344{ 1347{
1345 iowrite32(val, ah->ah_iobase + reg); 1348 iowrite32(val, ah->ah_iobase + reg);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 71a1bd254517..42284445b75e 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -101,25 +101,15 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
101 * -ENODEV if the device is not supported or prints an error msg if something 101 * -ENODEV if the device is not supported or prints an error msg if something
102 * else went wrong. 102 * else went wrong.
103 */ 103 */
104struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc) 104int ath5k_hw_attach(struct ath5k_softc *sc)
105{ 105{
106 struct ath5k_hw *ah; 106 struct ath5k_hw *ah = sc->ah;
107 struct ath_common *common = ath5k_hw_common(ah);
107 struct pci_dev *pdev = sc->pdev; 108 struct pci_dev *pdev = sc->pdev;
108 struct ath5k_eeprom_info *ee; 109 struct ath5k_eeprom_info *ee;
109 int ret; 110 int ret;
110 u32 srev; 111 u32 srev;
111 112
112 /*If we passed the test malloc a ath5k_hw struct*/
113 ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
114 if (ah == NULL) {
115 ret = -ENOMEM;
116 ATH5K_ERR(sc, "out of memory\n");
117 goto err;
118 }
119
120 ah->ah_sc = sc;
121 ah->ah_iobase = sc->iobase;
122
123 /* 113 /*
124 * HW information 114 * HW information
125 */ 115 */
@@ -278,12 +268,12 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
278 goto err_free; 268 goto err_free;
279 } 269 }
280 270
271 ee = &ah->ah_capabilities.cap_eeprom;
272
281 /* 273 /*
282 * Write PCI-E power save settings 274 * Write PCI-E power save settings
283 */ 275 */
284 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) { 276 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
285 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
286
287 ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES); 277 ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
288 ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES); 278 ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
289 279
@@ -321,7 +311,6 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
321 } 311 }
322 312
323 /* Crypto settings */ 313 /* Crypto settings */
324 ee = &ah->ah_capabilities.cap_eeprom;
325 ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 && 314 ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 &&
326 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 && 315 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 &&
327 !AR5K_EEPROM_AES_DIS(ee->ee_misc5)); 316 !AR5K_EEPROM_AES_DIS(ee->ee_misc5));
@@ -336,20 +325,21 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
336 ath5k_hw_set_lladdr(ah, (u8[ETH_ALEN]){}); 325 ath5k_hw_set_lladdr(ah, (u8[ETH_ALEN]){});
337 326
338 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ 327 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
339 memset(ah->ah_bssid, 0xff, ETH_ALEN); 328 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
340 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 329 ath5k_hw_set_associd(ah);
341 ath5k_hw_set_opmode(ah); 330 ath5k_hw_set_opmode(ah);
342 331
343 ath5k_hw_rfgain_opt_init(ah); 332 ath5k_hw_rfgain_opt_init(ah);
344 333
334 ath5k_hw_init_nfcal_hist(ah);
335
345 /* turn on HW LEDs */ 336 /* turn on HW LEDs */
346 ath5k_hw_set_ledstate(ah, AR5K_LED_INIT); 337 ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
347 338
348 return ah; 339 return 0;
349err_free: 340err_free:
350 kfree(ah); 341 kfree(ah);
351err: 342 return ret;
352 return ERR_PTR(ret);
353} 343}
354 344
355/** 345/**
@@ -369,5 +359,4 @@ void ath5k_hw_detach(struct ath5k_hw *ah)
369 ath5k_eeprom_detach(ah); 359 ath5k_eeprom_detach(ah);
370 360
371 /* assume interrupts are down */ 361 /* assume interrupts are down */
372 kfree(ah);
373} 362}
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 95a8e232b58f..a4c086f069b1 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -195,12 +195,13 @@ static int __devinit ath5k_pci_probe(struct pci_dev *pdev,
195 const struct pci_device_id *id); 195 const struct pci_device_id *id);
196static void __devexit ath5k_pci_remove(struct pci_dev *pdev); 196static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
197#ifdef CONFIG_PM 197#ifdef CONFIG_PM
198static int ath5k_pci_suspend(struct pci_dev *pdev, 198static int ath5k_pci_suspend(struct device *dev);
199 pm_message_t state); 199static int ath5k_pci_resume(struct device *dev);
200static int ath5k_pci_resume(struct pci_dev *pdev); 200
201SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
202#define ATH5K_PM_OPS (&ath5k_pm_ops)
201#else 203#else
202#define ath5k_pci_suspend NULL 204#define ATH5K_PM_OPS NULL
203#define ath5k_pci_resume NULL
204#endif /* CONFIG_PM */ 205#endif /* CONFIG_PM */
205 206
206static struct pci_driver ath5k_pci_driver = { 207static struct pci_driver ath5k_pci_driver = {
@@ -208,8 +209,7 @@ static struct pci_driver ath5k_pci_driver = {
208 .id_table = ath5k_pci_id_table, 209 .id_table = ath5k_pci_id_table,
209 .probe = ath5k_pci_probe, 210 .probe = ath5k_pci_probe,
210 .remove = __devexit_p(ath5k_pci_remove), 211 .remove = __devexit_p(ath5k_pci_remove),
211 .suspend = ath5k_pci_suspend, 212 .driver.pm = ATH5K_PM_OPS,
212 .resume = ath5k_pci_resume,
213}; 213};
214 214
215 215
@@ -323,10 +323,13 @@ static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
323static inline void ath5k_rxbuf_free(struct ath5k_softc *sc, 323static inline void ath5k_rxbuf_free(struct ath5k_softc *sc,
324 struct ath5k_buf *bf) 324 struct ath5k_buf *bf)
325{ 325{
326 struct ath5k_hw *ah = sc->ah;
327 struct ath_common *common = ath5k_hw_common(ah);
328
326 BUG_ON(!bf); 329 BUG_ON(!bf);
327 if (!bf->skb) 330 if (!bf->skb)
328 return; 331 return;
329 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, 332 pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
330 PCI_DMA_FROMDEVICE); 333 PCI_DMA_FROMDEVICE);
331 dev_kfree_skb_any(bf->skb); 334 dev_kfree_skb_any(bf->skb);
332 bf->skb = NULL; 335 bf->skb = NULL;
@@ -437,6 +440,22 @@ ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
437 440
438 return name; 441 return name;
439} 442}
443static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
444{
445 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
446 return ath5k_hw_reg_read(ah, reg_offset);
447}
448
449static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
450{
451 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
452 ath5k_hw_reg_write(ah, val, reg_offset);
453}
454
455static const struct ath_ops ath5k_common_ops = {
456 .read = ath5k_ioread32,
457 .write = ath5k_iowrite32,
458};
440 459
441static int __devinit 460static int __devinit
442ath5k_pci_probe(struct pci_dev *pdev, 461ath5k_pci_probe(struct pci_dev *pdev,
@@ -444,6 +463,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
444{ 463{
445 void __iomem *mem; 464 void __iomem *mem;
446 struct ath5k_softc *sc; 465 struct ath5k_softc *sc;
466 struct ath_common *common;
447 struct ieee80211_hw *hw; 467 struct ieee80211_hw *hw;
448 int ret; 468 int ret;
449 u8 csz; 469 u8 csz;
@@ -547,7 +567,6 @@ ath5k_pci_probe(struct pci_dev *pdev,
547 __set_bit(ATH_STAT_INVALID, sc->status); 567 __set_bit(ATH_STAT_INVALID, sc->status);
548 568
549 sc->iobase = mem; /* So we can unmap it on detach */ 569 sc->iobase = mem; /* So we can unmap it on detach */
550 sc->common.cachelsz = csz << 2; /* convert to bytes */
551 sc->opmode = NL80211_IFTYPE_STATION; 570 sc->opmode = NL80211_IFTYPE_STATION;
552 sc->bintval = 1000; 571 sc->bintval = 1000;
553 mutex_init(&sc->lock); 572 mutex_init(&sc->lock);
@@ -565,13 +584,28 @@ ath5k_pci_probe(struct pci_dev *pdev,
565 goto err_free; 584 goto err_free;
566 } 585 }
567 586
568 /* Initialize device */ 587 /*If we passed the test malloc a ath5k_hw struct*/
569 sc->ah = ath5k_hw_attach(sc); 588 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
570 if (IS_ERR(sc->ah)) { 589 if (!sc->ah) {
571 ret = PTR_ERR(sc->ah); 590 ret = -ENOMEM;
591 ATH5K_ERR(sc, "out of memory\n");
572 goto err_irq; 592 goto err_irq;
573 } 593 }
574 594
595 sc->ah->ah_sc = sc;
596 sc->ah->ah_iobase = sc->iobase;
597 common = ath5k_hw_common(sc->ah);
598 common->ops = &ath5k_common_ops;
599 common->ah = sc->ah;
600 common->hw = hw;
601 common->cachelsz = csz << 2; /* convert to bytes */
602
603 /* Initialize device */
604 ret = ath5k_hw_attach(sc);
605 if (ret) {
606 goto err_free_ah;
607 }
608
575 /* set up multi-rate retry capabilities */ 609 /* set up multi-rate retry capabilities */
576 if (sc->ah->ah_version == AR5K_AR5212) { 610 if (sc->ah->ah_version == AR5K_AR5212) {
577 hw->max_rates = 4; 611 hw->max_rates = 4;
@@ -640,6 +674,8 @@ err_ah:
640 ath5k_hw_detach(sc->ah); 674 ath5k_hw_detach(sc->ah);
641err_irq: 675err_irq:
642 free_irq(pdev->irq, sc); 676 free_irq(pdev->irq, sc);
677err_free_ah:
678 kfree(sc->ah);
643err_free: 679err_free:
644 ieee80211_free_hw(hw); 680 ieee80211_free_hw(hw);
645err_map: 681err_map:
@@ -661,6 +697,7 @@ ath5k_pci_remove(struct pci_dev *pdev)
661 ath5k_debug_finish_device(sc); 697 ath5k_debug_finish_device(sc);
662 ath5k_detach(pdev, hw); 698 ath5k_detach(pdev, hw);
663 ath5k_hw_detach(sc->ah); 699 ath5k_hw_detach(sc->ah);
700 kfree(sc->ah);
664 free_irq(pdev->irq, sc); 701 free_irq(pdev->irq, sc);
665 pci_iounmap(pdev, sc->iobase); 702 pci_iounmap(pdev, sc->iobase);
666 pci_release_region(pdev, 0); 703 pci_release_region(pdev, 0);
@@ -669,33 +706,20 @@ ath5k_pci_remove(struct pci_dev *pdev)
669} 706}
670 707
671#ifdef CONFIG_PM 708#ifdef CONFIG_PM
672static int 709static int ath5k_pci_suspend(struct device *dev)
673ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
674{ 710{
675 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 711 struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev));
676 struct ath5k_softc *sc = hw->priv; 712 struct ath5k_softc *sc = hw->priv;
677 713
678 ath5k_led_off(sc); 714 ath5k_led_off(sc);
679
680 pci_save_state(pdev);
681 pci_disable_device(pdev);
682 pci_set_power_state(pdev, PCI_D3hot);
683
684 return 0; 715 return 0;
685} 716}
686 717
687static int 718static int ath5k_pci_resume(struct device *dev)
688ath5k_pci_resume(struct pci_dev *pdev)
689{ 719{
720 struct pci_dev *pdev = to_pci_dev(dev);
690 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 721 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
691 struct ath5k_softc *sc = hw->priv; 722 struct ath5k_softc *sc = hw->priv;
692 int err;
693
694 pci_restore_state(pdev);
695
696 err = pci_enable_device(pdev);
697 if (err)
698 return err;
699 723
700 /* 724 /*
701 * Suspend/Resume resets the PCI configuration space, so we have to 725 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -718,7 +742,7 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
718{ 742{
719 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 743 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
720 struct ath5k_softc *sc = hw->priv; 744 struct ath5k_softc *sc = hw->priv;
721 struct ath_regulatory *regulatory = &sc->common.regulatory; 745 struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
722 746
723 return ath_reg_notifier_apply(wiphy, request, regulatory); 747 return ath_reg_notifier_apply(wiphy, request, regulatory);
724} 748}
@@ -728,7 +752,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
728{ 752{
729 struct ath5k_softc *sc = hw->priv; 753 struct ath5k_softc *sc = hw->priv;
730 struct ath5k_hw *ah = sc->ah; 754 struct ath5k_hw *ah = sc->ah;
731 struct ath_regulatory *regulatory = &sc->common.regulatory; 755 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
732 u8 mac[ETH_ALEN] = {}; 756 u8 mac[ETH_ALEN] = {};
733 int ret; 757 int ret;
734 758
@@ -815,7 +839,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
815 839
816 SET_IEEE80211_PERM_ADDR(hw, mac); 840 SET_IEEE80211_PERM_ADDR(hw, mac);
817 /* All MAC address bits matter for ACKs */ 841 /* All MAC address bits matter for ACKs */
818 memset(sc->bssidmask, 0xff, ETH_ALEN); 842 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
819 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); 843 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
820 844
821 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; 845 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
@@ -1152,24 +1176,26 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
1152static 1176static
1153struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr) 1177struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
1154{ 1178{
1179 struct ath_common *common = ath5k_hw_common(sc->ah);
1155 struct sk_buff *skb; 1180 struct sk_buff *skb;
1156 1181
1157 /* 1182 /*
1158 * Allocate buffer with headroom_needed space for the 1183 * Allocate buffer with headroom_needed space for the
1159 * fake physical layer header at the start. 1184 * fake physical layer header at the start.
1160 */ 1185 */
1161 skb = ath_rxbuf_alloc(&sc->common, 1186 skb = ath_rxbuf_alloc(common,
1162 sc->rxbufsize + sc->common.cachelsz - 1, 1187 common->rx_bufsize,
1163 GFP_ATOMIC); 1188 GFP_ATOMIC);
1164 1189
1165 if (!skb) { 1190 if (!skb) {
1166 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", 1191 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
1167 sc->rxbufsize + sc->common.cachelsz - 1); 1192 common->rx_bufsize);
1168 return NULL; 1193 return NULL;
1169 } 1194 }
1170 1195
1171 *skb_addr = pci_map_single(sc->pdev, 1196 *skb_addr = pci_map_single(sc->pdev,
1172 skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); 1197 skb->data, common->rx_bufsize,
1198 PCI_DMA_FROMDEVICE);
1173 if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) { 1199 if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
1174 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); 1200 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
1175 dev_kfree_skb(skb); 1201 dev_kfree_skb(skb);
@@ -1605,13 +1631,14 @@ static int
1605ath5k_rx_start(struct ath5k_softc *sc) 1631ath5k_rx_start(struct ath5k_softc *sc)
1606{ 1632{
1607 struct ath5k_hw *ah = sc->ah; 1633 struct ath5k_hw *ah = sc->ah;
1634 struct ath_common *common = ath5k_hw_common(ah);
1608 struct ath5k_buf *bf; 1635 struct ath5k_buf *bf;
1609 int ret; 1636 int ret;
1610 1637
1611 sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->common.cachelsz); 1638 common->rx_bufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
1612 1639
1613 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n", 1640 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
1614 sc->common.cachelsz, sc->rxbufsize); 1641 common->cachelsz, common->rx_bufsize);
1615 1642
1616 spin_lock_bh(&sc->rxbuflock); 1643 spin_lock_bh(&sc->rxbuflock);
1617 sc->rxlink = NULL; 1644 sc->rxlink = NULL;
@@ -1656,6 +1683,8 @@ static unsigned int
1656ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, 1683ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1657 struct sk_buff *skb, struct ath5k_rx_status *rs) 1684 struct sk_buff *skb, struct ath5k_rx_status *rs)
1658{ 1685{
1686 struct ath5k_hw *ah = sc->ah;
1687 struct ath_common *common = ath5k_hw_common(ah);
1659 struct ieee80211_hdr *hdr = (void *)skb->data; 1688 struct ieee80211_hdr *hdr = (void *)skb->data;
1660 unsigned int keyix, hlen; 1689 unsigned int keyix, hlen;
1661 1690
@@ -1672,7 +1701,7 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1672 skb->len >= hlen + 4) { 1701 skb->len >= hlen + 4) {
1673 keyix = skb->data[hlen + 3] >> 6; 1702 keyix = skb->data[hlen + 3] >> 6;
1674 1703
1675 if (test_bit(keyix, sc->keymap)) 1704 if (test_bit(keyix, common->keymap))
1676 return RX_FLAG_DECRYPTED; 1705 return RX_FLAG_DECRYPTED;
1677 } 1706 }
1678 1707
@@ -1684,13 +1713,14 @@ static void
1684ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb, 1713ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
1685 struct ieee80211_rx_status *rxs) 1714 struct ieee80211_rx_status *rxs)
1686{ 1715{
1716 struct ath_common *common = ath5k_hw_common(sc->ah);
1687 u64 tsf, bc_tstamp; 1717 u64 tsf, bc_tstamp;
1688 u32 hw_tu; 1718 u32 hw_tu;
1689 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 1719 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1690 1720
1691 if (ieee80211_is_beacon(mgmt->frame_control) && 1721 if (ieee80211_is_beacon(mgmt->frame_control) &&
1692 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && 1722 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
1693 memcmp(mgmt->bssid, sc->ah->ah_bssid, ETH_ALEN) == 0) { 1723 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
1694 /* 1724 /*
1695 * Received an IBSS beacon with the same BSSID. Hardware *must* 1725 * Received an IBSS beacon with the same BSSID. Hardware *must*
1696 * have updated the local TSF. We have to work around various 1726 * have updated the local TSF. We have to work around various
@@ -1745,6 +1775,8 @@ ath5k_tasklet_rx(unsigned long data)
1745 struct sk_buff *skb, *next_skb; 1775 struct sk_buff *skb, *next_skb;
1746 dma_addr_t next_skb_addr; 1776 dma_addr_t next_skb_addr;
1747 struct ath5k_softc *sc = (void *)data; 1777 struct ath5k_softc *sc = (void *)data;
1778 struct ath5k_hw *ah = sc->ah;
1779 struct ath_common *common = ath5k_hw_common(ah);
1748 struct ath5k_buf *bf; 1780 struct ath5k_buf *bf;
1749 struct ath5k_desc *ds; 1781 struct ath5k_desc *ds;
1750 int ret; 1782 int ret;
@@ -1822,7 +1854,7 @@ accept:
1822 if (!next_skb) 1854 if (!next_skb)
1823 goto next; 1855 goto next;
1824 1856
1825 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, 1857 pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
1826 PCI_DMA_FROMDEVICE); 1858 PCI_DMA_FROMDEVICE);
1827 skb_put(skb, rs.rs_datalen); 1859 skb_put(skb, rs.rs_datalen);
1828 1860
@@ -3008,6 +3040,8 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3008 struct ieee80211_key_conf *key) 3040 struct ieee80211_key_conf *key)
3009{ 3041{
3010 struct ath5k_softc *sc = hw->priv; 3042 struct ath5k_softc *sc = hw->priv;
3043 struct ath5k_hw *ah = sc->ah;
3044 struct ath_common *common = ath5k_hw_common(ah);
3011 int ret = 0; 3045 int ret = 0;
3012 3046
3013 if (modparam_nohwcrypt) 3047 if (modparam_nohwcrypt)
@@ -3040,14 +3074,14 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3040 ATH5K_ERR(sc, "can't set the key\n"); 3074 ATH5K_ERR(sc, "can't set the key\n");
3041 goto unlock; 3075 goto unlock;
3042 } 3076 }
3043 __set_bit(key->keyidx, sc->keymap); 3077 __set_bit(key->keyidx, common->keymap);
3044 key->hw_key_idx = key->keyidx; 3078 key->hw_key_idx = key->keyidx;
3045 key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV | 3079 key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV |
3046 IEEE80211_KEY_FLAG_GENERATE_MMIC); 3080 IEEE80211_KEY_FLAG_GENERATE_MMIC);
3047 break; 3081 break;
3048 case DISABLE_KEY: 3082 case DISABLE_KEY:
3049 ath5k_hw_reset_key(sc->ah, key->keyidx); 3083 ath5k_hw_reset_key(sc->ah, key->keyidx);
3050 __clear_bit(key->keyidx, sc->keymap); 3084 __clear_bit(key->keyidx, common->keymap);
3051 break; 3085 break;
3052 default: 3086 default:
3053 ret = -EINVAL; 3087 ret = -EINVAL;
@@ -3176,6 +3210,7 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3176{ 3210{
3177 struct ath5k_softc *sc = hw->priv; 3211 struct ath5k_softc *sc = hw->priv;
3178 struct ath5k_hw *ah = sc->ah; 3212 struct ath5k_hw *ah = sc->ah;
3213 struct ath_common *common = ath5k_hw_common(ah);
3179 unsigned long flags; 3214 unsigned long flags;
3180 3215
3181 mutex_lock(&sc->lock); 3216 mutex_lock(&sc->lock);
@@ -3184,10 +3219,9 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3184 3219
3185 if (changes & BSS_CHANGED_BSSID) { 3220 if (changes & BSS_CHANGED_BSSID) {
3186 /* Cache for later use during resets */ 3221 /* Cache for later use during resets */
3187 memcpy(ah->ah_bssid, bss_conf->bssid, ETH_ALEN); 3222 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
3188 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have 3223 common->curaid = 0;
3189 * a clean way of letting us retrieve this yet. */ 3224 ath5k_hw_set_associd(ah);
3190 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
3191 mmiowb(); 3225 mmiowb();
3192 } 3226 }
3193 3227
@@ -3200,6 +3234,14 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3200 set_beacon_filter(hw, sc->assoc); 3234 set_beacon_filter(hw, sc->assoc);
3201 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3235 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3202 AR5K_LED_ASSOC : AR5K_LED_INIT); 3236 AR5K_LED_ASSOC : AR5K_LED_INIT);
3237 if (bss_conf->assoc) {
3238 ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3239 "Bss Info ASSOC %d, bssid: %pM\n",
3240 bss_conf->aid, common->curbssid);
3241 common->curaid = bss_conf->aid;
3242 ath5k_hw_set_associd(ah);
3243 /* Once ANI is available you would start it here */
3244 }
3203 } 3245 }
3204 3246
3205 if (changes & BSS_CHANGED_BEACON) { 3247 if (changes & BSS_CHANGED_BEACON) {
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index a28c42f32c9d..b72338c9bde7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -115,7 +115,6 @@ struct ath5k_rfkill {
115 * associated with an instance of a device */ 115 * associated with an instance of a device */
116struct ath5k_softc { 116struct ath5k_softc {
117 struct pci_dev *pdev; /* for dma mapping */ 117 struct pci_dev *pdev; /* for dma mapping */
118 struct ath_common common;
119 void __iomem *iobase; /* address of the device */ 118 void __iomem *iobase; /* address of the device */
120 struct mutex lock; /* dev-level lock */ 119 struct mutex lock; /* dev-level lock */
121 struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES]; 120 struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
@@ -154,8 +153,6 @@ struct ath5k_softc {
154 153
155 enum ath5k_int imask; /* interrupt mask copy */ 154 enum ath5k_int imask; /* interrupt mask copy */
156 155
157 DECLARE_BITMAP(keymap, AR5K_KEYCACHE_SIZE); /* key use bit map */
158
159 u8 bssidmask[ETH_ALEN]; 156 u8 bssidmask[ETH_ALEN];
160 157
161 unsigned int led_pin, /* GPIO pin for driving LED */ 158 unsigned int led_pin, /* GPIO pin for driving LED */
@@ -202,15 +199,4 @@ struct ath5k_softc {
202#define ath5k_hw_hasveol(_ah) \ 199#define ath5k_hw_hasveol(_ah) \
203 (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0) 200 (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0)
204 201
205static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
206{
207 return &ah->ah_sc->common;
208}
209
210static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
211{
212 return &(ath5k_hw_common(ah)->regulatory);
213
214}
215
216#endif 202#endif
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 18eb5190ce4b..8fa439308828 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -560,8 +560,8 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
560 { AR5K_SLEEP0, 0x0002aaaa }, 560 { AR5K_SLEEP0, 0x0002aaaa },
561 { AR5K_SLEEP1, 0x02005555 }, 561 { AR5K_SLEEP1, 0x02005555 },
562 { AR5K_SLEEP2, 0x00000000 }, 562 { AR5K_SLEEP2, 0x00000000 },
563 { AR5K_BSS_IDM0, 0xffffffff }, 563 { AR_BSSMSKL, 0xffffffff },
564 { AR5K_BSS_IDM1, 0x0000ffff }, 564 { AR_BSSMSKU, 0x0000ffff },
565 { AR5K_TXPC, 0x00000000 }, 565 { AR5K_TXPC, 0x00000000 },
566 { AR5K_PROFCNT_TX, 0x00000000 }, 566 { AR5K_PROFCNT_TX, 0x00000000 },
567 { AR5K_PROFCNT_RX, 0x00000000 }, 567 { AR5K_PROFCNT_RX, 0x00000000 },
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index b548c8eaaae1..d495890355d9 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -59,6 +59,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
59 { ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) }, 59 { ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) },
60 /* Acer Aspire One A150 (maximlevitsky@gmail.com) */ 60 /* Acer Aspire One A150 (maximlevitsky@gmail.com) */
61 { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) }, 61 { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) },
62 /* Acer Aspire One AO531h AO751h (keng-yu.lin@canonical.com) */
63 { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) },
62 /* Acer Ferrari 5000 (russ.dill@gmail.com) */ 64 /* Acer Ferrari 5000 (russ.dill@gmail.com) */
63 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, 65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
64 /* E-machines E510 (tuliom@gmail.com) */ 66 /* E-machines E510 (tuliom@gmail.com) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 2942f13c9c4a..64fc1eb9b6d9 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -24,6 +24,8 @@
24* Protocol Control Unit Functions * 24* Protocol Control Unit Functions *
25\*********************************/ 25\*********************************/
26 26
27#include <asm/unaligned.h>
28
27#include "ath5k.h" 29#include "ath5k.h"
28#include "reg.h" 30#include "reg.h"
29#include "debug.h" 31#include "debug.h"
@@ -44,6 +46,7 @@
44 */ 46 */
45int ath5k_hw_set_opmode(struct ath5k_hw *ah) 47int ath5k_hw_set_opmode(struct ath5k_hw *ah)
46{ 48{
49 struct ath_common *common = ath5k_hw_common(ah);
47 u32 pcu_reg, beacon_reg, low_id, high_id; 50 u32 pcu_reg, beacon_reg, low_id, high_id;
48 51
49 52
@@ -95,8 +98,8 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
95 /* 98 /*
96 * Set PCU registers 99 * Set PCU registers
97 */ 100 */
98 low_id = AR5K_LOW_ID(ah->ah_sta_id); 101 low_id = get_unaligned_le32(common->macaddr);
99 high_id = AR5K_HIGH_ID(ah->ah_sta_id); 102 high_id = get_unaligned_le16(common->macaddr + 4);
100 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); 103 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
101 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); 104 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
102 105
@@ -238,28 +241,6 @@ int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
238 return 0; 241 return 0;
239} 242}
240 243
241
242/****************\
243* BSSID handling *
244\****************/
245
246/**
247 * ath5k_hw_get_lladdr - Get station id
248 *
249 * @ah: The &struct ath5k_hw
250 * @mac: The card's mac address
251 *
252 * Initialize ah->ah_sta_id using the mac address provided
253 * (just a memcpy).
254 *
255 * TODO: Remove it once we merge ath5k_softc and ath5k_hw
256 */
257void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
258{
259 ATH5K_TRACE(ah->ah_sc);
260 memcpy(mac, ah->ah_sta_id, ETH_ALEN);
261}
262
263/** 244/**
264 * ath5k_hw_set_lladdr - Set station id 245 * ath5k_hw_set_lladdr - Set station id
265 * 246 *
@@ -270,17 +251,18 @@ void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
270 */ 251 */
271int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) 252int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
272{ 253{
254 struct ath_common *common = ath5k_hw_common(ah);
273 u32 low_id, high_id; 255 u32 low_id, high_id;
274 u32 pcu_reg; 256 u32 pcu_reg;
275 257
276 ATH5K_TRACE(ah->ah_sc); 258 ATH5K_TRACE(ah->ah_sc);
277 /* Set new station ID */ 259 /* Set new station ID */
278 memcpy(ah->ah_sta_id, mac, ETH_ALEN); 260 memcpy(common->macaddr, mac, ETH_ALEN);
279 261
280 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; 262 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
281 263
282 low_id = AR5K_LOW_ID(mac); 264 low_id = get_unaligned_le32(mac);
283 high_id = AR5K_HIGH_ID(mac); 265 high_id = get_unaligned_le16(mac + 4);
284 266
285 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); 267 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
286 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); 268 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
@@ -297,159 +279,51 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
297 * 279 *
298 * Sets the BSSID which trigers the "SME Join" operation 280 * Sets the BSSID which trigers the "SME Join" operation
299 */ 281 */
300void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id) 282void ath5k_hw_set_associd(struct ath5k_hw *ah)
301{ 283{
302 u32 low_id, high_id; 284 struct ath_common *common = ath5k_hw_common(ah);
303 u16 tim_offset = 0; 285 u16 tim_offset = 0;
304 286
305 /* 287 /*
306 * Set simple BSSID mask on 5212 288 * Set simple BSSID mask on 5212
307 */ 289 */
308 if (ah->ah_version == AR5K_AR5212) { 290 if (ah->ah_version == AR5K_AR5212)
309 ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_bssid_mask), 291 ath_hw_setbssidmask(common);
310 AR5K_BSS_IDM0);
311 ath5k_hw_reg_write(ah, AR5K_HIGH_ID(ah->ah_bssid_mask),
312 AR5K_BSS_IDM1);
313 }
314 292
315 /* 293 /*
316 * Set BSSID which triggers the "SME Join" operation 294 * Set BSSID which triggers the "SME Join" operation
317 */ 295 */
318 low_id = AR5K_LOW_ID(bssid); 296 ath5k_hw_reg_write(ah,
319 high_id = AR5K_HIGH_ID(bssid); 297 get_unaligned_le32(common->curbssid),
320 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0); 298 AR5K_BSS_ID0);
321 ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) << 299 ath5k_hw_reg_write(ah,
322 AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1); 300 get_unaligned_le16(common->curbssid + 4) |
323 301 ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
324 if (assoc_id == 0) { 302 AR5K_BSS_ID1);
303
304 if (common->curaid == 0) {
325 ath5k_hw_disable_pspoll(ah); 305 ath5k_hw_disable_pspoll(ah);
326 return; 306 return;
327 } 307 }
328 308
329 AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM, 309 AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
330 tim_offset ? tim_offset + 4 : 0); 310 tim_offset ? tim_offset + 4 : 0);
331 311
332 ath5k_hw_enable_pspoll(ah, NULL, 0); 312 ath5k_hw_enable_pspoll(ah, NULL, 0);
333} 313}
334 314
335/** 315void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
336 * ath5k_hw_set_bssid_mask - filter out bssids we listen
337 *
338 * @ah: the &struct ath5k_hw
339 * @mask: the bssid_mask, a u8 array of size ETH_ALEN
340 *
341 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
342 * which bits of the interface's MAC address should be looked at when trying
343 * to decide which packets to ACK. In station mode and AP mode with a single
344 * BSS every bit matters since we lock to only one BSS. In AP mode with
345 * multiple BSSes (virtual interfaces) not every bit matters because hw must
346 * accept frames for all BSSes and so we tweak some bits of our mac address
347 * in order to have multiple BSSes.
348 *
349 * NOTE: This is a simple filter and does *not* filter out all
350 * relevant frames. Some frames that are not for us might get ACKed from us
351 * by PCU because they just match the mask.
352 *
353 * When handling multiple BSSes you can get the BSSID mask by computing the
354 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
355 *
356 * When you do this you are essentially computing the common bits of all your
357 * BSSes. Later it is assumed the harware will "and" (&) the BSSID mask with
358 * the MAC address to obtain the relevant bits and compare the result with
359 * (frame's BSSID & mask) to see if they match.
360 */
361/*
362 * Simple example: on your card you have have two BSSes you have created with
363 * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
364 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
365 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
366 *
367 * \
368 * MAC: 0001 |
369 * BSSID-01: 0100 | --> Belongs to us
370 * BSSID-02: 1001 |
371 * /
372 * -------------------
373 * BSSID-03: 0110 | --> External
374 * -------------------
375 *
376 * Our bssid_mask would then be:
377 *
378 * On loop iteration for BSSID-01:
379 * ~(0001 ^ 0100) -> ~(0101)
380 * -> 1010
381 * bssid_mask = 1010
382 *
383 * On loop iteration for BSSID-02:
384 * bssid_mask &= ~(0001 ^ 1001)
385 * bssid_mask = (1010) & ~(0001 ^ 1001)
386 * bssid_mask = (1010) & ~(1001)
387 * bssid_mask = (1010) & (0110)
388 * bssid_mask = 0010
389 *
390 * A bssid_mask of 0010 means "only pay attention to the second least
391 * significant bit". This is because its the only bit common
392 * amongst the MAC and all BSSIDs we support. To findout what the real
393 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
394 * or our MAC address (we assume the hardware uses the MAC address).
395 *
396 * Now, suppose there's an incoming frame for BSSID-03:
397 *
398 * IFRAME-01: 0110
399 *
400 * An easy eye-inspeciton of this already should tell you that this frame
401 * will not pass our check. This is beacuse the bssid_mask tells the
402 * hardware to only look at the second least significant bit and the
403 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
404 * as 1, which does not match 0.
405 *
406 * So with IFRAME-01 we *assume* the hardware will do:
407 *
408 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
409 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
410 * --> allow = (0010) == 0000 ? 1 : 0;
411 * --> allow = 0
412 *
413 * Lets now test a frame that should work:
414 *
415 * IFRAME-02: 0001 (we should allow)
416 *
417 * allow = (0001 & 1010) == 1010
418 *
419 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
420 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
421 * --> allow = (0010) == (0010)
422 * --> allow = 1
423 *
424 * Other examples:
425 *
426 * IFRAME-03: 0100 --> allowed
427 * IFRAME-04: 1001 --> allowed
428 * IFRAME-05: 1101 --> allowed but its not for us!!!
429 *
430 */
431int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
432{ 316{
433 u32 low_id, high_id; 317 struct ath_common *common = ath5k_hw_common(ah);
434 ATH5K_TRACE(ah->ah_sc); 318 ATH5K_TRACE(ah->ah_sc);
435 319
436 /* Cache bssid mask so that we can restore it 320 /* Cache bssid mask so that we can restore it
437 * on reset */ 321 * on reset */
438 memcpy(ah->ah_bssid_mask, mask, ETH_ALEN); 322 memcpy(common->bssidmask, mask, ETH_ALEN);
439 if (ah->ah_version == AR5K_AR5212) { 323 if (ah->ah_version == AR5K_AR5212)
440 low_id = AR5K_LOW_ID(mask); 324 ath_hw_setbssidmask(common);
441 high_id = AR5K_HIGH_ID(mask);
442
443 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
444 ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
445
446 return 0;
447 }
448
449 return -EIO;
450} 325}
451 326
452
453/************\ 327/************\
454* RX Control * 328* RX Control *
455\************/ 329\************/
@@ -1157,14 +1031,17 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
1157 /* Invalid entry (key table overflow) */ 1031 /* Invalid entry (key table overflow) */
1158 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); 1032 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
1159 1033
1160 /* MAC may be NULL if it's a broadcast key. In this case no need to 1034 /*
1161 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */ 1035 * MAC may be NULL if it's a broadcast key. In this case no need to
1036 * to compute get_unaligned_le32 and get_unaligned_le16 as we
1037 * already know it.
1038 */
1162 if (!mac) { 1039 if (!mac) {
1163 low_id = 0xffffffff; 1040 low_id = 0xffffffff;
1164 high_id = 0xffff | AR5K_KEYTABLE_VALID; 1041 high_id = 0xffff | AR5K_KEYTABLE_VALID;
1165 } else { 1042 } else {
1166 low_id = AR5K_LOW_ID(mac); 1043 low_id = get_unaligned_le32(mac);
1167 high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID; 1044 high_id = get_unaligned_le16(mac + 4) | AR5K_KEYTABLE_VALID;
1168 } 1045 }
1169 1046
1170 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry)); 1047 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a039f2bd732..bbfdcd5e7cb1 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1124,77 +1124,148 @@ ath5k_hw_calibration_poll(struct ath5k_hw *ah)
1124 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION; 1124 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
1125 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); 1125 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
1126 } 1126 }
1127}
1127 1128
1129static int sign_extend(int val, const int nbits)
1130{
1131 int order = BIT(nbits-1);
1132 return (val ^ order) - order;
1128} 1133}
1129 1134
1130/** 1135static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
1131 * ath5k_hw_noise_floor_calibration - perform PHY noise floor calibration 1136{
1132 * 1137 s32 val;
1133 * @ah: struct ath5k_hw pointer we are operating on 1138
1134 * @freq: the channel frequency, just used for error logging 1139 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
1135 * 1140 return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9);
1136 * This function performs a noise floor calibration of the PHY and waits for 1141}
1137 * it to complete. Then the noise floor value is compared to some maximum 1142
1138 * noise floor we consider valid. 1143void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
1139 * 1144{
1140 * Note that this is different from what the madwifi HAL does: it reads the 1145 int i;
1141 * noise floor and afterwards initiates the calibration. Since the noise floor 1146
1142 * calibration can take some time to finish, depending on the current channel 1147 ah->ah_nfcal_hist.index = 0;
1143 * use, that avoids the occasional timeout warnings we are seeing now. 1148 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++)
1144 * 1149 ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
1145 * See the following link for an Atheros patent on noise floor calibration: 1150}
1146 * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \ 1151
1147 * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7 1152static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
1153{
1154 struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
1155 hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX-1);
1156 hist->nfval[hist->index] = noise_floor;
1157}
1158
1159static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1160{
1161 s16 sort[ATH5K_NF_CAL_HIST_MAX];
1162 s16 tmp;
1163 int i, j;
1164
1165 memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort));
1166 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) {
1167 for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) {
1168 if (sort[j] > sort[j-1]) {
1169 tmp = sort[j];
1170 sort[j] = sort[j-1];
1171 sort[j-1] = tmp;
1172 }
1173 }
1174 }
1175 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
1176 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1177 "cal %d:%d\n", i, sort[i]);
1178 }
1179 return sort[(ATH5K_NF_CAL_HIST_MAX-1) / 2];
1180}
1181
1182/*
1183 * When we tell the hardware to perform a noise floor calibration
1184 * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
1185 * sample-and-hold the minimum noise level seen at the antennas.
1186 * This value is then stored in a ring buffer of recently measured
1187 * noise floor values so we have a moving window of the last few
1188 * samples.
1148 * 1189 *
1149 * XXX: Since during noise floor calibration antennas are detached according to 1190 * The median of the values in the history is then loaded into the
1150 * the patent, we should stop tx queues here. 1191 * hardware for its own use for RSSI and CCA measurements.
1151 */ 1192 */
1152int 1193void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1153ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1154{ 1194{
1155 int ret; 1195 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1156 unsigned int i; 1196 u32 val;
1157 s32 noise_floor; 1197 s16 nf, threshold;
1198 u8 ee_mode;
1158 1199
1159 /* 1200 /* keep last value if calibration hasn't completed */
1160 * Enable noise floor calibration 1201 if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
1161 */ 1202 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1162 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1203 "NF did not complete in calibration window\n");
1163 AR5K_PHY_AGCCTL_NF);
1164 1204
1165 ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, 1205 return;
1166 AR5K_PHY_AGCCTL_NF, 0, false);
1167 if (ret) {
1168 ATH5K_ERR(ah->ah_sc,
1169 "noise floor calibration timeout (%uMHz)\n", freq);
1170 return -EAGAIN;
1171 } 1206 }
1172 1207
1173 /* Wait until the noise floor is calibrated and read the value */ 1208 switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) {
1174 for (i = 20; i > 0; i--) { 1209 case CHANNEL_A:
1175 mdelay(1); 1210 case CHANNEL_T:
1176 noise_floor = ath5k_hw_reg_read(ah, AR5K_PHY_NF); 1211 case CHANNEL_XR:
1177 noise_floor = AR5K_PHY_NF_RVAL(noise_floor); 1212 ee_mode = AR5K_EEPROM_MODE_11A;
1178 if (noise_floor & AR5K_PHY_NF_ACTIVE) { 1213 break;
1179 noise_floor = AR5K_PHY_NF_AVAL(noise_floor); 1214 case CHANNEL_G:
1180 1215 case CHANNEL_TG:
1181 if (noise_floor <= AR5K_TUNE_NOISE_FLOOR) 1216 ee_mode = AR5K_EEPROM_MODE_11G;
1182 break; 1217 break;
1183 } 1218 default:
1219 case CHANNEL_B:
1220 ee_mode = AR5K_EEPROM_MODE_11B;
1221 break;
1184 } 1222 }
1185 1223
1186 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1187 "noise floor %d\n", noise_floor);
1188 1224
1189 if (noise_floor > AR5K_TUNE_NOISE_FLOOR) { 1225 /* completed NF calibration, test threshold */
1190 ATH5K_ERR(ah->ah_sc, 1226 nf = ath5k_hw_read_measured_noise_floor(ah);
1191 "noise floor calibration failed (%uMHz)\n", freq); 1227 threshold = ee->ee_noise_floor_thr[ee_mode];
1192 return -EAGAIN; 1228
1229 if (nf > threshold) {
1230 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1231 "noise floor failure detected; "
1232 "read %d, threshold %d\n",
1233 nf, threshold);
1234
1235 nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
1193 } 1236 }
1194 1237
1195 ah->ah_noise_floor = noise_floor; 1238 ath5k_hw_update_nfcal_hist(ah, nf);
1239 nf = ath5k_hw_get_median_noise_floor(ah);
1196 1240
1197 return 0; 1241 /* load noise floor (in .5 dBm) so the hardware will use it */
1242 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M;
1243 val |= (nf * 2) & AR5K_PHY_NF_M;
1244 ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
1245
1246 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
1247 ~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE));
1248
1249 ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
1250 0, false);
1251
1252 /*
1253 * Load a high max CCA Power value (-50 dBm in .5 dBm units)
1254 * so that we're not capped by the median we just loaded.
1255 * This will be used as the initial value for the next noise
1256 * floor calibration.
1257 */
1258 val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M);
1259 ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
1260 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1261 AR5K_PHY_AGCCTL_NF_EN |
1262 AR5K_PHY_AGCCTL_NF_NOUPDATE |
1263 AR5K_PHY_AGCCTL_NF);
1264
1265 ah->ah_noise_floor = nf;
1266
1267 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1268 "noise floor calibrated: %d\n", nf);
1198} 1269}
1199 1270
1200/* 1271/*
@@ -1287,7 +1358,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1287 return ret; 1358 return ret;
1288 } 1359 }
1289 1360
1290 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1361 ath5k_hw_update_noise_floor(ah);
1291 1362
1292 /* 1363 /*
1293 * Re-enable RX/TX and beacons 1364 * Re-enable RX/TX and beacons
@@ -1328,7 +1399,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1328 if (i_coffd == 0 || q_coffd == 0) 1399 if (i_coffd == 0 || q_coffd == 0)
1329 goto done; 1400 goto done;
1330 1401
1331 i_coff = ((-iq_corr) / i_coffd) & 0x3f; 1402 i_coff = ((-iq_corr) / i_coffd);
1332 1403
1333 /* Boundary check */ 1404 /* Boundary check */
1334 if (i_coff > 31) 1405 if (i_coff > 31)
@@ -1336,7 +1407,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1336 if (i_coff < -32) 1407 if (i_coff < -32)
1337 i_coff = -32; 1408 i_coff = -32;
1338 1409
1339 q_coff = (((s32)i_pwr / q_coffd) - 128) & 0x1f; 1410 q_coff = (((s32)i_pwr / q_coffd) - 128);
1340 1411
1341 /* Boundary check */ 1412 /* Boundary check */
1342 if (q_coff > 15) 1413 if (q_coff > 15)
@@ -1360,7 +1431,7 @@ done:
1360 * since noise floor calibration interrupts rx path while I/Q 1431 * since noise floor calibration interrupts rx path while I/Q
1361 * calibration doesn't. We don't need to run noise floor calibration 1432 * calibration doesn't. We don't need to run noise floor calibration
1362 * as often as I/Q calibration.*/ 1433 * as often as I/Q calibration.*/
1363 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1434 ath5k_hw_update_noise_floor(ah);
1364 1435
1365 /* Initiate a gain_F calibration */ 1436 /* Initiate a gain_F calibration */
1366 ath5k_hw_request_rfgain_probe(ah); 1437 ath5k_hw_request_rfgain_probe(ah);
@@ -2954,8 +3025,6 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
2954 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower); 3025 ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
2955 return -EINVAL; 3026 return -EINVAL;
2956 } 3027 }
2957 if (txpower == 0)
2958 txpower = AR5K_TUNE_DEFAULT_TXPOWER;
2959 3028
2960 /* Reset TX power values */ 3029 /* Reset TX power values */
2961 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); 3030 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index c63ea6afd96f..4cb9c5df9f46 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -35,7 +35,7 @@
35 * released by Atheros and on various debug messages found on the net. 35 * released by Atheros and on various debug messages found on the net.
36 */ 36 */
37 37
38 38#include "../reg.h"
39 39
40/*====MAC DMA REGISTERS====*/ 40/*====MAC DMA REGISTERS====*/
41 41
@@ -1650,12 +1650,6 @@
1650#define AR5K_SLEEP2_DTIM_PER_S 16 1650#define AR5K_SLEEP2_DTIM_PER_S 16
1651 1651
1652/* 1652/*
1653 * BSSID mask registers
1654 */
1655#define AR5K_BSS_IDM0 0x80e0 /* Upper bits */
1656#define AR5K_BSS_IDM1 0x80e4 /* Lower bits */
1657
1658/*
1659 * TX power control (TPC) register 1653 * TX power control (TPC) register
1660 * 1654 *
1661 * XXX: PCDAC steps (0.5dbm) or DBM ? 1655 * XXX: PCDAC steps (0.5dbm) or DBM ?
@@ -2039,17 +2033,14 @@
2039#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */ 2033#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */
2040 2034
2041/* 2035/*
2042 * PHY noise floor status register 2036 * PHY noise floor status register (CCA = Clear Channel Assessment)
2043 */ 2037 */
2044#define AR5K_PHY_NF 0x9864 /* Register address */ 2038#define AR5K_PHY_NF 0x9864 /* Register address */
2045#define AR5K_PHY_NF_M 0x000001ff /* Noise floor mask */ 2039#define AR5K_PHY_NF_M 0x000001ff /* Noise floor, written to hardware in 1/2 dBm units */
2046#define AR5K_PHY_NF_ACTIVE 0x00000100 /* Noise floor calibration still active */ 2040#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
2047#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M)
2048#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1)
2049#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
2050#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */ 2041#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */
2051#define AR5K_PHY_NF_THRESH62_S 12 2042#define AR5K_PHY_NF_THRESH62_S 12
2052#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* ??? */ 2043#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* Minimum measured noise level, read from hardware in 1 dBm units */
2053#define AR5K_PHY_NF_MINCCA_PWR_S 19 2044#define AR5K_PHY_NF_MINCCA_PWR_S 19
2054 2045
2055/* 2046/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 34e13c700849..62954fc77869 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -25,6 +25,8 @@
25 Reset functions and helpers 25 Reset functions and helpers
26\*****************************/ 26\*****************************/
27 27
28#include <asm/unaligned.h>
29
28#include <linux/pci.h> /* To determine if a card is pci-e */ 30#include <linux/pci.h> /* To determine if a card is pci-e */
29#include <linux/log2.h> 31#include <linux/log2.h>
30#include "ath5k.h" 32#include "ath5k.h"
@@ -870,6 +872,7 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
870int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 872int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
871 struct ieee80211_channel *channel, bool change_channel) 873 struct ieee80211_channel *channel, bool change_channel)
872{ 874{
875 struct ath_common *common = ath5k_hw_common(ah);
873 u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo; 876 u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo;
874 u32 phy_tst1; 877 u32 phy_tst1;
875 u8 mode, freq, ee_mode, ant[2]; 878 u8 mode, freq, ee_mode, ant[2];
@@ -1171,10 +1174,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1171 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO); 1174 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
1172 1175
1173 /* Restore sta_id flags and preserve our mac address*/ 1176 /* Restore sta_id flags and preserve our mac address*/
1174 ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_sta_id), 1177 ath5k_hw_reg_write(ah,
1175 AR5K_STA_ID0); 1178 get_unaligned_le32(common->macaddr),
1176 ath5k_hw_reg_write(ah, staid1_flags | AR5K_HIGH_ID(ah->ah_sta_id), 1179 AR5K_STA_ID0);
1177 AR5K_STA_ID1); 1180 ath5k_hw_reg_write(ah,
1181 staid1_flags | get_unaligned_le16(common->macaddr + 4),
1182 AR5K_STA_ID1);
1178 1183
1179 1184
1180 /* 1185 /*
@@ -1182,8 +1187,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1182 */ 1187 */
1183 1188
1184 /* Restore bssid and bssid mask */ 1189 /* Restore bssid and bssid mask */
1185 /* XXX: add ah->aid once mac80211 gives this to us */ 1190 ath5k_hw_set_associd(ah);
1186 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
1187 1191
1188 /* Set PCU config */ 1192 /* Set PCU config */
1189 ath5k_hw_set_opmode(ah); 1193 ath5k_hw_set_opmode(ah);
@@ -1289,7 +1293,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1289 * out and/or noise floor calibration might timeout. 1293 * out and/or noise floor calibration might timeout.
1290 */ 1294 */
1291 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1295 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1292 AR5K_PHY_AGCCTL_CAL); 1296 AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
1293 1297
1294 /* At the same time start I/Q calibration for QAM constellation 1298 /* At the same time start I/Q calibration for QAM constellation
1295 * -no need for CCK- */ 1299 * -no need for CCK- */
@@ -1310,21 +1314,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1310 channel->center_freq); 1314 channel->center_freq);
1311 } 1315 }
1312 1316
1313 /*
1314 * If we run NF calibration before AGC, it always times out.
1315 * Binary HAL starts NF and AGC calibration at the same time
1316 * and only waits for AGC to finish. Also if AGC or NF cal.
1317 * times out, reset doesn't fail on binary HAL. I believe
1318 * that's wrong because since rx path is routed to a detector,
1319 * if cal. doesn't finish we won't have RX. Sam's HAL for AR5210/5211
1320 * enables noise floor calibration after offset calibration and if noise
1321 * floor calibration fails, reset fails. I believe that's
1322 * a better approach, we just need to find a polling interval
1323 * that suits best, even if reset continues we need to make
1324 * sure that rx path is ready.
1325 */
1326 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1327
1328 /* Restore antenna mode */ 1317 /* Restore antenna mode */
1329 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); 1318 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
1330 1319
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index ef5f59c4dd80..006364f76bb4 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -1,9 +1,16 @@
1config ATH9K_HW
2 tristate
3config ATH9K_COMMON
4 tristate
5
1config ATH9K 6config ATH9K
2 tristate "Atheros 802.11n wireless cards support" 7 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 8 depends on PCI && MAC80211
9 select ATH9K_HW
4 select MAC80211_LEDS 10 select MAC80211_LEDS
5 select LEDS_CLASS 11 select LEDS_CLASS
6 select NEW_LEDS 12 select NEW_LEDS
13 select ATH9K_COMMON
7 ---help--- 14 ---help---
8 This module adds support for wireless adapters based on 15 This module adds support for wireless adapters based on
9 Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family 16 Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -16,6 +23,8 @@ config ATH9K
16 23
17 If you choose to build a module, it'll be called ath9k. 24 If you choose to build a module, it'll be called ath9k.
18 25
26if ATH_DEBUG
27
19config ATH9K_DEBUG 28config ATH9K_DEBUG
20 bool "Atheros ath9k debugging" 29 bool "Atheros ath9k debugging"
21 depends on ATH9K 30 depends on ATH9K
@@ -26,3 +35,5 @@ config ATH9K_DEBUG
26 modprobe ath9k debug=0x00000200 35 modprobe ath9k debug=0x00000200
27 36
28 Look in ath9k/debug.h for possible debug masks 37 Look in ath9k/debug.h for possible debug masks
38
39endif # ATH_DEBUG
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index ff2c9a26c10c..e53f9680a385 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,22 +1,28 @@
1ath9k-y += hw.o \ 1ath9k-y += beacon.o \
2 eeprom.o \
3 eeprom_def.o \
4 eeprom_4k.o \
5 eeprom_9287.o \
6 mac.o \
7 calib.o \
8 ani.o \
9 phy.o \
10 beacon.o \
11 main.o \ 2 main.o \
12 recv.o \ 3 recv.o \
13 xmit.o \ 4 xmit.o \
14 virtual.o \ 5 virtual.o \
15 rc.o \ 6 rc.o
16 btcoex.o
17 7
18ath9k-$(CONFIG_PCI) += pci.o 8ath9k-$(CONFIG_PCI) += pci.o
19ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o 9ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o
20ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o 10ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o
21 11
22obj-$(CONFIG_ATH9K) += ath9k.o 12obj-$(CONFIG_ATH9K) += ath9k.o
13
14ath9k_hw-y:= hw.o \
15 eeprom.o \
16 eeprom_def.o \
17 eeprom_4k.o \
18 eeprom_9287.o \
19 calib.o \
20 ani.o \
21 phy.o \
22 btcoex.o \
23 mac.o \
24
25obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
26
27obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
28ath9k_common-y:= common.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2ad7d0280f7a..329e6bc137ab 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -22,27 +22,29 @@
22#include "ath9k.h" 22#include "ath9k.h"
23 23
24/* return bus cachesize in 4B word units */ 24/* return bus cachesize in 4B word units */
25static void ath_ahb_read_cachesize(struct ath_softc *sc, int *csz) 25static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
26{ 26{
27 *csz = L1_CACHE_BYTES >> 2; 27 *csz = L1_CACHE_BYTES >> 2;
28} 28}
29 29
30static void ath_ahb_cleanup(struct ath_softc *sc) 30static void ath_ahb_cleanup(struct ath_common *common)
31{ 31{
32 struct ath_softc *sc = (struct ath_softc *)common->priv;
32 iounmap(sc->mem); 33 iounmap(sc->mem);
33} 34}
34 35
35static bool ath_ahb_eeprom_read(struct ath_hw *ah, u32 off, u16 *data) 36static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
36{ 37{
37 struct ath_softc *sc = ah->ah_sc; 38 struct ath_softc *sc = (struct ath_softc *)common->priv;
38 struct platform_device *pdev = to_platform_device(sc->dev); 39 struct platform_device *pdev = to_platform_device(sc->dev);
39 struct ath9k_platform_data *pdata; 40 struct ath9k_platform_data *pdata;
40 41
41 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data; 42 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
42 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { 43 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
43 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 44 ath_print(common, ATH_DBG_FATAL,
44 "%s: flash read failed, offset %08x is out of range\n", 45 "%s: flash read failed, offset %08x "
45 __func__, off); 46 "is out of range\n",
47 __func__, off);
46 return false; 48 return false;
47 } 49 }
48 50
@@ -67,6 +69,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
67 int irq; 69 int irq;
68 int ret = 0; 70 int ret = 0;
69 struct ath_hw *ah; 71 struct ath_hw *ah;
72 char hw_name[64];
70 73
71 if (!pdev->dev.platform_data) { 74 if (!pdev->dev.platform_data) {
72 dev_err(&pdev->dev, "no platform data specified\n"); 75 dev_err(&pdev->dev, "no platform data specified\n");
@@ -116,10 +119,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
116 sc->hw = hw; 119 sc->hw = hw;
117 sc->dev = &pdev->dev; 120 sc->dev = &pdev->dev;
118 sc->mem = mem; 121 sc->mem = mem;
119 sc->bus_ops = &ath_ahb_bus_ops;
120 sc->irq = irq; 122 sc->irq = irq;
121 123
122 ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0); 124 ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
123 if (ret) { 125 if (ret) {
124 dev_err(&pdev->dev, "failed to initialize device\n"); 126 dev_err(&pdev->dev, "failed to initialize device\n");
125 goto err_free_hw; 127 goto err_free_hw;
@@ -132,14 +134,11 @@ static int ath_ahb_probe(struct platform_device *pdev)
132 } 134 }
133 135
134 ah = sc->sc_ah; 136 ah = sc->sc_ah;
137 ath9k_hw_name(ah, hw_name, sizeof(hw_name));
135 printk(KERN_INFO 138 printk(KERN_INFO
136 "%s: Atheros AR%s MAC/BB Rev:%x, " 139 "%s: %s mem=0x%lx, irq=%d\n",
137 "AR%s RF Rev:%x, mem=0x%lx, irq=%d\n",
138 wiphy_name(hw->wiphy), 140 wiphy_name(hw->wiphy),
139 ath_mac_bb_name(ah->hw_version.macVersion), 141 hw_name,
140 ah->hw_version.macRev,
141 ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
142 ah->hw_version.phyRev,
143 (unsigned long)mem, irq); 142 (unsigned long)mem, irq);
144 143
145 return 0; 144 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2b493742ef10..2a0cd64c2bfb 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, 19static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
20 struct ath9k_channel *chan) 20 struct ath9k_channel *chan)
@@ -31,8 +31,8 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
31 } 31 }
32 } 32 }
33 33
34 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 34 ath_print(ath9k_hw_common(ah), ATH_DBG_ANI,
35 "No more channel states left. Using channel 0\n"); 35 "No more channel states left. Using channel 0\n");
36 36
37 return 0; 37 return 0;
38} 38}
@@ -41,16 +41,17 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
41 enum ath9k_ani_cmd cmd, int param) 41 enum ath9k_ani_cmd cmd, int param)
42{ 42{
43 struct ar5416AniState *aniState = ah->curani; 43 struct ar5416AniState *aniState = ah->curani;
44 struct ath_common *common = ath9k_hw_common(ah);
44 45
45 switch (cmd & ah->ani_function) { 46 switch (cmd & ah->ani_function) {
46 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{ 47 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
47 u32 level = param; 48 u32 level = param;
48 49
49 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { 50 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
50 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 51 ath_print(common, ATH_DBG_ANI,
51 "level out of range (%u > %u)\n", 52 "level out of range (%u > %u)\n",
52 level, 53 level,
53 (unsigned)ARRAY_SIZE(ah->totalSizeDesired)); 54 (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
54 return false; 55 return false;
55 } 56 }
56 57
@@ -152,10 +153,10 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
152 u32 level = param; 153 u32 level = param;
153 154
154 if (level >= ARRAY_SIZE(firstep)) { 155 if (level >= ARRAY_SIZE(firstep)) {
155 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 156 ath_print(common, ATH_DBG_ANI,
156 "level out of range (%u > %u)\n", 157 "level out of range (%u > %u)\n",
157 level, 158 level,
158 (unsigned) ARRAY_SIZE(firstep)); 159 (unsigned) ARRAY_SIZE(firstep));
159 return false; 160 return false;
160 } 161 }
161 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 162 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
@@ -174,11 +175,10 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
174 u32 level = param; 175 u32 level = param;
175 176
176 if (level >= ARRAY_SIZE(cycpwrThr1)) { 177 if (level >= ARRAY_SIZE(cycpwrThr1)) {
177 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 178 ath_print(common, ATH_DBG_ANI,
178 "level out of range (%u > %u)\n", 179 "level out of range (%u > %u)\n",
179 level, 180 level,
180 (unsigned) 181 (unsigned) ARRAY_SIZE(cycpwrThr1));
181 ARRAY_SIZE(cycpwrThr1));
182 return false; 182 return false;
183 } 183 }
184 REG_RMW_FIELD(ah, AR_PHY_TIMING5, 184 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
@@ -194,25 +194,28 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
194 case ATH9K_ANI_PRESENT: 194 case ATH9K_ANI_PRESENT:
195 break; 195 break;
196 default: 196 default:
197 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 197 ath_print(common, ATH_DBG_ANI,
198 "invalid cmd %u\n", cmd); 198 "invalid cmd %u\n", cmd);
199 return false; 199 return false;
200 } 200 }
201 201
202 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "ANI parameters:\n"); 202 ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
203 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 203 ath_print(common, ATH_DBG_ANI,
204 "noiseImmunityLevel=%d, spurImmunityLevel=%d, " 204 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
205 "ofdmWeakSigDetectOff=%d\n", 205 "ofdmWeakSigDetectOff=%d\n",
206 aniState->noiseImmunityLevel, aniState->spurImmunityLevel, 206 aniState->noiseImmunityLevel,
207 !aniState->ofdmWeakSigDetectOff); 207 aniState->spurImmunityLevel,
208 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 208 !aniState->ofdmWeakSigDetectOff);
209 "cckWeakSigThreshold=%d, " 209 ath_print(common, ATH_DBG_ANI,
210 "firstepLevel=%d, listenTime=%d\n", 210 "cckWeakSigThreshold=%d, "
211 aniState->cckWeakSigThreshold, aniState->firstepLevel, 211 "firstepLevel=%d, listenTime=%d\n",
212 aniState->listenTime); 212 aniState->cckWeakSigThreshold,
213 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 213 aniState->firstepLevel,
214 aniState->listenTime);
215 ath_print(common, ATH_DBG_ANI,
214 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", 216 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
215 aniState->cycleCount, aniState->ofdmPhyErrCount, 217 aniState->cycleCount,
218 aniState->ofdmPhyErrCount,
216 aniState->cckPhyErrCount); 219 aniState->cckPhyErrCount);
217 220
218 return true; 221 return true;
@@ -231,6 +234,7 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
231static void ath9k_ani_restart(struct ath_hw *ah) 234static void ath9k_ani_restart(struct ath_hw *ah)
232{ 235{
233 struct ar5416AniState *aniState; 236 struct ar5416AniState *aniState;
237 struct ath_common *common = ath9k_hw_common(ah);
234 238
235 if (!DO_ANI(ah)) 239 if (!DO_ANI(ah))
236 return; 240 return;
@@ -240,24 +244,24 @@ static void ath9k_ani_restart(struct ath_hw *ah)
240 244
241 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) { 245 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
242 aniState->ofdmPhyErrBase = 0; 246 aniState->ofdmPhyErrBase = 0;
243 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 247 ath_print(common, ATH_DBG_ANI,
244 "OFDM Trigger is too high for hw counters\n"); 248 "OFDM Trigger is too high for hw counters\n");
245 } else { 249 } else {
246 aniState->ofdmPhyErrBase = 250 aniState->ofdmPhyErrBase =
247 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh; 251 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
248 } 252 }
249 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) { 253 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
250 aniState->cckPhyErrBase = 0; 254 aniState->cckPhyErrBase = 0;
251 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 255 ath_print(common, ATH_DBG_ANI,
252 "CCK Trigger is too high for hw counters\n"); 256 "CCK Trigger is too high for hw counters\n");
253 } else { 257 } else {
254 aniState->cckPhyErrBase = 258 aniState->cckPhyErrBase =
255 AR_PHY_COUNTMAX - aniState->cckTrigHigh; 259 AR_PHY_COUNTMAX - aniState->cckTrigHigh;
256 } 260 }
257 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 261 ath_print(common, ATH_DBG_ANI,
258 "Writing ofdmbase=%u cckbase=%u\n", 262 "Writing ofdmbase=%u cckbase=%u\n",
259 aniState->ofdmPhyErrBase, 263 aniState->ofdmPhyErrBase,
260 aniState->cckPhyErrBase); 264 aniState->cckPhyErrBase);
261 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); 265 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
262 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); 266 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
263 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 267 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
@@ -271,7 +275,7 @@ static void ath9k_ani_restart(struct ath_hw *ah)
271 275
272static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) 276static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
273{ 277{
274 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 278 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
275 struct ar5416AniState *aniState; 279 struct ar5416AniState *aniState;
276 int32_t rssi; 280 int32_t rssi;
277 281
@@ -343,7 +347,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
343 347
344static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) 348static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
345{ 349{
346 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 350 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
347 struct ar5416AniState *aniState; 351 struct ar5416AniState *aniState;
348 int32_t rssi; 352 int32_t rssi;
349 353
@@ -464,6 +468,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
464{ 468{
465 struct ar5416AniState *aniState; 469 struct ar5416AniState *aniState;
466 struct ath9k_channel *chan = ah->curchan; 470 struct ath9k_channel *chan = ah->curchan;
471 struct ath_common *common = ath9k_hw_common(ah);
467 int index; 472 int index;
468 473
469 if (!DO_ANI(ah)) 474 if (!DO_ANI(ah))
@@ -475,8 +480,8 @@ void ath9k_ani_reset(struct ath_hw *ah)
475 480
476 if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION 481 if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION
477 && ah->opmode != NL80211_IFTYPE_ADHOC) { 482 && ah->opmode != NL80211_IFTYPE_ADHOC) {
478 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 483 ath_print(common, ATH_DBG_ANI,
479 "Reset ANI state opmode %u\n", ah->opmode); 484 "Reset ANI state opmode %u\n", ah->opmode);
480 ah->stats.ast_ani_reset++; 485 ah->stats.ast_ani_reset++;
481 486
482 if (ah->opmode == NL80211_IFTYPE_AP) { 487 if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -543,6 +548,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
543 struct ath9k_channel *chan) 548 struct ath9k_channel *chan)
544{ 549{
545 struct ar5416AniState *aniState; 550 struct ar5416AniState *aniState;
551 struct ath_common *common = ath9k_hw_common(ah);
546 int32_t listenTime; 552 int32_t listenTime;
547 u32 phyCnt1, phyCnt2; 553 u32 phyCnt1, phyCnt2;
548 u32 ofdmPhyErrCnt, cckPhyErrCnt; 554 u32 ofdmPhyErrCnt, cckPhyErrCnt;
@@ -569,20 +575,22 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
569 if (phyCnt1 < aniState->ofdmPhyErrBase || 575 if (phyCnt1 < aniState->ofdmPhyErrBase ||
570 phyCnt2 < aniState->cckPhyErrBase) { 576 phyCnt2 < aniState->cckPhyErrBase) {
571 if (phyCnt1 < aniState->ofdmPhyErrBase) { 577 if (phyCnt1 < aniState->ofdmPhyErrBase) {
572 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 578 ath_print(common, ATH_DBG_ANI,
573 "phyCnt1 0x%x, resetting " 579 "phyCnt1 0x%x, resetting "
574 "counter value to 0x%x\n", 580 "counter value to 0x%x\n",
575 phyCnt1, aniState->ofdmPhyErrBase); 581 phyCnt1,
582 aniState->ofdmPhyErrBase);
576 REG_WRITE(ah, AR_PHY_ERR_1, 583 REG_WRITE(ah, AR_PHY_ERR_1,
577 aniState->ofdmPhyErrBase); 584 aniState->ofdmPhyErrBase);
578 REG_WRITE(ah, AR_PHY_ERR_MASK_1, 585 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
579 AR_PHY_ERR_OFDM_TIMING); 586 AR_PHY_ERR_OFDM_TIMING);
580 } 587 }
581 if (phyCnt2 < aniState->cckPhyErrBase) { 588 if (phyCnt2 < aniState->cckPhyErrBase) {
582 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 589 ath_print(common, ATH_DBG_ANI,
583 "phyCnt2 0x%x, resetting " 590 "phyCnt2 0x%x, resetting "
584 "counter value to 0x%x\n", 591 "counter value to 0x%x\n",
585 phyCnt2, aniState->cckPhyErrBase); 592 phyCnt2,
593 aniState->cckPhyErrBase);
586 REG_WRITE(ah, AR_PHY_ERR_2, 594 REG_WRITE(ah, AR_PHY_ERR_2,
587 aniState->cckPhyErrBase); 595 aniState->cckPhyErrBase);
588 REG_WRITE(ah, AR_PHY_ERR_MASK_2, 596 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
@@ -621,10 +629,13 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
621 } 629 }
622 } 630 }
623} 631}
632EXPORT_SYMBOL(ath9k_hw_ani_monitor);
624 633
625void ath9k_enable_mib_counters(struct ath_hw *ah) 634void ath9k_enable_mib_counters(struct ath_hw *ah)
626{ 635{
627 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable MIB counters\n"); 636 struct ath_common *common = ath9k_hw_common(ah);
637
638 ath_print(common, ATH_DBG_ANI, "Enable MIB counters\n");
628 639
629 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 640 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
630 641
@@ -640,7 +651,10 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
640/* Freeze the MIB counters, get the stats and then clear them */ 651/* Freeze the MIB counters, get the stats and then clear them */
641void ath9k_hw_disable_mib_counters(struct ath_hw *ah) 652void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
642{ 653{
643 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disable MIB counters\n"); 654 struct ath_common *common = ath9k_hw_common(ah);
655
656 ath_print(common, ATH_DBG_ANI, "Disable MIB counters\n");
657
644 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC); 658 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
645 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 659 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
646 REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC); 660 REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC);
@@ -653,6 +667,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
653 u32 *rxf_pcnt, 667 u32 *rxf_pcnt,
654 u32 *txf_pcnt) 668 u32 *txf_pcnt)
655{ 669{
670 struct ath_common *common = ath9k_hw_common(ah);
656 static u32 cycles, rx_clear, rx_frame, tx_frame; 671 static u32 cycles, rx_clear, rx_frame, tx_frame;
657 u32 good = 1; 672 u32 good = 1;
658 673
@@ -662,8 +677,8 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
662 u32 cc = REG_READ(ah, AR_CCCNT); 677 u32 cc = REG_READ(ah, AR_CCCNT);
663 678
664 if (cycles == 0 || cycles > cc) { 679 if (cycles == 0 || cycles > cc) {
665 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 680 ath_print(common, ATH_DBG_ANI,
666 "cycle counter wrap. ExtBusy = 0\n"); 681 "cycle counter wrap. ExtBusy = 0\n");
667 good = 0; 682 good = 0;
668 } else { 683 } else {
669 u32 cc_d = cc - cycles; 684 u32 cc_d = cc - cycles;
@@ -742,6 +757,7 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
742 ath9k_ani_restart(ah); 757 ath9k_ani_restart(ah);
743 } 758 }
744} 759}
760EXPORT_SYMBOL(ath9k_hw_procmibevent);
745 761
746void ath9k_hw_ani_setup(struct ath_hw *ah) 762void ath9k_hw_ani_setup(struct ath_hw *ah)
747{ 763{
@@ -762,9 +778,10 @@ void ath9k_hw_ani_setup(struct ath_hw *ah)
762 778
763void ath9k_hw_ani_init(struct ath_hw *ah) 779void ath9k_hw_ani_init(struct ath_hw *ah)
764{ 780{
781 struct ath_common *common = ath9k_hw_common(ah);
765 int i; 782 int i;
766 783
767 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Initialize ANI\n"); 784 ath_print(common, ATH_DBG_ANI, "Initialize ANI\n");
768 785
769 memset(ah->ani, 0, sizeof(ah->ani)); 786 memset(ah->ani, 0, sizeof(ah->ani));
770 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) { 787 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
@@ -786,11 +803,11 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
786 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH; 803 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
787 } 804 }
788 805
789 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 806 ath_print(common, ATH_DBG_ANI,
790 "Setting OfdmErrBase = 0x%08x\n", 807 "Setting OfdmErrBase = 0x%08x\n",
791 ah->ani[0].ofdmPhyErrBase); 808 ah->ani[0].ofdmPhyErrBase);
792 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n", 809 ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
793 ah->ani[0].cckPhyErrBase); 810 ah->ani[0].cckPhyErrBase);
794 811
795 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase); 812 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
796 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase); 813 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
@@ -803,7 +820,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
803 820
804void ath9k_hw_ani_disable(struct ath_hw *ah) 821void ath9k_hw_ani_disable(struct ath_hw *ah)
805{ 822{
806 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling ANI\n"); 823 ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
807 824
808 ath9k_hw_disable_mib_counters(ah); 825 ath9k_hw_disable_mib_counters(ah);
809 REG_WRITE(ah, AR_PHY_ERR_1, 0); 826 REG_WRITE(ah, AR_PHY_ERR_1, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1d59f10f68da..2a40fa2cd914 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -19,14 +19,16 @@
19 19
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <net/mac80211.h>
23#include <linux/leds.h> 22#include <linux/leds.h>
24 23
25#include "hw.h"
26#include "rc.h" 24#include "rc.h"
27#include "debug.h" 25#include "debug.h"
28#include "../ath.h" 26#include "common.h"
29#include "btcoex.h" 27
28/*
29 * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
30 * should rely on this file or its contents.
31 */
30 32
31struct ath_node; 33struct ath_node;
32 34
@@ -54,15 +56,11 @@ struct ath_node;
54 56
55#define A_MAX(a, b) ((a) > (b) ? (a) : (b)) 57#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
56 58
57#define ASSERT(exp) BUG_ON(!(exp))
58
59#define TSF_TO_TU(_h,_l) \ 59#define TSF_TO_TU(_h,_l) \
60 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 60 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
61 61
62#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i)) 62#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
63 63
64static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
65
66struct ath_config { 64struct ath_config {
67 u32 ath_aggr_prot; 65 u32 ath_aggr_prot;
68 u16 txpowlimit; 66 u16 txpowlimit;
@@ -103,18 +101,6 @@ enum buffer_type {
103 BUF_XRETRY = BIT(5), 101 BUF_XRETRY = BIT(5),
104}; 102};
105 103
106struct ath_buf_state {
107 int bfs_nframes;
108 u16 bfs_al;
109 u16 bfs_frmlen;
110 int bfs_seqno;
111 int bfs_tidno;
112 int bfs_retries;
113 u8 bf_type;
114 u32 bfs_keyix;
115 enum ath9k_key_type bfs_keytype;
116};
117
118#define bf_nframes bf_state.bfs_nframes 104#define bf_nframes bf_state.bfs_nframes
119#define bf_al bf_state.bfs_al 105#define bf_al bf_state.bfs_al
120#define bf_frmlen bf_state.bfs_frmlen 106#define bf_frmlen bf_state.bfs_frmlen
@@ -129,21 +115,6 @@ struct ath_buf_state {
129#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY) 115#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
130#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY) 116#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
131 117
132struct ath_buf {
133 struct list_head list;
134 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
135 an aggregate) */
136 struct ath_buf *bf_next; /* next subframe in the aggregate */
137 struct sk_buff *bf_mpdu; /* enclosing frame structure */
138 struct ath_desc *bf_desc; /* virtual addr of desc */
139 dma_addr_t bf_daddr; /* physical addr of desc */
140 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
141 bool bf_stale;
142 u16 bf_flags;
143 struct ath_buf_state bf_state;
144 dma_addr_t bf_dmacontext;
145};
146
147struct ath_descdma { 118struct ath_descdma {
148 struct ath_desc *dd_desc; 119 struct ath_desc *dd_desc;
149 dma_addr_t dd_desc_paddr; 120 dma_addr_t dd_desc_paddr;
@@ -163,13 +134,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
163 134
164#define ATH_MAX_ANTENNA 3 135#define ATH_MAX_ANTENNA 3
165#define ATH_RXBUF 512 136#define ATH_RXBUF 512
166#define WME_NUM_TID 16
167#define ATH_TXBUF 512 137#define ATH_TXBUF 512
168#define ATH_TXMAXTRY 13 138#define ATH_TXMAXTRY 13
169#define ATH_MGT_TXMAXTRY 4 139#define ATH_MGT_TXMAXTRY 4
170#define WME_BA_BMP_SIZE 64
171#define WME_MAX_BA WME_BA_BMP_SIZE
172#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
173 140
174#define TID_TO_WME_AC(_tid) \ 141#define TID_TO_WME_AC(_tid) \
175 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ 142 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
@@ -177,12 +144,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
177 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ 144 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
178 WME_AC_VO) 145 WME_AC_VO)
179 146
180#define WME_AC_BE 0
181#define WME_AC_BK 1
182#define WME_AC_VI 2
183#define WME_AC_VO 3
184#define WME_NUM_AC 4
185
186#define ADDBA_EXCHANGE_ATTEMPTS 10 147#define ADDBA_EXCHANGE_ATTEMPTS 10
187#define ATH_AGGR_DELIM_SZ 4 148#define ATH_AGGR_DELIM_SZ 4
188#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ 149#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */
@@ -191,7 +152,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
191/* minimum h/w qdepth to be sustained to maximize aggregation */ 152/* minimum h/w qdepth to be sustained to maximize aggregation */
192#define ATH_AGGR_MIN_QDEPTH 2 153#define ATH_AGGR_MIN_QDEPTH 2
193#define ATH_AMPDU_SUBFRAME_DEFAULT 32 154#define ATH_AMPDU_SUBFRAME_DEFAULT 32
194#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
195 155
196#define IEEE80211_SEQ_SEQ_SHIFT 4 156#define IEEE80211_SEQ_SEQ_SHIFT 4
197#define IEEE80211_SEQ_MAX 4096 157#define IEEE80211_SEQ_MAX 4096
@@ -238,18 +198,8 @@ struct ath_txq {
238 struct list_head axq_q; 198 struct list_head axq_q;
239 spinlock_t axq_lock; 199 spinlock_t axq_lock;
240 u32 axq_depth; 200 u32 axq_depth;
241 u8 axq_aggr_depth;
242 bool stopped; 201 bool stopped;
243 bool axq_tx_inprogress; 202 bool axq_tx_inprogress;
244 struct ath_buf *axq_linkbuf;
245
246 /* first desc of the last descriptor that contains CTS */
247 struct ath_desc *axq_lastdsWithCTS;
248
249 /* final desc of the gating desc that determines whether
250 lastdsWithCTS has been DMA'ed or not */
251 struct ath_desc *axq_gatingds;
252
253 struct list_head axq_acq; 203 struct list_head axq_acq;
254}; 204};
255 205
@@ -257,30 +207,6 @@ struct ath_txq {
257#define AGGR_ADDBA_COMPLETE BIT(2) 207#define AGGR_ADDBA_COMPLETE BIT(2)
258#define AGGR_ADDBA_PROGRESS BIT(3) 208#define AGGR_ADDBA_PROGRESS BIT(3)
259 209
260struct ath_atx_tid {
261 struct list_head list;
262 struct list_head buf_q;
263 struct ath_node *an;
264 struct ath_atx_ac *ac;
265 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
266 u16 seq_start;
267 u16 seq_next;
268 u16 baw_size;
269 int tidno;
270 int baw_head; /* first un-acked tx buffer */
271 int baw_tail; /* next unused tx buffer slot */
272 int sched;
273 int paused;
274 u8 state;
275};
276
277struct ath_atx_ac {
278 int sched;
279 int qnum;
280 struct list_head list;
281 struct list_head tid_q;
282};
283
284struct ath_tx_control { 210struct ath_tx_control {
285 struct ath_txq *txq; 211 struct ath_txq *txq;
286 int if_id; 212 int if_id;
@@ -291,30 +217,6 @@ struct ath_tx_control {
291#define ATH_TX_XRETRY 0x02 217#define ATH_TX_XRETRY 0x02
292#define ATH_TX_BAR 0x04 218#define ATH_TX_BAR 0x04
293 219
294#define ATH_RSSI_LPF_LEN 10
295#define RSSI_LPF_THRESHOLD -20
296#define ATH9K_RSSI_BAD 0x80
297#define ATH_RSSI_EP_MULTIPLIER (1<<7)
298#define ATH_EP_MUL(x, mul) ((x) * (mul))
299#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
300#define ATH_LPF_RSSI(x, y, len) \
301 ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
302#define ATH_RSSI_LPF(x, y) do { \
303 if ((y) >= RSSI_LPF_THRESHOLD) \
304 x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
305} while (0)
306#define ATH_EP_RND(x, mul) \
307 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
308
309struct ath_node {
310 struct ath_softc *an_sc;
311 struct ath_atx_tid tid[WME_NUM_TID];
312 struct ath_atx_ac ac[WME_NUM_AC];
313 u16 maxampdu;
314 u8 mpdudensity;
315 int last_rssi;
316};
317
318struct ath_tx { 220struct ath_tx {
319 u16 seq_no; 221 u16 seq_no;
320 u32 txqsetup; 222 u32 txqsetup;
@@ -329,7 +231,6 @@ struct ath_rx {
329 u8 defant; 231 u8 defant;
330 u8 rxotherant; 232 u8 rxotherant;
331 u32 *rxlink; 233 u32 *rxlink;
332 int bufsize;
333 unsigned int rxfilter; 234 unsigned int rxfilter;
334 spinlock_t rxflushlock; 235 spinlock_t rxflushlock;
335 spinlock_t rxbuflock; 236 spinlock_t rxbuflock;
@@ -427,7 +328,6 @@ struct ath_beacon {
427 328
428void ath_beacon_tasklet(unsigned long data); 329void ath_beacon_tasklet(unsigned long data);
429void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif); 330void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
430int ath_beaconq_setup(struct ath_hw *ah);
431int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif); 331int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
432void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp); 332void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
433 333
@@ -441,14 +341,24 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
441#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */ 341#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
442#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */ 342#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
443 343
444struct ath_ani { 344/* Defines the BT AR_BT_COEX_WGHT used */
445 bool caldone; 345enum ath_stomp_type {
446 int16_t noise_floor; 346 ATH_BTCOEX_NO_STOMP,
447 unsigned int longcal_timer; 347 ATH_BTCOEX_STOMP_ALL,
448 unsigned int shortcal_timer; 348 ATH_BTCOEX_STOMP_LOW,
449 unsigned int resetcal_timer; 349 ATH_BTCOEX_STOMP_NONE
450 unsigned int checkani_timer; 350};
451 struct timer_list timer; 351
352struct ath_btcoex {
353 bool hw_timer_enabled;
354 spinlock_t btcoex_lock;
355 struct timer_list period_timer; /* Timer for BT period */
356 u32 bt_priority_cnt;
357 unsigned long bt_priority_time;
358 int bt_stomp_type; /* Types of BT stomping */
359 u32 btcoex_no_stomp; /* in usec */
360 u32 btcoex_period; /* in usec */
361 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
452}; 362};
453 363
454/********************/ 364/********************/
@@ -484,25 +394,13 @@ struct ath_led {
484 * Used when PCI device not fully initialized by bootrom/BIOS 394 * Used when PCI device not fully initialized by bootrom/BIOS
485*/ 395*/
486#define DEFAULT_CACHELINE 32 396#define DEFAULT_CACHELINE 32
487#define ATH_DEFAULT_NOISE_FLOOR -95
488#define ATH_REGCLASSIDS_MAX 10 397#define ATH_REGCLASSIDS_MAX 10
489#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 398#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
490#define ATH_MAX_SW_RETRIES 10 399#define ATH_MAX_SW_RETRIES 10
491#define ATH_CHAN_MAX 255 400#define ATH_CHAN_MAX 255
492#define IEEE80211_WEP_NKID 4 /* number of key ids */ 401#define IEEE80211_WEP_NKID 4 /* number of key ids */
493 402
494/*
495 * The key cache is used for h/w cipher state and also for
496 * tracking station state such as the current tx antenna.
497 * We also setup a mapping table between key cache slot indices
498 * and station state to short-circuit node lookups on rx.
499 * Different parts have different size key caches. We handle
500 * up to ATH_KEYMAX entries (could dynamically allocate state).
501 */
502#define ATH_KEYMAX 128 /* max key cache size we handle */
503
504#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 403#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
505#define ATH_RSSI_DUMMY_MARKER 0x127
506#define ATH_RATE_DUMMY_MARKER 0 404#define ATH_RATE_DUMMY_MARKER 0
507 405
508#define SC_OP_INVALID BIT(0) 406#define SC_OP_INVALID BIT(0)
@@ -522,23 +420,14 @@ struct ath_led {
522#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17) 420#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
523#define SC_OP_WAIT_FOR_TX_ACK BIT(18) 421#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
524#define SC_OP_BEACON_SYNC BIT(19) 422#define SC_OP_BEACON_SYNC BIT(19)
525#define SC_OP_BTCOEX_ENABLED BIT(20)
526#define SC_OP_BT_PRIORITY_DETECTED BIT(21) 423#define SC_OP_BT_PRIORITY_DETECTED BIT(21)
527 424
528struct ath_bus_ops {
529 void (*read_cachesize)(struct ath_softc *sc, int *csz);
530 void (*cleanup)(struct ath_softc *sc);
531 bool (*eeprom_read)(struct ath_hw *ah, u32 off, u16 *data);
532};
533
534struct ath_wiphy; 425struct ath_wiphy;
535 426
536struct ath_softc { 427struct ath_softc {
537 struct ieee80211_hw *hw; 428 struct ieee80211_hw *hw;
538 struct device *dev; 429 struct device *dev;
539 430
540 struct ath_common common;
541
542 spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */ 431 spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
543 struct ath_wiphy *pri_wiphy; 432 struct ath_wiphy *pri_wiphy;
544 struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may 433 struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
@@ -565,24 +454,14 @@ struct ath_softc {
565 spinlock_t sc_pm_lock; 454 spinlock_t sc_pm_lock;
566 struct mutex mutex; 455 struct mutex mutex;
567 456
568 u8 curbssid[ETH_ALEN];
569 u8 bssidmask[ETH_ALEN];
570 u32 intrstatus; 457 u32 intrstatus;
571 u32 sc_flags; /* SC_OP_* */ 458 u32 sc_flags; /* SC_OP_* */
572 u16 curtxpow; 459 u16 curtxpow;
573 u16 curaid;
574 u8 nbcnvifs; 460 u8 nbcnvifs;
575 u16 nvifs; 461 u16 nvifs;
576 u8 tx_chainmask;
577 u8 rx_chainmask;
578 u32 keymax;
579 DECLARE_BITMAP(keymap, ATH_KEYMAX);
580 u8 splitmic;
581 bool ps_enabled; 462 bool ps_enabled;
582 unsigned long ps_usecount; 463 unsigned long ps_usecount;
583 enum ath9k_int imask; 464 enum ath9k_int imask;
584 enum ath9k_ht_extprotspacing ht_extprotspacing;
585 enum ath9k_ht_macmode tx_chan_width;
586 465
587 struct ath_config config; 466 struct ath_config config;
588 struct ath_rx rx; 467 struct ath_rx rx;
@@ -605,14 +484,12 @@ struct ath_softc {
605 484
606 int beacon_interval; 485 int beacon_interval;
607 486
608 struct ath_ani ani;
609#ifdef CONFIG_ATH9K_DEBUG 487#ifdef CONFIG_ATH9K_DEBUG
610 struct ath9k_debug debug; 488 struct ath9k_debug debug;
611#endif 489#endif
612 struct ath_bus_ops *bus_ops;
613 struct ath_beacon_config cur_beacon_conf; 490 struct ath_beacon_config cur_beacon_conf;
614 struct delayed_work tx_complete_work; 491 struct delayed_work tx_complete_work;
615 struct ath_btcoex_info btcoex_info; 492 struct ath_btcoex btcoex;
616}; 493};
617 494
618struct ath_wiphy { 495struct ath_wiphy {
@@ -625,6 +502,7 @@ struct ath_wiphy {
625 ATH_WIPHY_PAUSED, 502 ATH_WIPHY_PAUSED,
626 ATH_WIPHY_SCAN, 503 ATH_WIPHY_SCAN,
627 } state; 504 } state;
505 bool idle;
628 int chan_idx; 506 int chan_idx;
629 int chan_is_ht; 507 int chan_is_ht;
630}; 508};
@@ -634,31 +512,22 @@ int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
634int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 512int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
635int ath_cabq_update(struct ath_softc *); 513int ath_cabq_update(struct ath_softc *);
636 514
637static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 515static inline void ath_read_cachesize(struct ath_common *common, int *csz)
638{
639 return &ah->ah_sc->common;
640}
641
642static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
643{ 516{
644 return &(ath9k_hw_common(ah)->regulatory); 517 common->bus_ops->read_cachesize(common, csz);
645} 518}
646 519
647static inline void ath_read_cachesize(struct ath_softc *sc, int *csz) 520static inline void ath_bus_cleanup(struct ath_common *common)
648{ 521{
649 sc->bus_ops->read_cachesize(sc, csz); 522 common->bus_ops->cleanup(common);
650}
651
652static inline void ath_bus_cleanup(struct ath_softc *sc)
653{
654 sc->bus_ops->cleanup(sc);
655} 523}
656 524
657extern struct ieee80211_ops ath9k_ops; 525extern struct ieee80211_ops ath9k_ops;
658 526
659irqreturn_t ath_isr(int irq, void *dev); 527irqreturn_t ath_isr(int irq, void *dev);
660void ath_cleanup(struct ath_softc *sc); 528void ath_cleanup(struct ath_softc *sc);
661int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid); 529int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
530 const struct ath_bus_ops *bus_ops);
662void ath_detach(struct ath_softc *sc); 531void ath_detach(struct ath_softc *sc);
663const char *ath_mac_bb_name(u32 mac_bb_version); 532const char *ath_mac_bb_name(u32 mac_bb_version);
664const char *ath_rf_name(u16 rf_version); 533const char *ath_rf_name(u16 rf_version);
@@ -668,8 +537,9 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
668void ath_update_chainmask(struct ath_softc *sc, int is_ht); 537void ath_update_chainmask(struct ath_softc *sc, int is_ht);
669int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 538int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
670 struct ath9k_channel *hchan); 539 struct ath9k_channel *hchan);
671void ath_radio_enable(struct ath_softc *sc); 540
672void ath_radio_disable(struct ath_softc *sc); 541void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
542void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
673 543
674#ifdef CONFIG_PCI 544#ifdef CONFIG_PCI
675int ath_pci_init(void); 545int ath_pci_init(void);
@@ -705,9 +575,10 @@ void ath9k_wiphy_pause_all_forced(struct ath_softc *sc,
705bool ath9k_wiphy_scanning(struct ath_softc *sc); 575bool ath9k_wiphy_scanning(struct ath_softc *sc);
706void ath9k_wiphy_work(struct work_struct *work); 576void ath9k_wiphy_work(struct work_struct *work);
707bool ath9k_all_wiphys_idle(struct ath_softc *sc); 577bool ath9k_all_wiphys_idle(struct ath_softc *sc);
578void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle);
708 579
709void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val); 580void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
710unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset); 581void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
711 582
712int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); 583int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
713#endif /* ATH9K_H */ 584#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 45c4ea57616b..b10c884f2933 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -26,6 +26,7 @@
26static int ath_beaconq_config(struct ath_softc *sc) 26static int ath_beaconq_config(struct ath_softc *sc)
27{ 27{
28 struct ath_hw *ah = sc->sc_ah; 28 struct ath_hw *ah = sc->sc_ah;
29 struct ath_common *common = ath9k_hw_common(ah);
29 struct ath9k_tx_queue_info qi; 30 struct ath9k_tx_queue_info qi;
30 31
31 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); 32 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
@@ -42,8 +43,8 @@ static int ath_beaconq_config(struct ath_softc *sc)
42 } 43 }
43 44
44 if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) { 45 if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
45 DPRINTF(sc, ATH_DBG_FATAL, 46 ath_print(common, ATH_DBG_FATAL,
46 "Unable to update h/w beacon queue parameters\n"); 47 "Unable to update h/w beacon queue parameters\n");
47 return 0; 48 return 0;
48 } else { 49 } else {
49 ath9k_hw_resettxqueue(ah, sc->beacon.beaconq); 50 ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
@@ -61,6 +62,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
61{ 62{
62 struct sk_buff *skb = bf->bf_mpdu; 63 struct sk_buff *skb = bf->bf_mpdu;
63 struct ath_hw *ah = sc->sc_ah; 64 struct ath_hw *ah = sc->sc_ah;
65 struct ath_common *common = ath9k_hw_common(ah);
64 struct ath_desc *ds; 66 struct ath_desc *ds;
65 struct ath9k_11n_rate_series series[4]; 67 struct ath9k_11n_rate_series series[4];
66 const struct ath_rate_table *rt; 68 const struct ath_rate_table *rt;
@@ -108,7 +110,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
108 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 110 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
109 series[0].Tries = 1; 111 series[0].Tries = 1;
110 series[0].Rate = rate; 112 series[0].Rate = rate;
111 series[0].ChSel = sc->tx_chainmask; 113 series[0].ChSel = common->tx_chainmask;
112 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0; 114 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
113 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration, 115 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration,
114 series, 4, 0); 116 series, 4, 0);
@@ -119,6 +121,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
119{ 121{
120 struct ath_wiphy *aphy = hw->priv; 122 struct ath_wiphy *aphy = hw->priv;
121 struct ath_softc *sc = aphy->sc; 123 struct ath_softc *sc = aphy->sc;
124 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
122 struct ath_buf *bf; 125 struct ath_buf *bf;
123 struct ath_vif *avp; 126 struct ath_vif *avp;
124 struct sk_buff *skb; 127 struct sk_buff *skb;
@@ -172,7 +175,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
172 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 175 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
173 dev_kfree_skb_any(skb); 176 dev_kfree_skb_any(skb);
174 bf->bf_mpdu = NULL; 177 bf->bf_mpdu = NULL;
175 DPRINTF(sc, ATH_DBG_FATAL, "dma_mapping_error on beaconing\n"); 178 ath_print(common, ATH_DBG_FATAL,
179 "dma_mapping_error on beaconing\n");
176 return NULL; 180 return NULL;
177 } 181 }
178 182
@@ -192,8 +196,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
192 196
193 if (skb && cabq_depth) { 197 if (skb && cabq_depth) {
194 if (sc->nvifs > 1) { 198 if (sc->nvifs > 1) {
195 DPRINTF(sc, ATH_DBG_BEACON, 199 ath_print(common, ATH_DBG_BEACON,
196 "Flushing previous cabq traffic\n"); 200 "Flushing previous cabq traffic\n");
197 ath_draintxq(sc, cabq, false); 201 ath_draintxq(sc, cabq, false);
198 } 202 }
199 } 203 }
@@ -216,6 +220,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
216 struct ieee80211_vif *vif) 220 struct ieee80211_vif *vif)
217{ 221{
218 struct ath_hw *ah = sc->sc_ah; 222 struct ath_hw *ah = sc->sc_ah;
223 struct ath_common *common = ath9k_hw_common(ah);
219 struct ath_buf *bf; 224 struct ath_buf *bf;
220 struct ath_vif *avp; 225 struct ath_vif *avp;
221 struct sk_buff *skb; 226 struct sk_buff *skb;
@@ -233,25 +238,14 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
233 /* NB: caller is known to have already stopped tx dma */ 238 /* NB: caller is known to have already stopped tx dma */
234 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr); 239 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
235 ath9k_hw_txstart(ah, sc->beacon.beaconq); 240 ath9k_hw_txstart(ah, sc->beacon.beaconq);
236 DPRINTF(sc, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n", 241 ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
237 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc); 242 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
238}
239
240int ath_beaconq_setup(struct ath_hw *ah)
241{
242 struct ath9k_tx_queue_info qi;
243
244 memset(&qi, 0, sizeof(qi));
245 qi.tqi_aifs = 1;
246 qi.tqi_cwmin = 0;
247 qi.tqi_cwmax = 0;
248 /* NB: don't enable any interrupts */
249 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
250} 243}
251 244
252int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 245int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
253{ 246{
254 struct ath_softc *sc = aphy->sc; 247 struct ath_softc *sc = aphy->sc;
248 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
255 struct ath_vif *avp; 249 struct ath_vif *avp;
256 struct ath_buf *bf; 250 struct ath_buf *bf;
257 struct sk_buff *skb; 251 struct sk_buff *skb;
@@ -309,7 +303,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
309 /* NB: the beacon data buffer must be 32-bit aligned. */ 303 /* NB: the beacon data buffer must be 32-bit aligned. */
310 skb = ieee80211_beacon_get(sc->hw, vif); 304 skb = ieee80211_beacon_get(sc->hw, vif);
311 if (skb == NULL) { 305 if (skb == NULL) {
312 DPRINTF(sc, ATH_DBG_BEACON, "cannot get skb\n"); 306 ath_print(common, ATH_DBG_BEACON, "cannot get skb\n");
313 return -ENOMEM; 307 return -ENOMEM;
314 } 308 }
315 309
@@ -333,9 +327,10 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
333 tsfadjust = intval * avp->av_bslot / ATH_BCBUF; 327 tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
334 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); 328 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
335 329
336 DPRINTF(sc, ATH_DBG_BEACON, 330 ath_print(common, ATH_DBG_BEACON,
337 "stagger beacons, bslot %d intval %u tsfadjust %llu\n", 331 "stagger beacons, bslot %d intval "
338 avp->av_bslot, intval, (unsigned long long)tsfadjust); 332 "%u tsfadjust %llu\n",
333 avp->av_bslot, intval, (unsigned long long)tsfadjust);
339 334
340 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp = 335 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
341 avp->tsf_adjust; 336 avp->tsf_adjust;
@@ -349,8 +344,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
349 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 344 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
350 dev_kfree_skb_any(skb); 345 dev_kfree_skb_any(skb);
351 bf->bf_mpdu = NULL; 346 bf->bf_mpdu = NULL;
352 DPRINTF(sc, ATH_DBG_FATAL, 347 ath_print(common, ATH_DBG_FATAL,
353 "dma_mapping_error on beacon alloc\n"); 348 "dma_mapping_error on beacon alloc\n");
354 return -ENOMEM; 349 return -ENOMEM;
355 } 350 }
356 351
@@ -386,6 +381,7 @@ void ath_beacon_tasklet(unsigned long data)
386{ 381{
387 struct ath_softc *sc = (struct ath_softc *)data; 382 struct ath_softc *sc = (struct ath_softc *)data;
388 struct ath_hw *ah = sc->sc_ah; 383 struct ath_hw *ah = sc->sc_ah;
384 struct ath_common *common = ath9k_hw_common(ah);
389 struct ath_buf *bf = NULL; 385 struct ath_buf *bf = NULL;
390 struct ieee80211_vif *vif; 386 struct ieee80211_vif *vif;
391 struct ath_wiphy *aphy; 387 struct ath_wiphy *aphy;
@@ -405,12 +401,12 @@ void ath_beacon_tasklet(unsigned long data)
405 sc->beacon.bmisscnt++; 401 sc->beacon.bmisscnt++;
406 402
407 if (sc->beacon.bmisscnt < BSTUCK_THRESH) { 403 if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
408 DPRINTF(sc, ATH_DBG_BEACON, 404 ath_print(common, ATH_DBG_BEACON,
409 "missed %u consecutive beacons\n", 405 "missed %u consecutive beacons\n",
410 sc->beacon.bmisscnt); 406 sc->beacon.bmisscnt);
411 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 407 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
412 DPRINTF(sc, ATH_DBG_BEACON, 408 ath_print(common, ATH_DBG_BEACON,
413 "beacon is officially stuck\n"); 409 "beacon is officially stuck\n");
414 sc->sc_flags |= SC_OP_TSF_RESET; 410 sc->sc_flags |= SC_OP_TSF_RESET;
415 ath_reset(sc, false); 411 ath_reset(sc, false);
416 } 412 }
@@ -419,9 +415,9 @@ void ath_beacon_tasklet(unsigned long data)
419 } 415 }
420 416
421 if (sc->beacon.bmisscnt != 0) { 417 if (sc->beacon.bmisscnt != 0) {
422 DPRINTF(sc, ATH_DBG_BEACON, 418 ath_print(common, ATH_DBG_BEACON,
423 "resume beacon xmit after %u misses\n", 419 "resume beacon xmit after %u misses\n",
424 sc->beacon.bmisscnt); 420 sc->beacon.bmisscnt);
425 sc->beacon.bmisscnt = 0; 421 sc->beacon.bmisscnt = 0;
426 } 422 }
427 423
@@ -447,9 +443,9 @@ void ath_beacon_tasklet(unsigned long data)
447 vif = sc->beacon.bslot[slot]; 443 vif = sc->beacon.bslot[slot];
448 aphy = sc->beacon.bslot_aphy[slot]; 444 aphy = sc->beacon.bslot_aphy[slot];
449 445
450 DPRINTF(sc, ATH_DBG_BEACON, 446 ath_print(common, ATH_DBG_BEACON,
451 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 447 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
452 slot, tsf, tsftu, intval, vif); 448 slot, tsf, tsftu, intval, vif);
453 449
454 bfaddr = 0; 450 bfaddr = 0;
455 if (vif) { 451 if (vif) {
@@ -490,7 +486,7 @@ void ath_beacon_tasklet(unsigned long data)
490 * are still pending on the queue. 486 * are still pending on the queue.
491 */ 487 */
492 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) { 488 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
493 DPRINTF(sc, ATH_DBG_FATAL, 489 ath_print(common, ATH_DBG_FATAL,
494 "beacon queue %u did not stop?\n", sc->beacon.beaconq); 490 "beacon queue %u did not stop?\n", sc->beacon.beaconq);
495 } 491 }
496 492
@@ -502,6 +498,19 @@ void ath_beacon_tasklet(unsigned long data)
502 } 498 }
503} 499}
504 500
501static void ath9k_beacon_init(struct ath_softc *sc,
502 u32 next_beacon,
503 u32 beacon_period)
504{
505 if (beacon_period & ATH9K_BEACON_RESET_TSF)
506 ath9k_ps_wakeup(sc);
507
508 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
509
510 if (beacon_period & ATH9K_BEACON_RESET_TSF)
511 ath9k_ps_restore(sc);
512}
513
505/* 514/*
506 * For multi-bss ap support beacons are either staggered evenly over N slots or 515 * For multi-bss ap support beacons are either staggered evenly over N slots or
507 * burst together. For the former arrange for the SWBA to be delivered for each 516 * burst together. For the former arrange for the SWBA to be delivered for each
@@ -534,7 +543,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
534 /* Set the computed AP beacon timers */ 543 /* Set the computed AP beacon timers */
535 544
536 ath9k_hw_set_interrupts(sc->sc_ah, 0); 545 ath9k_hw_set_interrupts(sc->sc_ah, 0);
537 ath9k_hw_beaconinit(sc->sc_ah, nexttbtt, intval); 546 ath9k_beacon_init(sc, nexttbtt, intval);
538 sc->beacon.bmisscnt = 0; 547 sc->beacon.bmisscnt = 0;
539 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 548 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
540 549
@@ -555,6 +564,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
555static void ath_beacon_config_sta(struct ath_softc *sc, 564static void ath_beacon_config_sta(struct ath_softc *sc,
556 struct ath_beacon_config *conf) 565 struct ath_beacon_config *conf)
557{ 566{
567 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
558 struct ath9k_beacon_state bs; 568 struct ath9k_beacon_state bs;
559 int dtimperiod, dtimcount, sleepduration; 569 int dtimperiod, dtimcount, sleepduration;
560 int cfpperiod, cfpcount; 570 int cfpperiod, cfpcount;
@@ -651,11 +661,11 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
651 /* TSF out of range threshold fixed at 1 second */ 661 /* TSF out of range threshold fixed at 1 second */
652 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; 662 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
653 663
654 DPRINTF(sc, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); 664 ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
655 DPRINTF(sc, ATH_DBG_BEACON, 665 ath_print(common, ATH_DBG_BEACON,
656 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", 666 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
657 bs.bs_bmissthreshold, bs.bs_sleepduration, 667 bs.bs_bmissthreshold, bs.bs_sleepduration,
658 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); 668 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
659 669
660 /* Set the computed STA beacon timers */ 670 /* Set the computed STA beacon timers */
661 671
@@ -669,6 +679,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
669 struct ath_beacon_config *conf, 679 struct ath_beacon_config *conf,
670 struct ieee80211_vif *vif) 680 struct ieee80211_vif *vif)
671{ 681{
682 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
672 u64 tsf; 683 u64 tsf;
673 u32 tsftu, intval, nexttbtt; 684 u32 tsftu, intval, nexttbtt;
674 685
@@ -689,9 +700,9 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
689 nexttbtt += intval; 700 nexttbtt += intval;
690 } while (nexttbtt < tsftu); 701 } while (nexttbtt < tsftu);
691 702
692 DPRINTF(sc, ATH_DBG_BEACON, 703 ath_print(common, ATH_DBG_BEACON,
693 "IBSS nexttbtt %u intval %u (%u)\n", 704 "IBSS nexttbtt %u intval %u (%u)\n",
694 nexttbtt, intval, conf->beacon_interval); 705 nexttbtt, intval, conf->beacon_interval);
695 706
696 /* 707 /*
697 * In IBSS mode enable the beacon timers but only enable SWBA interrupts 708 * In IBSS mode enable the beacon timers but only enable SWBA interrupts
@@ -707,7 +718,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
707 /* Set the computed ADHOC beacon timers */ 718 /* Set the computed ADHOC beacon timers */
708 719
709 ath9k_hw_set_interrupts(sc->sc_ah, 0); 720 ath9k_hw_set_interrupts(sc->sc_ah, 0);
710 ath9k_hw_beaconinit(sc->sc_ah, nexttbtt, intval); 721 ath9k_beacon_init(sc, nexttbtt, intval);
711 sc->beacon.bmisscnt = 0; 722 sc->beacon.bmisscnt = 0;
712 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 723 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
713 724
@@ -719,6 +730,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
719void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) 730void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
720{ 731{
721 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 732 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
733 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
722 enum nl80211_iftype iftype; 734 enum nl80211_iftype iftype;
723 735
724 /* Setup the beacon configuration parameters */ 736 /* Setup the beacon configuration parameters */
@@ -759,8 +771,8 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
759 ath_beacon_config_sta(sc, cur_conf); 771 ath_beacon_config_sta(sc, cur_conf);
760 break; 772 break;
761 default: 773 default:
762 DPRINTF(sc, ATH_DBG_CONFIG, 774 ath_print(common, ATH_DBG_CONFIG,
763 "Unsupported beaconing mode\n"); 775 "Unsupported beaconing mode\n");
764 return; 776 return;
765 } 777 }
766 778
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 55f607b7699e..fb4ac15f3b93 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -14,10 +14,26 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static const struct ath_btcoex_config ath_bt_config = { 0, true, true, 19enum ath_bt_mode {
20 ATH_BT_COEX_MODE_SLOTTED, true, true, 2, 5, true }; 20 ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
21 ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
22 ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
23 ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
24};
25
26struct ath_btcoex_config {
27 u8 bt_time_extend;
28 bool bt_txstate_extend;
29 bool bt_txframe_extend;
30 enum ath_bt_mode bt_mode; /* coexistence mode */
31 bool bt_quiet_collision;
32 bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/
33 u8 bt_priority_time;
34 u8 bt_first_slot_time;
35 bool bt_hold_rx_clear;
36};
21 37
22static const u16 ath_subsysid_tbl[] = { 38static const u16 ath_subsysid_tbl[] = {
23 AR9280_COEX2WIRE_SUBSYSID, 39 AR9280_COEX2WIRE_SUBSYSID,
@@ -29,141 +45,38 @@ static const u16 ath_subsysid_tbl[] = {
29 * Checks the subsystem id of the device to see if it 45 * Checks the subsystem id of the device to see if it
30 * supports btcoex 46 * supports btcoex
31 */ 47 */
32bool ath_btcoex_supported(u16 subsysid) 48bool ath9k_hw_btcoex_supported(struct ath_hw *ah)
33{ 49{
34 int i; 50 int i;
35 51
36 if (!subsysid) 52 if (!ah->hw_version.subsysid)
37 return false; 53 return false;
38 54
39 for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++) 55 for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++)
40 if (subsysid == ath_subsysid_tbl[i]) 56 if (ah->hw_version.subsysid == ath_subsysid_tbl[i])
41 return true; 57 return true;
42 58
43 return false; 59 return false;
44} 60}
45 61
46/* 62void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
47 * Detects if there is any priority bt traffic
48 */
49static void ath_detect_bt_priority(struct ath_softc *sc)
50{
51 struct ath_btcoex_info *btinfo = &sc->btcoex_info;
52
53 if (ath9k_hw_gpio_get(sc->sc_ah, btinfo->btpriority_gpio))
54 btinfo->bt_priority_cnt++;
55
56 if (time_after(jiffies, btinfo->bt_priority_time +
57 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
58 if (btinfo->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
59 DPRINTF(sc, ATH_DBG_BTCOEX,
60 "BT priority traffic detected");
61 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
62 } else {
63 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
64 }
65
66 btinfo->bt_priority_cnt = 0;
67 btinfo->bt_priority_time = jiffies;
68 }
69}
70
71/*
72 * Configures appropriate weight based on stomp type.
73 */
74static void ath_btcoex_bt_stomp(struct ath_softc *sc,
75 struct ath_btcoex_info *btinfo,
76 int stomp_type)
77{
78
79 switch (stomp_type) {
80 case ATH_BTCOEX_STOMP_ALL:
81 ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
82 AR_STOMP_ALL_WLAN_WGHT);
83 break;
84 case ATH_BTCOEX_STOMP_LOW:
85 ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
86 AR_STOMP_LOW_WLAN_WGHT);
87 break;
88 case ATH_BTCOEX_STOMP_NONE:
89 ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
90 AR_STOMP_NONE_WLAN_WGHT);
91 break;
92 default:
93 DPRINTF(sc, ATH_DBG_BTCOEX, "Invalid Stomptype\n");
94 break;
95 }
96
97 ath9k_hw_btcoex_enable(sc->sc_ah);
98}
99
100/*
101 * This is the master bt coex timer which runs for every
102 * 45ms, bt traffic will be given priority during 55% of this
103 * period while wlan gets remaining 45%
104 */
105
106static void ath_btcoex_period_timer(unsigned long data)
107{
108 struct ath_softc *sc = (struct ath_softc *) data;
109 struct ath_btcoex_info *btinfo = &sc->btcoex_info;
110
111 ath_detect_bt_priority(sc);
112
113 spin_lock_bh(&btinfo->btcoex_lock);
114
115 ath_btcoex_bt_stomp(sc, btinfo, btinfo->bt_stomp_type);
116
117 spin_unlock_bh(&btinfo->btcoex_lock);
118
119 if (btinfo->btcoex_period != btinfo->btcoex_no_stomp) {
120 if (btinfo->hw_timer_enabled)
121 ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer);
122
123 ath_gen_timer_start(sc->sc_ah,
124 btinfo->no_stomp_timer,
125 (ath9k_hw_gettsf32(sc->sc_ah) +
126 btinfo->btcoex_no_stomp),
127 btinfo->btcoex_no_stomp * 10);
128 btinfo->hw_timer_enabled = true;
129 }
130
131 mod_timer(&btinfo->period_timer, jiffies +
132 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
133}
134
135/*
136 * Generic tsf based hw timer which configures weight
137 * registers to time slice between wlan and bt traffic
138 */
139
140static void ath_btcoex_no_stomp_timer(void *arg)
141{
142 struct ath_softc *sc = (struct ath_softc *)arg;
143 struct ath_btcoex_info *btinfo = &sc->btcoex_info;
144
145 DPRINTF(sc, ATH_DBG_BTCOEX, "no stomp timer running \n");
146
147 spin_lock_bh(&btinfo->btcoex_lock);
148
149 if (btinfo->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
150 ath_btcoex_bt_stomp(sc, btinfo, ATH_BTCOEX_STOMP_NONE);
151 else if (btinfo->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
152 ath_btcoex_bt_stomp(sc, btinfo, ATH_BTCOEX_STOMP_LOW);
153
154 spin_unlock_bh(&btinfo->btcoex_lock);
155}
156
157static int ath_init_btcoex_info(struct ath_hw *hw,
158 struct ath_btcoex_info *btcoex_info)
159{ 63{
64 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
65 const struct ath_btcoex_config ath_bt_config = {
66 .bt_time_extend = 0,
67 .bt_txstate_extend = true,
68 .bt_txframe_extend = true,
69 .bt_mode = ATH_BT_COEX_MODE_SLOTTED,
70 .bt_quiet_collision = true,
71 .bt_rxclear_polarity = true,
72 .bt_priority_time = 2,
73 .bt_first_slot_time = 5,
74 .bt_hold_rx_clear = true,
75 };
160 u32 i; 76 u32 i;
161 int qnum;
162 77
163 qnum = ath_tx_get_qnum(hw->ah_sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); 78 btcoex_hw->bt_coex_mode =
164 79 (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
165 btcoex_info->bt_coex_mode =
166 (btcoex_info->bt_coex_mode & AR_BT_QCU_THRESH) |
167 SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) | 80 SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
168 SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) | 81 SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
169 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) | 82 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
@@ -174,167 +87,141 @@ static int ath_init_btcoex_info(struct ath_hw *hw,
174 SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) | 87 SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
175 SM(qnum, AR_BT_QCU_THRESH); 88 SM(qnum, AR_BT_QCU_THRESH);
176 89
177 btcoex_info->bt_coex_mode2 = 90 btcoex_hw->bt_coex_mode2 =
178 SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) | 91 SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
179 SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) | 92 SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
180 AR_BT_DISABLE_BT_ANT; 93 AR_BT_DISABLE_BT_ANT;
181 94
182 btcoex_info->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 95 for (i = 0; i < 32; i++)
96 ah->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i;
97}
98EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
183 99
184 btcoex_info->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; 100void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
101{
102 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
185 103
186 btcoex_info->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 104 /* connect bt_active to baseband */
187 btcoex_info->btcoex_period / 100; 105 REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
106 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
107 AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
188 108
189 for (i = 0; i < 32; i++) 109 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
190 hw->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i; 110 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
191 111
192 setup_timer(&btcoex_info->period_timer, ath_btcoex_period_timer, 112 /* Set input mux for bt_active to gpio pin */
193 (unsigned long) hw->ah_sc); 113 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
114 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
115 btcoex_hw->btactive_gpio);
194 116
195 btcoex_info->no_stomp_timer = ath_gen_timer_alloc(hw, 117 /* Configure the desired gpio port for input */
196 ath_btcoex_no_stomp_timer, 118 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
197 ath_btcoex_no_stomp_timer, 119}
198 (void *)hw->ah_sc, AR_FIRST_NDP_TIMER); 120EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
121
122void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
123{
124 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
199 125
200 if (btcoex_info->no_stomp_timer == NULL) 126 /* btcoex 3-wire */
201 return -ENOMEM; 127 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
128 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
129 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
202 130
203 spin_lock_init(&btcoex_info->btcoex_lock); 131 /* Set input mux for bt_prority_async and
132 * bt_active_async to GPIO pins */
133 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
134 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
135 btcoex_hw->btactive_gpio);
204 136
205 return 0; 137 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
138 AR_GPIO_INPUT_MUX1_BT_PRIORITY,
139 btcoex_hw->btpriority_gpio);
140
141 /* Configure the desired GPIO ports for input */
142
143 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
144 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
206} 145}
146EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
207 147
208int ath9k_hw_btcoex_init(struct ath_hw *ah) 148static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
209{ 149{
210 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 150 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
211 int ret = 0;
212
213 if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_2WIRE) {
214 /* connect bt_active to baseband */
215 REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
216 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
217 AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
218
219 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
220 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
221
222 /* Set input mux for bt_active to gpio pin */
223 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
224 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
225 btcoex_info->btactive_gpio);
226
227 /* Configure the desired gpio port for input */
228 ath9k_hw_cfg_gpio_input(ah, btcoex_info->btactive_gpio);
229 } else {
230 /* btcoex 3-wire */
231 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
232 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
233 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
234
235 /* Set input mux for bt_prority_async and
236 * bt_active_async to GPIO pins */
237 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
238 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
239 btcoex_info->btactive_gpio);
240
241 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
242 AR_GPIO_INPUT_MUX1_BT_PRIORITY,
243 btcoex_info->btpriority_gpio);
244
245 /* Configure the desired GPIO ports for input */
246
247 ath9k_hw_cfg_gpio_input(ah, btcoex_info->btactive_gpio);
248 ath9k_hw_cfg_gpio_input(ah, btcoex_info->btpriority_gpio);
249
250 ret = ath_init_btcoex_info(ah, btcoex_info);
251 }
252 151
253 return ret; 152 /* Configure the desired GPIO port for TX_FRAME output */
153 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
154 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
254} 155}
255 156
256void ath9k_hw_btcoex_enable(struct ath_hw *ah) 157void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
158 u32 bt_weight,
159 u32 wlan_weight)
257{ 160{
258 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 161 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
259
260 if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_2WIRE) {
261 /* Configure the desired GPIO port for TX_FRAME output */
262 ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio,
263 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
264 } else {
265 /*
266 * Program coex mode and weight registers to
267 * enable coex 3-wire
268 */
269 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_info->bt_coex_mode);
270 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_info->bt_coex_weights);
271 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_info->bt_coex_mode2);
272
273 REG_RMW_FIELD(ah, AR_QUIET1,
274 AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
275 REG_RMW_FIELD(ah, AR_PCU_MISC,
276 AR_PCU_BT_ANT_PREVENT_RX, 0);
277
278 ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio,
279 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
280 }
281 162
282 REG_RMW(ah, AR_GPIO_PDPU, 163 btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
283 (0x2 << (btcoex_info->btactive_gpio * 2)), 164 SM(wlan_weight, AR_BTCOEX_WL_WGHT);
284 (0x3 << (btcoex_info->btactive_gpio * 2)));
285
286 ah->ah_sc->sc_flags |= SC_OP_BTCOEX_ENABLED;
287} 165}
166EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
288 167
289void ath9k_hw_btcoex_disable(struct ath_hw *ah) 168static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
290{ 169{
291 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 170 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
292 171
293 ath9k_hw_set_gpio(ah, btcoex_info->wlanactive_gpio, 0); 172 /*
173 * Program coex mode and weight registers to
174 * enable coex 3-wire
175 */
176 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
177 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
178 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
294 179
295 ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio, 180 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
296 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 181 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
297 182
298 if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) { 183 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
299 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE); 184 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
300 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
301 REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
302 }
303
304 ah->ah_sc->sc_flags &= ~SC_OP_BTCOEX_ENABLED;
305} 185}
306 186
307/* 187void ath9k_hw_btcoex_enable(struct ath_hw *ah)
308 * Pause btcoex timer and bt duty cycle timer
309 */
310void ath_btcoex_timer_pause(struct ath_softc *sc,
311 struct ath_btcoex_info *btinfo)
312{ 188{
189 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
313 190
314 del_timer_sync(&btinfo->period_timer); 191 switch (btcoex_hw->scheme) {
192 case ATH_BTCOEX_CFG_NONE:
193 break;
194 case ATH_BTCOEX_CFG_2WIRE:
195 ath9k_hw_btcoex_enable_2wire(ah);
196 break;
197 case ATH_BTCOEX_CFG_3WIRE:
198 ath9k_hw_btcoex_enable_3wire(ah);
199 break;
200 }
315 201
316 if (btinfo->hw_timer_enabled) 202 REG_RMW(ah, AR_GPIO_PDPU,
317 ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer); 203 (0x2 << (btcoex_hw->btactive_gpio * 2)),
204 (0x3 << (btcoex_hw->btactive_gpio * 2)));
318 205
319 btinfo->hw_timer_enabled = false; 206 ah->btcoex_hw.enabled = true;
320} 207}
208EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
321 209
322/* 210void ath9k_hw_btcoex_disable(struct ath_hw *ah)
323 * (Re)start btcoex timers
324 */
325void ath_btcoex_timer_resume(struct ath_softc *sc,
326 struct ath_btcoex_info *btinfo)
327{ 211{
212 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
328 213
329 DPRINTF(sc, ATH_DBG_BTCOEX, "Starting btcoex timers"); 214 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
330 215
331 /* make sure duty cycle timer is also stopped when resuming */ 216 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
332 if (btinfo->hw_timer_enabled) 217 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
333 ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer);
334 218
335 btinfo->bt_priority_cnt = 0; 219 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
336 btinfo->bt_priority_time = jiffies; 220 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
337 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED; 221 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
222 REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
223 }
338 224
339 mod_timer(&btinfo->period_timer, jiffies); 225 ah->btcoex_hw.enabled = false;
340} 226}
227EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 297b027fd3c3..1ba31a73317c 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -17,6 +17,8 @@
17#ifndef BTCOEX_H 17#ifndef BTCOEX_H
18#define BTCOEX_H 18#define BTCOEX_H
19 19
20#include "hw.h"
21
20#define ATH_WLANACTIVE_GPIO 5 22#define ATH_WLANACTIVE_GPIO 5
21#define ATH_BTACTIVE_GPIO 6 23#define ATH_BTACTIVE_GPIO 6
22#define ATH_BTPRIORITY_GPIO 7 24#define ATH_BTPRIORITY_GPIO 7
@@ -34,67 +36,25 @@ enum ath_btcoex_scheme {
34 ATH_BTCOEX_CFG_3WIRE, 36 ATH_BTCOEX_CFG_3WIRE,
35}; 37};
36 38
37enum ath_stomp_type { 39struct ath_btcoex_hw {
38 ATH_BTCOEX_NO_STOMP, 40 enum ath_btcoex_scheme scheme;
39 ATH_BTCOEX_STOMP_ALL, 41 bool enabled;
40 ATH_BTCOEX_STOMP_LOW,
41 ATH_BTCOEX_STOMP_NONE
42};
43
44enum ath_bt_mode {
45 ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
46 ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
47 ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
48 ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
49};
50
51struct ath_btcoex_config {
52 u8 bt_time_extend;
53 bool bt_txstate_extend;
54 bool bt_txframe_extend;
55 enum ath_bt_mode bt_mode; /* coexistence mode */
56 bool bt_quiet_collision;
57 bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/
58 u8 bt_priority_time;
59 u8 bt_first_slot_time;
60 bool bt_hold_rx_clear;
61};
62
63struct ath_btcoex_info {
64 enum ath_btcoex_scheme btcoex_scheme;
65 u8 wlanactive_gpio; 42 u8 wlanactive_gpio;
66 u8 btactive_gpio; 43 u8 btactive_gpio;
67 u8 btpriority_gpio; 44 u8 btpriority_gpio;
68 u8 bt_duty_cycle; /* BT duty cycle in percentage */
69 int bt_stomp_type; /* Types of BT stomping */
70 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ 45 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
71 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ 46 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
72 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ 47 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
73 u32 btcoex_no_stomp; /* in usec */
74 u32 btcoex_period; /* in usec */
75 u32 bt_priority_cnt;
76 unsigned long bt_priority_time;
77 bool hw_timer_enabled;
78 spinlock_t btcoex_lock;
79 struct timer_list period_timer; /* Timer for BT period */
80 struct ath_gen_timer *no_stomp_timer; /*Timer for no BT stomping*/
81}; 48};
82 49
83bool ath_btcoex_supported(u16 subsysid); 50bool ath9k_hw_btcoex_supported(struct ath_hw *ah);
84int ath9k_hw_btcoex_init(struct ath_hw *ah); 51void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
52void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
53void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
54void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
55 u32 bt_weight,
56 u32 wlan_weight);
85void ath9k_hw_btcoex_enable(struct ath_hw *ah); 57void ath9k_hw_btcoex_enable(struct ath_hw *ah);
86void ath9k_hw_btcoex_disable(struct ath_hw *ah); 58void ath9k_hw_btcoex_disable(struct ath_hw *ah);
87void ath_btcoex_timer_resume(struct ath_softc *sc,
88 struct ath_btcoex_info *btinfo);
89void ath_btcoex_timer_pause(struct ath_softc *sc,
90 struct ath_btcoex_info *btinfo);
91
92static inline void ath_btcoex_set_weight(struct ath_btcoex_info *btcoex_info,
93 u32 bt_weight,
94 u32 wlan_weight)
95{
96 btcoex_info->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
97 SM(wlan_weight, AR_BTCOEX_WL_WGHT);
98}
99 59
100#endif 60#endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 0ad6d0b76e9e..238a5744d8e9 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19/* We can tune this as we go by monitoring really low values */ 19/* We can tune this as we go by monitoring really low values */
20#define ATH9K_NF_TOO_LOW -60 20#define ATH9K_NF_TOO_LOW -60
@@ -26,11 +26,11 @@
26static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf) 26static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf)
27{ 27{
28 if (nf > ATH9K_NF_TOO_LOW) { 28 if (nf > ATH9K_NF_TOO_LOW) {
29 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 29 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
30 "noise floor value detected (%d) is " 30 "noise floor value detected (%d) is "
31 "lower than what we think is a " 31 "lower than what we think is a "
32 "reasonable value (%d)\n", 32 "reasonable value (%d)\n",
33 nf, ATH9K_NF_TOO_LOW); 33 nf, ATH9K_NF_TOO_LOW);
34 return false; 34 return false;
35 } 35 }
36 return true; 36 return true;
@@ -89,6 +89,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
89static void ath9k_hw_do_getnf(struct ath_hw *ah, 89static void ath9k_hw_do_getnf(struct ath_hw *ah,
90 int16_t nfarray[NUM_NF_READINGS]) 90 int16_t nfarray[NUM_NF_READINGS])
91{ 91{
92 struct ath_common *common = ath9k_hw_common(ah);
92 int16_t nf; 93 int16_t nf;
93 94
94 if (AR_SREV_9280_10_OR_LATER(ah)) 95 if (AR_SREV_9280_10_OR_LATER(ah))
@@ -98,8 +99,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
98 99
99 if (nf & 0x100) 100 if (nf & 0x100)
100 nf = 0 - ((nf ^ 0x1ff) + 1); 101 nf = 0 - ((nf ^ 0x1ff) + 1);
101 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 102 ath_print(common, ATH_DBG_CALIBRATE,
102 "NF calibrated [ctl] [chain 0] is %d\n", nf); 103 "NF calibrated [ctl] [chain 0] is %d\n", nf);
103 nfarray[0] = nf; 104 nfarray[0] = nf;
104 105
105 if (!AR_SREV_9285(ah)) { 106 if (!AR_SREV_9285(ah)) {
@@ -112,8 +113,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
112 113
113 if (nf & 0x100) 114 if (nf & 0x100)
114 nf = 0 - ((nf ^ 0x1ff) + 1); 115 nf = 0 - ((nf ^ 0x1ff) + 1);
115 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 116 ath_print(common, ATH_DBG_CALIBRATE,
116 "NF calibrated [ctl] [chain 1] is %d\n", nf); 117 "NF calibrated [ctl] [chain 1] is %d\n", nf);
117 nfarray[1] = nf; 118 nfarray[1] = nf;
118 119
119 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) { 120 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
@@ -121,8 +122,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
121 AR_PHY_CH2_MINCCA_PWR); 122 AR_PHY_CH2_MINCCA_PWR);
122 if (nf & 0x100) 123 if (nf & 0x100)
123 nf = 0 - ((nf ^ 0x1ff) + 1); 124 nf = 0 - ((nf ^ 0x1ff) + 1);
124 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 125 ath_print(common, ATH_DBG_CALIBRATE,
125 "NF calibrated [ctl] [chain 2] is %d\n", nf); 126 "NF calibrated [ctl] [chain 2] is %d\n", nf);
126 nfarray[2] = nf; 127 nfarray[2] = nf;
127 } 128 }
128 } 129 }
@@ -136,8 +137,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
136 137
137 if (nf & 0x100) 138 if (nf & 0x100)
138 nf = 0 - ((nf ^ 0x1ff) + 1); 139 nf = 0 - ((nf ^ 0x1ff) + 1);
139 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 140 ath_print(common, ATH_DBG_CALIBRATE,
140 "NF calibrated [ext] [chain 0] is %d\n", nf); 141 "NF calibrated [ext] [chain 0] is %d\n", nf);
141 nfarray[3] = nf; 142 nfarray[3] = nf;
142 143
143 if (!AR_SREV_9285(ah)) { 144 if (!AR_SREV_9285(ah)) {
@@ -150,8 +151,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
150 151
151 if (nf & 0x100) 152 if (nf & 0x100)
152 nf = 0 - ((nf ^ 0x1ff) + 1); 153 nf = 0 - ((nf ^ 0x1ff) + 1);
153 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 154 ath_print(common, ATH_DBG_CALIBRATE,
154 "NF calibrated [ext] [chain 1] is %d\n", nf); 155 "NF calibrated [ext] [chain 1] is %d\n", nf);
155 nfarray[4] = nf; 156 nfarray[4] = nf;
156 157
157 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) { 158 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
@@ -159,8 +160,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
159 AR_PHY_CH2_EXT_MINCCA_PWR); 160 AR_PHY_CH2_EXT_MINCCA_PWR);
160 if (nf & 0x100) 161 if (nf & 0x100)
161 nf = 0 - ((nf ^ 0x1ff) + 1); 162 nf = 0 - ((nf ^ 0x1ff) + 1);
162 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 163 ath_print(common, ATH_DBG_CALIBRATE,
163 "NF calibrated [ext] [chain 2] is %d\n", nf); 164 "NF calibrated [ext] [chain 2] is %d\n", nf);
164 nfarray[5] = nf; 165 nfarray[5] = nf;
165 } 166 }
166 } 167 }
@@ -188,6 +189,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
188static void ath9k_hw_setup_calibration(struct ath_hw *ah, 189static void ath9k_hw_setup_calibration(struct ath_hw *ah,
189 struct ath9k_cal_list *currCal) 190 struct ath9k_cal_list *currCal)
190{ 191{
192 struct ath_common *common = ath9k_hw_common(ah);
193
191 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0), 194 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
192 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX, 195 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
193 currCal->calData->calCountMax); 196 currCal->calData->calCountMax);
@@ -195,23 +198,23 @@ static void ath9k_hw_setup_calibration(struct ath_hw *ah,
195 switch (currCal->calData->calType) { 198 switch (currCal->calData->calType) {
196 case IQ_MISMATCH_CAL: 199 case IQ_MISMATCH_CAL:
197 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); 200 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
198 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 201 ath_print(common, ATH_DBG_CALIBRATE,
199 "starting IQ Mismatch Calibration\n"); 202 "starting IQ Mismatch Calibration\n");
200 break; 203 break;
201 case ADC_GAIN_CAL: 204 case ADC_GAIN_CAL:
202 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN); 205 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
203 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 206 ath_print(common, ATH_DBG_CALIBRATE,
204 "starting ADC Gain Calibration\n"); 207 "starting ADC Gain Calibration\n");
205 break; 208 break;
206 case ADC_DC_CAL: 209 case ADC_DC_CAL:
207 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER); 210 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
208 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 211 ath_print(common, ATH_DBG_CALIBRATE,
209 "starting ADC DC Calibration\n"); 212 "starting ADC DC Calibration\n");
210 break; 213 break;
211 case ADC_DC_INIT_CAL: 214 case ADC_DC_INIT_CAL:
212 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT); 215 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
213 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 216 ath_print(common, ATH_DBG_CALIBRATE,
214 "starting Init ADC DC Calibration\n"); 217 "starting Init ADC DC Calibration\n");
215 break; 218 break;
216 } 219 }
217 220
@@ -278,7 +281,7 @@ static bool ath9k_hw_per_calibration(struct ath_hw *ah,
278static bool ath9k_hw_iscal_supported(struct ath_hw *ah, 281static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
279 enum ath9k_cal_types calType) 282 enum ath9k_cal_types calType)
280{ 283{
281 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 284 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
282 285
283 switch (calType & ah->supp_cals) { 286 switch (calType & ah->supp_cals) {
284 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */ 287 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
@@ -304,11 +307,11 @@ static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
304 REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 307 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
305 ah->totalIqCorrMeas[i] += 308 ah->totalIqCorrMeas[i] +=
306 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); 309 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
307 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 310 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
308 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", 311 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
309 ah->cal_samples, i, ah->totalPowerMeasI[i], 312 ah->cal_samples, i, ah->totalPowerMeasI[i],
310 ah->totalPowerMeasQ[i], 313 ah->totalPowerMeasQ[i],
311 ah->totalIqCorrMeas[i]); 314 ah->totalIqCorrMeas[i]);
312 } 315 }
313} 316}
314 317
@@ -326,14 +329,14 @@ static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
326 ah->totalAdcQEvenPhase[i] += 329 ah->totalAdcQEvenPhase[i] +=
327 REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 330 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
328 331
329 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 332 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
330 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 333 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
331 "oddq=0x%08x; evenq=0x%08x;\n", 334 "oddq=0x%08x; evenq=0x%08x;\n",
332 ah->cal_samples, i, 335 ah->cal_samples, i,
333 ah->totalAdcIOddPhase[i], 336 ah->totalAdcIOddPhase[i],
334 ah->totalAdcIEvenPhase[i], 337 ah->totalAdcIEvenPhase[i],
335 ah->totalAdcQOddPhase[i], 338 ah->totalAdcQOddPhase[i],
336 ah->totalAdcQEvenPhase[i]); 339 ah->totalAdcQEvenPhase[i]);
337 } 340 }
338} 341}
339 342
@@ -351,19 +354,20 @@ static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
351 ah->totalAdcDcOffsetQEvenPhase[i] += 354 ah->totalAdcDcOffsetQEvenPhase[i] +=
352 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 355 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
353 356
354 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 357 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
355 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 358 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
356 "oddq=0x%08x; evenq=0x%08x;\n", 359 "oddq=0x%08x; evenq=0x%08x;\n",
357 ah->cal_samples, i, 360 ah->cal_samples, i,
358 ah->totalAdcDcOffsetIOddPhase[i], 361 ah->totalAdcDcOffsetIOddPhase[i],
359 ah->totalAdcDcOffsetIEvenPhase[i], 362 ah->totalAdcDcOffsetIEvenPhase[i],
360 ah->totalAdcDcOffsetQOddPhase[i], 363 ah->totalAdcDcOffsetQOddPhase[i],
361 ah->totalAdcDcOffsetQEvenPhase[i]); 364 ah->totalAdcDcOffsetQEvenPhase[i]);
362 } 365 }
363} 366}
364 367
365static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) 368static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
366{ 369{
370 struct ath_common *common = ath9k_hw_common(ah);
367 u32 powerMeasQ, powerMeasI, iqCorrMeas; 371 u32 powerMeasQ, powerMeasI, iqCorrMeas;
368 u32 qCoffDenom, iCoffDenom; 372 u32 qCoffDenom, iCoffDenom;
369 int32_t qCoff, iCoff; 373 int32_t qCoff, iCoff;
@@ -374,13 +378,13 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
374 powerMeasQ = ah->totalPowerMeasQ[i]; 378 powerMeasQ = ah->totalPowerMeasQ[i];
375 iqCorrMeas = ah->totalIqCorrMeas[i]; 379 iqCorrMeas = ah->totalIqCorrMeas[i];
376 380
377 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 381 ath_print(common, ATH_DBG_CALIBRATE,
378 "Starting IQ Cal and Correction for Chain %d\n", 382 "Starting IQ Cal and Correction for Chain %d\n",
379 i); 383 i);
380 384
381 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 385 ath_print(common, ATH_DBG_CALIBRATE,
382 "Orignal: Chn %diq_corr_meas = 0x%08x\n", 386 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
383 i, ah->totalIqCorrMeas[i]); 387 i, ah->totalIqCorrMeas[i]);
384 388
385 iqCorrNeg = 0; 389 iqCorrNeg = 0;
386 390
@@ -389,27 +393,28 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
389 iqCorrNeg = 1; 393 iqCorrNeg = 1;
390 } 394 }
391 395
392 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 396 ath_print(common, ATH_DBG_CALIBRATE,
393 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); 397 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
394 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 398 ath_print(common, ATH_DBG_CALIBRATE,
395 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); 399 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
396 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", 400 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
397 iqCorrNeg); 401 iqCorrNeg);
398 402
399 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128; 403 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
400 qCoffDenom = powerMeasQ / 64; 404 qCoffDenom = powerMeasQ / 64;
401 405
402 if (powerMeasQ != 0) { 406 if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
407 (qCoffDenom != 0)) {
403 iCoff = iqCorrMeas / iCoffDenom; 408 iCoff = iqCorrMeas / iCoffDenom;
404 qCoff = powerMeasI / qCoffDenom - 64; 409 qCoff = powerMeasI / qCoffDenom - 64;
405 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 410 ath_print(common, ATH_DBG_CALIBRATE,
406 "Chn %d iCoff = 0x%08x\n", i, iCoff); 411 "Chn %d iCoff = 0x%08x\n", i, iCoff);
407 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 412 ath_print(common, ATH_DBG_CALIBRATE,
408 "Chn %d qCoff = 0x%08x\n", i, qCoff); 413 "Chn %d qCoff = 0x%08x\n", i, qCoff);
409 414
410 iCoff = iCoff & 0x3f; 415 iCoff = iCoff & 0x3f;
411 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 416 ath_print(common, ATH_DBG_CALIBRATE,
412 "New: Chn %d iCoff = 0x%08x\n", i, iCoff); 417 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
413 if (iqCorrNeg == 0x0) 418 if (iqCorrNeg == 0x0)
414 iCoff = 0x40 - iCoff; 419 iCoff = 0x40 - iCoff;
415 420
@@ -418,9 +423,9 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
418 else if (qCoff <= -16) 423 else if (qCoff <= -16)
419 qCoff = 16; 424 qCoff = 16;
420 425
421 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 426 ath_print(common, ATH_DBG_CALIBRATE,
422 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", 427 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
423 i, iCoff, qCoff); 428 i, iCoff, qCoff);
424 429
425 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), 430 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
426 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, 431 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
@@ -428,9 +433,9 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
428 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), 433 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
429 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, 434 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
430 qCoff); 435 qCoff);
431 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 436 ath_print(common, ATH_DBG_CALIBRATE,
432 "IQ Cal and Correction done for Chain %d\n", 437 "IQ Cal and Correction done for Chain %d\n",
433 i); 438 i);
434 } 439 }
435 } 440 }
436 441
@@ -440,6 +445,7 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
440 445
441static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains) 446static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
442{ 447{
448 struct ath_common *common = ath9k_hw_common(ah);
443 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset; 449 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
444 u32 qGainMismatch, iGainMismatch, val, i; 450 u32 qGainMismatch, iGainMismatch, val, i;
445 451
@@ -449,21 +455,21 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
449 qOddMeasOffset = ah->totalAdcQOddPhase[i]; 455 qOddMeasOffset = ah->totalAdcQOddPhase[i];
450 qEvenMeasOffset = ah->totalAdcQEvenPhase[i]; 456 qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
451 457
452 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 458 ath_print(common, ATH_DBG_CALIBRATE,
453 "Starting ADC Gain Cal for Chain %d\n", i); 459 "Starting ADC Gain Cal for Chain %d\n", i);
454 460
455 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 461 ath_print(common, ATH_DBG_CALIBRATE,
456 "Chn %d pwr_meas_odd_i = 0x%08x\n", i, 462 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
457 iOddMeasOffset); 463 iOddMeasOffset);
458 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 464 ath_print(common, ATH_DBG_CALIBRATE,
459 "Chn %d pwr_meas_even_i = 0x%08x\n", i, 465 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
460 iEvenMeasOffset); 466 iEvenMeasOffset);
461 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 467 ath_print(common, ATH_DBG_CALIBRATE,
462 "Chn %d pwr_meas_odd_q = 0x%08x\n", i, 468 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
463 qOddMeasOffset); 469 qOddMeasOffset);
464 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 470 ath_print(common, ATH_DBG_CALIBRATE,
465 "Chn %d pwr_meas_even_q = 0x%08x\n", i, 471 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
466 qEvenMeasOffset); 472 qEvenMeasOffset);
467 473
468 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) { 474 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
469 iGainMismatch = 475 iGainMismatch =
@@ -473,20 +479,20 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
473 ((qOddMeasOffset * 32) / 479 ((qOddMeasOffset * 32) /
474 qEvenMeasOffset) & 0x3f; 480 qEvenMeasOffset) & 0x3f;
475 481
476 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 482 ath_print(common, ATH_DBG_CALIBRATE,
477 "Chn %d gain_mismatch_i = 0x%08x\n", i, 483 "Chn %d gain_mismatch_i = 0x%08x\n", i,
478 iGainMismatch); 484 iGainMismatch);
479 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 485 ath_print(common, ATH_DBG_CALIBRATE,
480 "Chn %d gain_mismatch_q = 0x%08x\n", i, 486 "Chn %d gain_mismatch_q = 0x%08x\n", i,
481 qGainMismatch); 487 qGainMismatch);
482 488
483 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); 489 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
484 val &= 0xfffff000; 490 val &= 0xfffff000;
485 val |= (qGainMismatch) | (iGainMismatch << 6); 491 val |= (qGainMismatch) | (iGainMismatch << 6);
486 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); 492 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
487 493
488 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 494 ath_print(common, ATH_DBG_CALIBRATE,
489 "ADC Gain Cal done for Chain %d\n", i); 495 "ADC Gain Cal done for Chain %d\n", i);
490 } 496 }
491 } 497 }
492 498
@@ -497,6 +503,7 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
497 503
498static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains) 504static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
499{ 505{
506 struct ath_common *common = ath9k_hw_common(ah);
500 u32 iOddMeasOffset, iEvenMeasOffset, val, i; 507 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
501 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch; 508 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
502 const struct ath9k_percal_data *calData = 509 const struct ath9k_percal_data *calData =
@@ -510,41 +517,41 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
510 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i]; 517 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
511 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i]; 518 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
512 519
513 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 520 ath_print(common, ATH_DBG_CALIBRATE,
514 "Starting ADC DC Offset Cal for Chain %d\n", i); 521 "Starting ADC DC Offset Cal for Chain %d\n", i);
515 522
516 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 523 ath_print(common, ATH_DBG_CALIBRATE,
517 "Chn %d pwr_meas_odd_i = %d\n", i, 524 "Chn %d pwr_meas_odd_i = %d\n", i,
518 iOddMeasOffset); 525 iOddMeasOffset);
519 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 526 ath_print(common, ATH_DBG_CALIBRATE,
520 "Chn %d pwr_meas_even_i = %d\n", i, 527 "Chn %d pwr_meas_even_i = %d\n", i,
521 iEvenMeasOffset); 528 iEvenMeasOffset);
522 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 529 ath_print(common, ATH_DBG_CALIBRATE,
523 "Chn %d pwr_meas_odd_q = %d\n", i, 530 "Chn %d pwr_meas_odd_q = %d\n", i,
524 qOddMeasOffset); 531 qOddMeasOffset);
525 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 532 ath_print(common, ATH_DBG_CALIBRATE,
526 "Chn %d pwr_meas_even_q = %d\n", i, 533 "Chn %d pwr_meas_even_q = %d\n", i,
527 qEvenMeasOffset); 534 qEvenMeasOffset);
528 535
529 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) / 536 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
530 numSamples) & 0x1ff; 537 numSamples) & 0x1ff;
531 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) / 538 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
532 numSamples) & 0x1ff; 539 numSamples) & 0x1ff;
533 540
534 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 541 ath_print(common, ATH_DBG_CALIBRATE,
535 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i, 542 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
536 iDcMismatch); 543 iDcMismatch);
537 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 544 ath_print(common, ATH_DBG_CALIBRATE,
538 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i, 545 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
539 qDcMismatch); 546 qDcMismatch);
540 547
541 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); 548 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
542 val &= 0xc0000fff; 549 val &= 0xc0000fff;
543 val |= (qDcMismatch << 12) | (iDcMismatch << 21); 550 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
544 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); 551 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
545 552
546 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 553 ath_print(common, ATH_DBG_CALIBRATE,
547 "ADC DC Offset Cal done for Chain %d\n", i); 554 "ADC DC Offset Cal done for Chain %d\n", i);
548 } 555 }
549 556
550 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), 557 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
@@ -555,7 +562,8 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
555/* This is done for the currently configured channel */ 562/* This is done for the currently configured channel */
556bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 563bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
557{ 564{
558 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 565 struct ath_common *common = ath9k_hw_common(ah);
566 struct ieee80211_conf *conf = &common->hw->conf;
559 struct ath9k_cal_list *currCal = ah->cal_list_curr; 567 struct ath9k_cal_list *currCal = ah->cal_list_curr;
560 568
561 if (!ah->curchan) 569 if (!ah->curchan)
@@ -568,24 +576,25 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
568 return true; 576 return true;
569 577
570 if (currCal->calState != CAL_DONE) { 578 if (currCal->calState != CAL_DONE) {
571 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 579 ath_print(common, ATH_DBG_CALIBRATE,
572 "Calibration state incorrect, %d\n", 580 "Calibration state incorrect, %d\n",
573 currCal->calState); 581 currCal->calState);
574 return true; 582 return true;
575 } 583 }
576 584
577 if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType)) 585 if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType))
578 return true; 586 return true;
579 587
580 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 588 ath_print(common, ATH_DBG_CALIBRATE,
581 "Resetting Cal %d state for channel %u\n", 589 "Resetting Cal %d state for channel %u\n",
582 currCal->calData->calType, conf->channel->center_freq); 590 currCal->calData->calType, conf->channel->center_freq);
583 591
584 ah->curchan->CalValid &= ~currCal->calData->calType; 592 ah->curchan->CalValid &= ~currCal->calData->calType;
585 currCal->calState = CAL_WAITING; 593 currCal->calState = CAL_WAITING;
586 594
587 return false; 595 return false;
588} 596}
597EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
589 598
590void ath9k_hw_start_nfcal(struct ath_hw *ah) 599void ath9k_hw_start_nfcal(struct ath_hw *ah)
591{ 600{
@@ -645,11 +654,11 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
645 AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 654 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
646 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 655 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
647 656
648 for (j = 0; j < 1000; j++) { 657 for (j = 0; j < 5; j++) {
649 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & 658 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
650 AR_PHY_AGC_CONTROL_NF) == 0) 659 AR_PHY_AGC_CONTROL_NF) == 0)
651 break; 660 break;
652 udelay(10); 661 udelay(50);
653 } 662 }
654 663
655 for (i = 0; i < NUM_NF_READINGS; i++) { 664 for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -665,6 +674,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
665int16_t ath9k_hw_getnf(struct ath_hw *ah, 674int16_t ath9k_hw_getnf(struct ath_hw *ah,
666 struct ath9k_channel *chan) 675 struct ath9k_channel *chan)
667{ 676{
677 struct ath_common *common = ath9k_hw_common(ah);
668 int16_t nf, nfThresh; 678 int16_t nf, nfThresh;
669 int16_t nfarray[NUM_NF_READINGS] = { 0 }; 679 int16_t nfarray[NUM_NF_READINGS] = { 0 };
670 struct ath9k_nfcal_hist *h; 680 struct ath9k_nfcal_hist *h;
@@ -672,8 +682,8 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
672 682
673 chan->channelFlags &= (~CHANNEL_CW_INT); 683 chan->channelFlags &= (~CHANNEL_CW_INT);
674 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 684 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
675 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 685 ath_print(common, ATH_DBG_CALIBRATE,
676 "NF did not complete in calibration window\n"); 686 "NF did not complete in calibration window\n");
677 nf = 0; 687 nf = 0;
678 chan->rawNoiseFloor = nf; 688 chan->rawNoiseFloor = nf;
679 return chan->rawNoiseFloor; 689 return chan->rawNoiseFloor;
@@ -682,10 +692,10 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
682 nf = nfarray[0]; 692 nf = nfarray[0];
683 if (getNoiseFloorThresh(ah, c->band, &nfThresh) 693 if (getNoiseFloorThresh(ah, c->band, &nfThresh)
684 && nf > nfThresh) { 694 && nf > nfThresh) {
685 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 695 ath_print(common, ATH_DBG_CALIBRATE,
686 "noise floor failed detected; " 696 "noise floor failed detected; "
687 "detected %d, threshold %d\n", 697 "detected %d, threshold %d\n",
688 nf, nfThresh); 698 nf, nfThresh);
689 chan->channelFlags |= CHANNEL_CW_INT; 699 chan->channelFlags |= CHANNEL_CW_INT;
690 } 700 }
691 } 701 }
@@ -737,51 +747,73 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
737 747
738 return nf; 748 return nf;
739} 749}
750EXPORT_SYMBOL(ath9k_hw_getchan_noise);
740 751
741static void ath9k_olc_temp_compensation(struct ath_hw *ah) 752static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
742{ 753{
743 u32 rddata, i; 754 u32 rddata;
744 int delta, currPDADC, regval, slope; 755 int32_t delta, currPDADC, slope;
745 756
746 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4); 757 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
747 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT); 758 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
748 759
760 if (ah->initPDADC == 0 || currPDADC == 0) {
761 /*
762 * Zero value indicates that no frames have been transmitted yet,
763 * can't do temperature compensation until frames are transmitted.
764 */
765 return;
766 } else {
767 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
768
769 if (slope == 0) { /* to avoid divide by zero case */
770 delta = 0;
771 } else {
772 delta = ((currPDADC - ah->initPDADC)*4) / slope;
773 }
774 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
775 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
776 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
777 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
778 }
779}
780
781static void ath9k_olc_temp_compensation(struct ath_hw *ah)
782{
783 u32 rddata, i;
784 int delta, currPDADC, regval;
749 785
750 if (OLC_FOR_AR9287_10_LATER) { 786 if (OLC_FOR_AR9287_10_LATER) {
787 ath9k_olc_temp_compensation_9287(ah);
788 } else {
789 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
790 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
791
751 if (ah->initPDADC == 0 || currPDADC == 0) { 792 if (ah->initPDADC == 0 || currPDADC == 0) {
752 return; 793 return;
753 } else { 794 } else {
754 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE); 795 if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
755 if (slope == 0) 796 delta = (currPDADC - ah->initPDADC + 4) / 8;
756 delta = 0;
757 else 797 else
758 delta = ((currPDADC - ah->initPDADC)*4) / slope; 798 delta = (currPDADC - ah->initPDADC + 5) / 10;
759 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11, 799
760 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta); 800 if (delta != ah->PDADCdelta) {
761 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11, 801 ah->PDADCdelta = delta;
762 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta); 802 for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
763 } 803 regval = ah->originalGain[i] - delta;
764 } else { 804 if (regval < 0)
765 if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G)) 805 regval = 0;
766 delta = (currPDADC - ah->initPDADC + 4) / 8; 806
767 else 807 REG_RMW_FIELD(ah,
768 delta = (currPDADC - ah->initPDADC + 5) / 10; 808 AR_PHY_TX_GAIN_TBL1 + i * 4,
769 809 AR_PHY_TX_GAIN, regval);
770 if (delta != ah->PDADCdelta) { 810 }
771 ah->PDADCdelta = delta;
772 for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
773 regval = ah->originalGain[i] - delta;
774 if (regval < 0)
775 regval = 0;
776
777 REG_RMW_FIELD(ah, AR_PHY_TX_GAIN_TBL1 + i * 4,
778 AR_PHY_TX_GAIN, regval);
779 } 811 }
780 } 812 }
781 } 813 }
782} 814}
783 815
784static void ath9k_hw_9271_pa_cal(struct ath_hw *ah) 816static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
785{ 817{
786 u32 regVal; 818 u32 regVal;
787 unsigned int i; 819 unsigned int i;
@@ -845,7 +877,7 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
845 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0); 877 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
846 878
847 /* find off_6_1; */ 879 /* find off_6_1; */
848 for (i = 6; i >= 0; i--) { 880 for (i = 6; i > 0; i--) {
849 regVal = REG_READ(ah, 0x7834); 881 regVal = REG_READ(ah, 0x7834);
850 regVal |= (1 << (20 + i)); 882 regVal |= (1 << (20 + i));
851 REG_WRITE(ah, 0x7834, regVal); 883 REG_WRITE(ah, 0x7834, regVal);
@@ -857,10 +889,19 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
857 REG_WRITE(ah, 0x7834, regVal); 889 REG_WRITE(ah, 0x7834, regVal);
858 } 890 }
859 891
860 /* Empirical offset correction */ 892 regVal = (regVal >>20) & 0x7f;
861#if 0 893
862 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0x20); 894 /* Update PA cal info */
863#endif 895 if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
896 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
897 ah->pacal_info.max_skipcount =
898 2 * ah->pacal_info.max_skipcount;
899 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
900 } else {
901 ah->pacal_info.max_skipcount = 1;
902 ah->pacal_info.skipcount = 0;
903 ah->pacal_info.prev_offset = regVal;
904 }
864 905
865 regVal = REG_READ(ah, 0x7834); 906 regVal = REG_READ(ah, 0x7834);
866 regVal |= 0x1; 907 regVal |= 0x1;
@@ -875,7 +916,7 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
875 916
876static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset) 917static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
877{ 918{
878 919 struct ath_common *common = ath9k_hw_common(ah);
879 u32 regVal; 920 u32 regVal;
880 int i, offset, offs_6_1, offs_0; 921 int i, offset, offs_6_1, offs_0;
881 u32 ccomp_org, reg_field; 922 u32 ccomp_org, reg_field;
@@ -889,7 +930,7 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
889 { 0x7838, 0 }, 930 { 0x7838, 0 },
890 }; 931 };
891 932
892 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "Running PA Calibration\n"); 933 ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
893 934
894 /* PA CAL is not needed for high power solution */ 935 /* PA CAL is not needed for high power solution */
895 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 936 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -1011,7 +1052,7 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1011 if (longcal) { 1052 if (longcal) {
1012 /* Do periodic PAOffset Cal */ 1053 /* Do periodic PAOffset Cal */
1013 if (AR_SREV_9271(ah)) 1054 if (AR_SREV_9271(ah))
1014 ath9k_hw_9271_pa_cal(ah); 1055 ath9k_hw_9271_pa_cal(ah, false);
1015 else if (AR_SREV_9285_11_OR_LATER(ah)) { 1056 else if (AR_SREV_9285_11_OR_LATER(ah)) {
1016 if (!ah->pacal_info.skipcount) 1057 if (!ah->pacal_info.skipcount)
1017 ath9k_hw_9285_pa_cal(ah, false); 1058 ath9k_hw_9285_pa_cal(ah, false);
@@ -1036,9 +1077,13 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1036 1077
1037 return iscaldone; 1078 return iscaldone;
1038} 1079}
1080EXPORT_SYMBOL(ath9k_hw_calibrate);
1039 1081
1082/* Carrier leakage Calibration fix */
1040static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan) 1083static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1041{ 1084{
1085 struct ath_common *common = ath9k_hw_common(ah);
1086
1042 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); 1087 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1043 if (IS_CHAN_HT20(chan)) { 1088 if (IS_CHAN_HT20(chan)) {
1044 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE); 1089 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
@@ -1049,9 +1094,9 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1049 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); 1094 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1050 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, 1095 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
1051 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { 1096 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
1052 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "offset " 1097 ath_print(common, ATH_DBG_CALIBRATE, "offset "
1053 "calibration failed to complete in " 1098 "calibration failed to complete in "
1054 "1ms; noisy ??\n"); 1099 "1ms; noisy ??\n");
1055 return false; 1100 return false;
1056 } 1101 }
1057 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN); 1102 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
@@ -1064,8 +1109,8 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1064 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); 1109 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1065 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 1110 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1066 0, AH_WAIT_TIMEOUT)) { 1111 0, AH_WAIT_TIMEOUT)) {
1067 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "offset calibration " 1112 ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
1068 "failed to complete in 1ms; noisy ??\n"); 1113 "failed to complete in 1ms; noisy ??\n");
1069 return false; 1114 return false;
1070 } 1115 }
1071 1116
@@ -1078,7 +1123,9 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1078 1123
1079bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) 1124bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1080{ 1125{
1081 if (AR_SREV_9285_12_OR_LATER(ah)) { 1126 struct ath_common *common = ath9k_hw_common(ah);
1127
1128 if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
1082 if (!ar9285_clc(ah, chan)) 1129 if (!ar9285_clc(ah, chan))
1083 return false; 1130 return false;
1084 } else { 1131 } else {
@@ -1098,9 +1145,9 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1098 /* Poll for offset calibration complete */ 1145 /* Poll for offset calibration complete */
1099 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 1146 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1100 0, AH_WAIT_TIMEOUT)) { 1147 0, AH_WAIT_TIMEOUT)) {
1101 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1148 ath_print(common, ATH_DBG_CALIBRATE,
1102 "offset calibration failed to complete in 1ms; " 1149 "offset calibration failed to "
1103 "noisy environment?\n"); 1150 "complete in 1ms; noisy environment?\n");
1104 return false; 1151 return false;
1105 } 1152 }
1106 1153
@@ -1114,7 +1161,9 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1114 } 1161 }
1115 1162
1116 /* Do PA Calibration */ 1163 /* Do PA Calibration */
1117 if (AR_SREV_9285_11_OR_LATER(ah)) 1164 if (AR_SREV_9271(ah))
1165 ath9k_hw_9271_pa_cal(ah, true);
1166 else if (AR_SREV_9285_11_OR_LATER(ah))
1118 ath9k_hw_9285_pa_cal(ah, true); 1167 ath9k_hw_9285_pa_cal(ah, true);
1119 1168
1120 /* Do NF Calibration after DC offset and other calibrations */ 1169 /* Do NF Calibration after DC offset and other calibrations */
@@ -1128,20 +1177,20 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1128 if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) { 1177 if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
1129 INIT_CAL(&ah->adcgain_caldata); 1178 INIT_CAL(&ah->adcgain_caldata);
1130 INSERT_CAL(ah, &ah->adcgain_caldata); 1179 INSERT_CAL(ah, &ah->adcgain_caldata);
1131 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1180 ath_print(common, ATH_DBG_CALIBRATE,
1132 "enabling ADC Gain Calibration.\n"); 1181 "enabling ADC Gain Calibration.\n");
1133 } 1182 }
1134 if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) { 1183 if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
1135 INIT_CAL(&ah->adcdc_caldata); 1184 INIT_CAL(&ah->adcdc_caldata);
1136 INSERT_CAL(ah, &ah->adcdc_caldata); 1185 INSERT_CAL(ah, &ah->adcdc_caldata);
1137 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1186 ath_print(common, ATH_DBG_CALIBRATE,
1138 "enabling ADC DC Calibration.\n"); 1187 "enabling ADC DC Calibration.\n");
1139 } 1188 }
1140 if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) { 1189 if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
1141 INIT_CAL(&ah->iq_caldata); 1190 INIT_CAL(&ah->iq_caldata);
1142 INSERT_CAL(ah, &ah->iq_caldata); 1191 INSERT_CAL(ah, &ah->iq_caldata);
1143 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1192 ath_print(common, ATH_DBG_CALIBRATE,
1144 "enabling IQ Calibration.\n"); 1193 "enabling IQ Calibration.\n");
1145 } 1194 }
1146 1195
1147 ah->cal_list_curr = ah->cal_list; 1196 ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 9028ab193e42..b2c873e97485 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -17,6 +17,8 @@
17#ifndef CALIB_H 17#ifndef CALIB_H
18#define CALIB_H 18#define CALIB_H
19 19
20#include "hw.h"
21
20extern const struct ath9k_percal_data iq_cal_multi_sample; 22extern const struct ath9k_percal_data iq_cal_multi_sample;
21extern const struct ath9k_percal_data iq_cal_single_sample; 23extern const struct ath9k_percal_data iq_cal_single_sample;
22extern const struct ath9k_percal_data adc_gain_cal_multi_sample; 24extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
new file mode 100644
index 000000000000..4a13632e3e4d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -0,0 +1,295 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Module for common driver code between ath9k and ath9k_htc
19 */
20
21#include <linux/kernel.h>
22#include <linux/module.h>
23
24#include "common.h"
25
26MODULE_AUTHOR("Atheros Communications");
27MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
28MODULE_LICENSE("Dual BSD/GPL");
29
30/* Common RX processing */
31
32/* Assumes you've already done the endian to CPU conversion */
33static bool ath9k_rx_accept(struct ath_common *common,
34 struct sk_buff *skb,
35 struct ieee80211_rx_status *rxs,
36 struct ath_rx_status *rx_stats,
37 bool *decrypt_error)
38{
39 struct ath_hw *ah = common->ah;
40 struct ieee80211_hdr *hdr;
41 __le16 fc;
42
43 hdr = (struct ieee80211_hdr *) skb->data;
44 fc = hdr->frame_control;
45
46 if (!rx_stats->rs_datalen)
47 return false;
48 /*
49 * rs_status follows rs_datalen so if rs_datalen is too large
50 * we can take a hint that hardware corrupted it, so ignore
51 * those frames.
52 */
53 if (rx_stats->rs_datalen > common->rx_bufsize)
54 return false;
55
56 /*
57 * rs_more indicates chained descriptors which can be used
58 * to link buffers together for a sort of scatter-gather
59 * operation.
60 *
61 * The rx_stats->rs_status will not be set until the end of the
62 * chained descriptors so it can be ignored if rs_more is set. The
63 * rs_more will be false at the last element of the chained
64 * descriptors.
65 */
66 if (!rx_stats->rs_more && rx_stats->rs_status != 0) {
67 if (rx_stats->rs_status & ATH9K_RXERR_CRC)
68 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
69 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
70 return false;
71
72 if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
73 *decrypt_error = true;
74 } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
75 if (ieee80211_is_ctl(fc))
76 /*
77 * Sometimes, we get invalid
78 * MIC failures on valid control frames.
79 * Remove these mic errors.
80 */
81 rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
82 else
83 rxs->flag |= RX_FLAG_MMIC_ERROR;
84 }
85 /*
86 * Reject error frames with the exception of
87 * decryption and MIC failures. For monitor mode,
88 * we also ignore the CRC error.
89 */
90 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
91 if (rx_stats->rs_status &
92 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
93 ATH9K_RXERR_CRC))
94 return false;
95 } else {
96 if (rx_stats->rs_status &
97 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
98 return false;
99 }
100 }
101 }
102 return true;
103}
104
105static u8 ath9k_process_rate(struct ath_common *common,
106 struct ieee80211_hw *hw,
107 struct ath_rx_status *rx_stats,
108 struct ieee80211_rx_status *rxs,
109 struct sk_buff *skb)
110{
111 struct ieee80211_supported_band *sband;
112 enum ieee80211_band band;
113 unsigned int i = 0;
114
115 band = hw->conf.channel->band;
116 sband = hw->wiphy->bands[band];
117
118 if (rx_stats->rs_rate & 0x80) {
119 /* HT rate */
120 rxs->flag |= RX_FLAG_HT;
121 if (rx_stats->rs_flags & ATH9K_RX_2040)
122 rxs->flag |= RX_FLAG_40MHZ;
123 if (rx_stats->rs_flags & ATH9K_RX_GI)
124 rxs->flag |= RX_FLAG_SHORT_GI;
125 return rx_stats->rs_rate & 0x7f;
126 }
127
128 for (i = 0; i < sband->n_bitrates; i++) {
129 if (sband->bitrates[i].hw_value == rx_stats->rs_rate)
130 return i;
131 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
132 rxs->flag |= RX_FLAG_SHORTPRE;
133 return i;
134 }
135 }
136
137 /* No valid hardware bitrate found -- we should not get here */
138 ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
139 "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
140 if ((common->debug_mask & ATH_DBG_XMIT))
141 print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
142
143 return 0;
144}
145
146static void ath9k_process_rssi(struct ath_common *common,
147 struct ieee80211_hw *hw,
148 struct sk_buff *skb,
149 struct ath_rx_status *rx_stats)
150{
151 struct ath_hw *ah = common->ah;
152 struct ieee80211_sta *sta;
153 struct ieee80211_hdr *hdr;
154 struct ath_node *an;
155 int last_rssi = ATH_RSSI_DUMMY_MARKER;
156 __le16 fc;
157
158 hdr = (struct ieee80211_hdr *)skb->data;
159 fc = hdr->frame_control;
160
161 rcu_read_lock();
162 /*
163 * XXX: use ieee80211_find_sta! This requires quite a bit of work
164 * under the current ath9k virtual wiphy implementation as we have
165 * no way of tying a vif to wiphy. Typically vifs are attached to
166 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
167 * wiphy you'd have to iterate over every wiphy and each sdata.
168 */
169 sta = ieee80211_find_sta_by_hw(hw, hdr->addr2);
170 if (sta) {
171 an = (struct ath_node *) sta->drv_priv;
172 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
173 !rx_stats->rs_moreaggr)
174 ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
175 last_rssi = an->last_rssi;
176 }
177 rcu_read_unlock();
178
179 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
180 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
181 ATH_RSSI_EP_MULTIPLIER);
182 if (rx_stats->rs_rssi < 0)
183 rx_stats->rs_rssi = 0;
184 else if (rx_stats->rs_rssi > 127)
185 rx_stats->rs_rssi = 127;
186
187 /* Update Beacon RSSI, this is used by ANI. */
188 if (ieee80211_is_beacon(fc))
189 ah->stats.avgbrssi = rx_stats->rs_rssi;
190}
191
192/*
193 * For Decrypt or Demic errors, we only mark packet status here and always push
194 * up the frame up to let mac80211 handle the actual error case, be it no
195 * decryption key or real decryption error. This let us keep statistics there.
196 */
197int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
198 struct ieee80211_hw *hw,
199 struct sk_buff *skb,
200 struct ath_rx_status *rx_stats,
201 struct ieee80211_rx_status *rx_status,
202 bool *decrypt_error)
203{
204 struct ath_hw *ah = common->ah;
205
206 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
207 if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
208 return -EINVAL;
209
210 ath9k_process_rssi(common, hw, skb, rx_stats);
211
212 rx_status->rate_idx = ath9k_process_rate(common, hw,
213 rx_stats, rx_status, skb);
214 rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
215 rx_status->band = hw->conf.channel->band;
216 rx_status->freq = hw->conf.channel->center_freq;
217 rx_status->noise = common->ani.noise_floor;
218 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
219 rx_status->antenna = rx_stats->rs_antenna;
220 rx_status->flag |= RX_FLAG_TSFT;
221
222 return 0;
223}
224EXPORT_SYMBOL(ath9k_cmn_rx_skb_preprocess);
225
226void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
227 struct sk_buff *skb,
228 struct ath_rx_status *rx_stats,
229 struct ieee80211_rx_status *rxs,
230 bool decrypt_error)
231{
232 struct ath_hw *ah = common->ah;
233 struct ieee80211_hdr *hdr;
234 int hdrlen, padpos, padsize;
235 u8 keyix;
236 __le16 fc;
237
238 /* see if any padding is done by the hw and remove it */
239 hdr = (struct ieee80211_hdr *) skb->data;
240 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
241 padpos = 24;
242 fc = hdr->frame_control;
243 if ((fc & cpu_to_le16(IEEE80211_FCTL_FROMDS|IEEE80211_FCTL_TODS)) ==
244 cpu_to_le16(IEEE80211_FCTL_FROMDS|IEEE80211_FCTL_TODS)) {
245 padpos += 6; /* ETH_ALEN */
246 }
247 if ((fc & cpu_to_le16(IEEE80211_STYPE_QOS_DATA|IEEE80211_FCTL_FTYPE)) ==
248 cpu_to_le16(IEEE80211_STYPE_QOS_DATA|IEEE80211_FTYPE_DATA)) {
249 padpos += 2;
250 }
251
252 /* The MAC header is padded to have 32-bit boundary if the
253 * packet payload is non-zero. The general calculation for
254 * padsize would take into account odd header lengths:
255 * padsize = (4 - padpos % 4) % 4; However, since only
256 * even-length headers are used, padding can only be 0 or 2
257 * bytes and we can optimize this a bit. In addition, we must
258 * not try to remove padding from short control frames that do
259 * not have payload. */
260 padsize = padpos & 3;
261 if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
262 memmove(skb->data + padsize, skb->data, padpos);
263 skb_pull(skb, padsize);
264 }
265
266 keyix = rx_stats->rs_keyix;
267
268 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
269 rxs->flag |= RX_FLAG_DECRYPTED;
270 } else if (ieee80211_has_protected(fc)
271 && !decrypt_error && skb->len >= hdrlen + 4) {
272 keyix = skb->data[hdrlen + 3] >> 6;
273
274 if (test_bit(keyix, common->keymap))
275 rxs->flag |= RX_FLAG_DECRYPTED;
276 }
277 if (ah->sw_mgmt_crypto &&
278 (rxs->flag & RX_FLAG_DECRYPTED) &&
279 ieee80211_is_mgmt(fc))
280 /* Use software decrypt for management frames. */
281 rxs->flag &= ~RX_FLAG_DECRYPTED;
282}
283EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
284
285static int __init ath9k_cmn_init(void)
286{
287 return 0;
288}
289module_init(ath9k_cmn_init);
290
291static void __exit ath9k_cmn_exit(void)
292{
293 return;
294}
295module_exit(ath9k_cmn_exit);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
new file mode 100644
index 000000000000..4e1176029356
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <net/mac80211.h>
18
19#include "../ath.h"
20#include "../debug.h"
21
22#include "hw.h"
23
24/* Common header for Atheros 802.11n base driver cores */
25
26#define WME_NUM_TID 16
27#define WME_BA_BMP_SIZE 64
28#define WME_MAX_BA WME_BA_BMP_SIZE
29#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
30
31#define WME_AC_BE 0
32#define WME_AC_BK 1
33#define WME_AC_VI 2
34#define WME_AC_VO 3
35#define WME_NUM_AC 4
36
37#define ATH_RSSI_DUMMY_MARKER 0x127
38#define ATH_RSSI_LPF_LEN 10
39#define RSSI_LPF_THRESHOLD -20
40#define ATH_RSSI_EP_MULTIPLIER (1<<7)
41#define ATH_EP_MUL(x, mul) ((x) * (mul))
42#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
43#define ATH_LPF_RSSI(x, y, len) \
44 ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))
45#define ATH_RSSI_LPF(x, y) do { \
46 if ((y) >= RSSI_LPF_THRESHOLD) \
47 x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
48} while (0)
49#define ATH_EP_RND(x, mul) \
50 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
51
52struct ath_atx_ac {
53 int sched;
54 int qnum;
55 struct list_head list;
56 struct list_head tid_q;
57};
58
59struct ath_buf_state {
60 int bfs_nframes;
61 u16 bfs_al;
62 u16 bfs_frmlen;
63 int bfs_seqno;
64 int bfs_tidno;
65 int bfs_retries;
66 u8 bf_type;
67 u32 bfs_keyix;
68 enum ath9k_key_type bfs_keytype;
69};
70
71struct ath_buf {
72 struct list_head list;
73 struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or
74 an aggregate) */
75 struct ath_buf *bf_next; /* next subframe in the aggregate */
76 struct sk_buff *bf_mpdu; /* enclosing frame structure */
77 struct ath_desc *bf_desc; /* virtual addr of desc */
78 dma_addr_t bf_daddr; /* physical addr of desc */
79 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
80 bool bf_stale;
81 u16 bf_flags;
82 struct ath_buf_state bf_state;
83 dma_addr_t bf_dmacontext;
84 struct ath_wiphy *aphy;
85};
86
87struct ath_atx_tid {
88 struct list_head list;
89 struct list_head buf_q;
90 struct ath_node *an;
91 struct ath_atx_ac *ac;
92 struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
93 u16 seq_start;
94 u16 seq_next;
95 u16 baw_size;
96 int tidno;
97 int baw_head; /* first un-acked tx buffer */
98 int baw_tail; /* next unused tx buffer slot */
99 int sched;
100 int paused;
101 u8 state;
102};
103
104struct ath_node {
105 struct ath_common *common;
106 struct ath_atx_tid tid[WME_NUM_TID];
107 struct ath_atx_ac ac[WME_NUM_AC];
108 u16 maxampdu;
109 u8 mpdudensity;
110 int last_rssi;
111};
112
113int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
114 struct ieee80211_hw *hw,
115 struct sk_buff *skb,
116 struct ath_rx_status *rx_stats,
117 struct ieee80211_rx_status *rx_status,
118 bool *decrypt_error);
119
120void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
121 struct sk_buff *skb,
122 struct ath_rx_status *rx_stats,
123 struct ieee80211_rx_status *rxs,
124 bool decrypt_error);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 2be4c2252047..06f1fcfb03e9 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -18,26 +18,13 @@
18 18
19#include "ath9k.h" 19#include "ath9k.h"
20 20
21static unsigned int ath9k_debug = DBG_DEFAULT; 21#define REG_WRITE_D(_ah, _reg, _val) \
22module_param_named(debug, ath9k_debug, uint, 0); 22 ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
23#define REG_READ_D(_ah, _reg) \
24 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
23 25
24static struct dentry *ath9k_debugfs_root; 26static struct dentry *ath9k_debugfs_root;
25 27
26void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...)
27{
28 if (!sc)
29 return;
30
31 if (sc->debug.debug_mask & dbg_mask) {
32 va_list args;
33
34 va_start(args, fmt);
35 printk(KERN_DEBUG "ath9k: ");
36 vprintk(fmt, args);
37 va_end(args);
38 }
39}
40
41static int ath9k_debugfs_open(struct inode *inode, struct file *file) 28static int ath9k_debugfs_open(struct inode *inode, struct file *file)
42{ 29{
43 file->private_data = inode->i_private; 30 file->private_data = inode->i_private;
@@ -48,10 +35,11 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
48 size_t count, loff_t *ppos) 35 size_t count, loff_t *ppos)
49{ 36{
50 struct ath_softc *sc = file->private_data; 37 struct ath_softc *sc = file->private_data;
38 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
51 char buf[32]; 39 char buf[32];
52 unsigned int len; 40 unsigned int len;
53 41
54 len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.debug_mask); 42 len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask);
55 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 43 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
56} 44}
57 45
@@ -59,6 +47,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
59 size_t count, loff_t *ppos) 47 size_t count, loff_t *ppos)
60{ 48{
61 struct ath_softc *sc = file->private_data; 49 struct ath_softc *sc = file->private_data;
50 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
62 unsigned long mask; 51 unsigned long mask;
63 char buf[32]; 52 char buf[32];
64 ssize_t len; 53 ssize_t len;
@@ -71,7 +60,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
71 if (strict_strtoul(buf, 0, &mask)) 60 if (strict_strtoul(buf, 0, &mask))
72 return -EINVAL; 61 return -EINVAL;
73 62
74 sc->debug.debug_mask = mask; 63 common->debug_mask = mask;
75 return count; 64 return count;
76} 65}
77 66
@@ -95,7 +84,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
95 84
96 ath9k_ps_wakeup(sc); 85 ath9k_ps_wakeup(sc);
97 86
98 REG_WRITE(ah, AR_MACMISC, 87 REG_WRITE_D(ah, AR_MACMISC,
99 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | 88 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
100 (AR_MACMISC_MISC_OBS_BUS_1 << 89 (AR_MACMISC_MISC_OBS_BUS_1 <<
101 AR_MACMISC_MISC_OBS_BUS_MSB_S))); 90 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
@@ -107,7 +96,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
107 if (i % 4 == 0) 96 if (i % 4 == 0)
108 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 97 len += snprintf(buf + len, sizeof(buf) - len, "\n");
109 98
110 val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32))); 99 val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
111 len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ", 100 len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
112 i, val[i]); 101 i, val[i]);
113 } 102 }
@@ -157,9 +146,9 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
157 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); 146 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
158 147
159 len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n", 148 len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
160 REG_READ(ah, AR_OBS_BUS_1)); 149 REG_READ_D(ah, AR_OBS_BUS_1));
161 len += snprintf(buf + len, sizeof(buf) - len, 150 len += snprintf(buf + len, sizeof(buf) - len,
162 "AR_CR: 0x%x \n", REG_READ(ah, AR_CR)); 151 "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
163 152
164 ath9k_ps_restore(sc); 153 ath9k_ps_restore(sc);
165 154
@@ -268,14 +257,17 @@ static const struct file_operations fops_interrupt = {
268 257
269void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb) 258void ath_debug_stat_rc(struct ath_softc *sc, struct sk_buff *skb)
270{ 259{
271 struct ath_tx_info_priv *tx_info_priv = NULL;
272 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 260 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
273 struct ieee80211_tx_rate *rates = tx_info->status.rates; 261 struct ieee80211_tx_rate *rates = tx_info->status.rates;
274 int final_ts_idx, idx; 262 int final_ts_idx = 0, idx, i;
275 struct ath_rc_stats *stats; 263 struct ath_rc_stats *stats;
276 264
277 tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 265 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
278 final_ts_idx = tx_info_priv->tx.ts_rateindex; 266 if (!rates[i].count)
267 break;
268
269 final_ts_idx = i;
270 }
279 idx = rates[final_ts_idx].idx; 271 idx = rates[final_ts_idx].idx;
280 stats = &sc->debug.stats.rcstats[idx]; 272 stats = &sc->debug.stats.rcstats[idx];
281 stats->success++; 273 stats->success++;
@@ -376,12 +368,12 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
376 aphy->chan_idx, aphy->chan_is_ht); 368 aphy->chan_idx, aphy->chan_is_ht);
377 } 369 }
378 370
379 put_unaligned_le32(REG_READ(sc->sc_ah, AR_STA_ID0), addr); 371 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
380 put_unaligned_le16(REG_READ(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); 372 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
381 len += snprintf(buf + len, sizeof(buf) - len, 373 len += snprintf(buf + len, sizeof(buf) - len,
382 "addr: %pM\n", addr); 374 "addr: %pM\n", addr);
383 put_unaligned_le32(REG_READ(sc->sc_ah, AR_BSSMSKL), addr); 375 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_BSSMSKL), addr);
384 put_unaligned_le16(REG_READ(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4); 376 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
385 len += snprintf(buf + len, sizeof(buf) - len, 377 len += snprintf(buf + len, sizeof(buf) - len,
386 "addrmask: %pM\n", addr); 378 "addrmask: %pM\n", addr);
387 379
@@ -568,9 +560,10 @@ static const struct file_operations fops_xmit = {
568 .owner = THIS_MODULE 560 .owner = THIS_MODULE
569}; 561};
570 562
571int ath9k_init_debug(struct ath_softc *sc) 563int ath9k_init_debug(struct ath_hw *ah)
572{ 564{
573 sc->debug.debug_mask = ath9k_debug; 565 struct ath_common *common = ath9k_hw_common(ah);
566 struct ath_softc *sc = (struct ath_softc *) common->priv;
574 567
575 if (!ath9k_debugfs_root) 568 if (!ath9k_debugfs_root)
576 return -ENOENT; 569 return -ENOENT;
@@ -619,12 +612,15 @@ int ath9k_init_debug(struct ath_softc *sc)
619 612
620 return 0; 613 return 0;
621err: 614err:
622 ath9k_exit_debug(sc); 615 ath9k_exit_debug(ah);
623 return -ENOMEM; 616 return -ENOMEM;
624} 617}
625 618
626void ath9k_exit_debug(struct ath_softc *sc) 619void ath9k_exit_debug(struct ath_hw *ah)
627{ 620{
621 struct ath_common *common = ath9k_hw_common(ah);
622 struct ath_softc *sc = (struct ath_softc *) common->priv;
623
628 debugfs_remove(sc->debug.debugfs_xmit); 624 debugfs_remove(sc->debug.debugfs_xmit);
629 debugfs_remove(sc->debug.debugfs_wiphy); 625 debugfs_remove(sc->debug.debugfs_wiphy);
630 debugfs_remove(sc->debug.debugfs_rcstat); 626 debugfs_remove(sc->debug.debugfs_rcstat);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 7241f4748338..749e85d57551 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -17,25 +17,7 @@
17#ifndef DEBUG_H 17#ifndef DEBUG_H
18#define DEBUG_H 18#define DEBUG_H
19 19
20enum ATH_DEBUG { 20#include "hw.h"
21 ATH_DBG_RESET = 0x00000001,
22 ATH_DBG_QUEUE = 0x00000002,
23 ATH_DBG_EEPROM = 0x00000004,
24 ATH_DBG_CALIBRATE = 0x00000008,
25 ATH_DBG_INTERRUPT = 0x00000010,
26 ATH_DBG_REGULATORY = 0x00000020,
27 ATH_DBG_ANI = 0x00000040,
28 ATH_DBG_XMIT = 0x00000080,
29 ATH_DBG_BEACON = 0x00000100,
30 ATH_DBG_CONFIG = 0x00000200,
31 ATH_DBG_FATAL = 0x00000400,
32 ATH_DBG_PS = 0x00000800,
33 ATH_DBG_HWTIMER = 0x00001000,
34 ATH_DBG_BTCOEX = 0x00002000,
35 ATH_DBG_ANY = 0xffffffff
36};
37
38#define DBG_DEFAULT (ATH_DBG_FATAL)
39 21
40struct ath_txq; 22struct ath_txq;
41struct ath_buf; 23struct ath_buf;
@@ -140,7 +122,6 @@ struct ath_stats {
140}; 122};
141 123
142struct ath9k_debug { 124struct ath9k_debug {
143 int debug_mask;
144 struct dentry *debugfs_phy; 125 struct dentry *debugfs_phy;
145 struct dentry *debugfs_debug; 126 struct dentry *debugfs_debug;
146 struct dentry *debugfs_dma; 127 struct dentry *debugfs_dma;
@@ -151,9 +132,9 @@ struct ath9k_debug {
151 struct ath_stats stats; 132 struct ath_stats stats;
152}; 133};
153 134
154void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...); 135int ath9k_init_debug(struct ath_hw *ah);
155int ath9k_init_debug(struct ath_softc *sc); 136void ath9k_exit_debug(struct ath_hw *ah);
156void ath9k_exit_debug(struct ath_softc *sc); 137
157int ath9k_debug_create_root(void); 138int ath9k_debug_create_root(void);
158void ath9k_debug_remove_root(void); 139void ath9k_debug_remove_root(void);
159void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 140void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
@@ -165,17 +146,12 @@ void ath_debug_stat_retries(struct ath_softc *sc, int rix,
165 146
166#else 147#else
167 148
168static inline void DPRINTF(struct ath_softc *sc, int dbg_mask, 149static inline int ath9k_init_debug(struct ath_hw *ah)
169 const char *fmt, ...)
170{
171}
172
173static inline int ath9k_init_debug(struct ath_softc *sc)
174{ 150{
175 return 0; 151 return 0;
176} 152}
177 153
178static inline void ath9k_exit_debug(struct ath_softc *sc) 154static inline void ath9k_exit_debug(struct ath_hw *ah)
179{ 155{
180} 156}
181 157
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index b6e52d0f8c48..dacaae934148 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) 19static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
20{ 20{
@@ -83,11 +83,9 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
83 return false; 83 return false;
84} 84}
85 85
86bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) 86bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
87{ 87{
88 struct ath_softc *sc = ah->ah_sc; 88 return common->bus_ops->eeprom_read(common, off, data);
89
90 return sc->bus_ops->eeprom_read(ah, off, data);
91} 89}
92 90
93void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 91void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 4fe33f7eee9d..2f2993b50e2f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -17,6 +17,7 @@
17#ifndef EEPROM_H 17#ifndef EEPROM_H
18#define EEPROM_H 18#define EEPROM_H
19 19
20#include "../ath.h"
20#include <net/cfg80211.h> 21#include <net/cfg80211.h>
21 22
22#define AH_USE_EEPROM 0x1 23#define AH_USE_EEPROM 0x1
@@ -133,6 +134,7 @@
133#define AR5416_EEP_MINOR_VER_17 0x11 134#define AR5416_EEP_MINOR_VER_17 0x11
134#define AR5416_EEP_MINOR_VER_19 0x13 135#define AR5416_EEP_MINOR_VER_19 0x13
135#define AR5416_EEP_MINOR_VER_20 0x14 136#define AR5416_EEP_MINOR_VER_20 0x14
137#define AR5416_EEP_MINOR_VER_21 0x15
136#define AR5416_EEP_MINOR_VER_22 0x16 138#define AR5416_EEP_MINOR_VER_22 0x16
137 139
138#define AR5416_NUM_5G_CAL_PIERS 8 140#define AR5416_NUM_5G_CAL_PIERS 8
@@ -153,7 +155,7 @@
153#define AR5416_BCHAN_UNUSED 0xFF 155#define AR5416_BCHAN_UNUSED 0xFF
154#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 156#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
155#define AR5416_MAX_CHAINS 3 157#define AR5416_MAX_CHAINS 3
156#define AR5416_PWR_TABLE_OFFSET -5 158#define AR5416_PWR_TABLE_OFFSET_DB -5
157 159
158/* Rx gain type values */ 160/* Rx gain type values */
159#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0 161#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0
@@ -301,7 +303,7 @@ struct base_eep_header {
301 u8 txGainType; 303 u8 txGainType;
302 u8 rcChainMask; 304 u8 rcChainMask;
303 u8 desiredScaleCCK; 305 u8 desiredScaleCCK;
304 u8 power_table_offset; 306 u8 pwr_table_offset;
305 u8 frac_n_5g; 307 u8 frac_n_5g;
306 u8 futureBase_3[21]; 308 u8 futureBase_3[21];
307} __packed; 309} __packed;
@@ -638,6 +640,7 @@ struct ar9287_eeprom {
638} __packed; 640} __packed;
639 641
640enum reg_ext_bitmap { 642enum reg_ext_bitmap {
643 REG_EXT_FCC_MIDBAND = 0,
641 REG_EXT_JAPAN_MIDBAND = 1, 644 REG_EXT_JAPAN_MIDBAND = 1,
642 REG_EXT_FCC_DFS_HT40 = 2, 645 REG_EXT_FCC_DFS_HT40 = 2,
643 REG_EXT_JAPAN_NONDFS_HT40 = 3, 646 REG_EXT_JAPAN_NONDFS_HT40 = 3,
@@ -684,7 +687,7 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
684 int16_t targetRight); 687 int16_t targetRight);
685bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, 688bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
686 u16 *indexL, u16 *indexR); 689 u16 *indexL, u16 *indexR);
687bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data); 690bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
688void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 691void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
689 u8 *pVpdList, u16 numIntercepts, 692 u8 *pVpdList, u16 numIntercepts,
690 u8 *pRetVpdList); 693 u8 *pRetVpdList);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index b8eca7be5f3a..68db16690abf 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) 19static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
20{ 20{
@@ -29,20 +29,21 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
29static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) 29static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
30{ 30{
31#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 31#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
32 struct ath_common *common = ath9k_hw_common(ah);
32 u16 *eep_data = (u16 *)&ah->eeprom.map4k; 33 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
33 int addr, eep_start_loc = 0; 34 int addr, eep_start_loc = 0;
34 35
35 eep_start_loc = 64; 36 eep_start_loc = 64;
36 37
37 if (!ath9k_hw_use_flash(ah)) { 38 if (!ath9k_hw_use_flash(ah)) {
38 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 39 ath_print(common, ATH_DBG_EEPROM,
39 "Reading from EEPROM, not flash\n"); 40 "Reading from EEPROM, not flash\n");
40 } 41 }
41 42
42 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 43 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
43 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { 44 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
44 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 45 ath_print(common, ATH_DBG_EEPROM,
45 "Unable to read eeprom region \n"); 46 "Unable to read eeprom region \n");
46 return false; 47 return false;
47 } 48 }
48 eep_data++; 49 eep_data++;
@@ -55,6 +56,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
55static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) 56static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
56{ 57{
57#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 58#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
59 struct ath_common *common = ath9k_hw_common(ah);
58 struct ar5416_eeprom_4k *eep = 60 struct ar5416_eeprom_4k *eep =
59 (struct ar5416_eeprom_4k *) &ah->eeprom.map4k; 61 (struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
60 u16 *eepdata, temp, magic, magic2; 62 u16 *eepdata, temp, magic, magic2;
@@ -64,15 +66,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
64 66
65 67
66 if (!ath9k_hw_use_flash(ah)) { 68 if (!ath9k_hw_use_flash(ah)) {
67 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, 69 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
68 &magic)) { 70 &magic)) {
69 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 71 ath_print(common, ATH_DBG_FATAL,
70 "Reading Magic # failed\n"); 72 "Reading Magic # failed\n");
71 return false; 73 return false;
72 } 74 }
73 75
74 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 76 ath_print(common, ATH_DBG_EEPROM,
75 "Read Magic = 0x%04X\n", magic); 77 "Read Magic = 0x%04X\n", magic);
76 78
77 if (magic != AR5416_EEPROM_MAGIC) { 79 if (magic != AR5416_EEPROM_MAGIC) {
78 magic2 = swab16(magic); 80 magic2 = swab16(magic);
@@ -87,16 +89,16 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
87 eepdata++; 89 eepdata++;
88 } 90 }
89 } else { 91 } else {
90 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 92 ath_print(common, ATH_DBG_FATAL,
91 "Invalid EEPROM Magic. " 93 "Invalid EEPROM Magic. "
92 "endianness mismatch.\n"); 94 "endianness mismatch.\n");
93 return -EINVAL; 95 return -EINVAL;
94 } 96 }
95 } 97 }
96 } 98 }
97 99
98 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", 100 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
99 need_swap ? "True" : "False"); 101 need_swap ? "True" : "False");
100 102
101 if (need_swap) 103 if (need_swap)
102 el = swab16(ah->eeprom.map4k.baseEepHeader.length); 104 el = swab16(ah->eeprom.map4k.baseEepHeader.length);
@@ -117,8 +119,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
117 u32 integer; 119 u32 integer;
118 u16 word; 120 u16 word;
119 121
120 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 122 ath_print(common, ATH_DBG_EEPROM,
121 "EEPROM Endianness is not native.. Changing\n"); 123 "EEPROM Endianness is not native.. Changing\n");
122 124
123 word = swab16(eep->baseEepHeader.length); 125 word = swab16(eep->baseEepHeader.length);
124 eep->baseEepHeader.length = word; 126 eep->baseEepHeader.length = word;
@@ -160,9 +162,9 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
160 162
161 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 163 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
162 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 164 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
163 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 165 ath_print(common, ATH_DBG_FATAL,
164 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 166 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
165 sum, ah->eep_ops->get_eeprom_ver(ah)); 167 sum, ah->eep_ops->get_eeprom_ver(ah));
166 return -EINVAL; 168 return -EINVAL;
167 } 169 }
168 170
@@ -208,6 +210,8 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
208 return pBase->rxMask; 210 return pBase->rxMask;
209 case EEP_FRAC_N_5G: 211 case EEP_FRAC_N_5G:
210 return 0; 212 return 0;
213 case EEP_PWR_TABLE_OFFSET:
214 return AR5416_PWR_TABLE_OFFSET_DB;
211 default: 215 default:
212 return 0; 216 return 0;
213 } 217 }
@@ -385,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
385 struct ath9k_channel *chan, 389 struct ath9k_channel *chan,
386 int16_t *pTxPowerIndexOffset) 390 int16_t *pTxPowerIndexOffset)
387{ 391{
392 struct ath_common *common = ath9k_hw_common(ah);
388 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; 393 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
389 struct cal_data_per_freq_4k *pRawDataset; 394 struct cal_data_per_freq_4k *pRawDataset;
390 u8 *pCalBChans = NULL; 395 u8 *pCalBChans = NULL;
@@ -470,21 +475,21 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
470 ((pdadcValues[4 * j + 3] & 0xFF) << 24); 475 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
471 REG_WRITE(ah, regOffset, reg32); 476 REG_WRITE(ah, regOffset, reg32);
472 477
473 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 478 ath_print(common, ATH_DBG_EEPROM,
474 "PDADC (%d,%4x): %4.4x %8.8x\n", 479 "PDADC (%d,%4x): %4.4x %8.8x\n",
475 i, regChainOffset, regOffset, 480 i, regChainOffset, regOffset,
476 reg32); 481 reg32);
477 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 482 ath_print(common, ATH_DBG_EEPROM,
478 "PDADC: Chain %d | " 483 "PDADC: Chain %d | "
479 "PDADC %3d Value %3d | " 484 "PDADC %3d Value %3d | "
480 "PDADC %3d Value %3d | " 485 "PDADC %3d Value %3d | "
481 "PDADC %3d Value %3d | " 486 "PDADC %3d Value %3d | "
482 "PDADC %3d Value %3d |\n", 487 "PDADC %3d Value %3d |\n",
483 i, 4 * j, pdadcValues[4 * j], 488 i, 4 * j, pdadcValues[4 * j],
484 4 * j + 1, pdadcValues[4 * j + 1], 489 4 * j + 1, pdadcValues[4 * j + 1],
485 4 * j + 2, pdadcValues[4 * j + 2], 490 4 * j + 2, pdadcValues[4 * j + 2],
486 4 * j + 3, 491 4 * j + 3,
487 pdadcValues[4 * j + 3]); 492 pdadcValues[4 * j + 3]);
488 493
489 regOffset += 4; 494 regOffset += 4;
490 } 495 }
@@ -750,7 +755,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
750 755
751 if (AR_SREV_9280_10_OR_LATER(ah)) { 756 if (AR_SREV_9280_10_OR_LATER(ah)) {
752 for (i = 0; i < Ar5416RateSize; i++) 757 for (i = 0; i < Ar5416RateSize; i++)
753 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2; 758 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
754 } 759 }
755 760
756 /* OFDM power per rate */ 761 /* OFDM power per rate */
@@ -1107,6 +1112,10 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1107 1112
1108 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 1113 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1109 pModal->txEndToRxOn); 1114 pModal->txEndToRxOn);
1115
1116 if (AR_SREV_9271_10(ah))
1117 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1118 pModal->txEndToRxOn);
1110 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, 1119 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
1111 pModal->thresh62); 1120 pModal->thresh62);
1112 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, 1121 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
@@ -1148,20 +1157,21 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1148{ 1157{
1149#define EEP_MAP4K_SPURCHAN \ 1158#define EEP_MAP4K_SPURCHAN \
1150 (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan) 1159 (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
1160 struct ath_common *common = ath9k_hw_common(ah);
1151 1161
1152 u16 spur_val = AR_NO_SPUR; 1162 u16 spur_val = AR_NO_SPUR;
1153 1163
1154 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1164 ath_print(common, ATH_DBG_ANI,
1155 "Getting spur idx %d is2Ghz. %d val %x\n", 1165 "Getting spur idx %d is2Ghz. %d val %x\n",
1156 i, is2GHz, ah->config.spurchans[i][is2GHz]); 1166 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1157 1167
1158 switch (ah->config.spurmode) { 1168 switch (ah->config.spurmode) {
1159 case SPUR_DISABLE: 1169 case SPUR_DISABLE:
1160 break; 1170 break;
1161 case SPUR_ENABLE_IOCTL: 1171 case SPUR_ENABLE_IOCTL:
1162 spur_val = ah->config.spurchans[i][is2GHz]; 1172 spur_val = ah->config.spurchans[i][is2GHz];
1163 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1173 ath_print(common, ATH_DBG_ANI,
1164 "Getting spur val from new loc. %d\n", spur_val); 1174 "Getting spur val from new loc. %d\n", spur_val);
1165 break; 1175 break;
1166 case SPUR_ENABLE_EEPROM: 1176 case SPUR_ENABLE_EEPROM:
1167 spur_val = EEP_MAP4K_SPURCHAN; 1177 spur_val = EEP_MAP4K_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index c20c21a79b21..839d05a1df29 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah) 19static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
20{ 20{
@@ -29,20 +29,22 @@ static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah)
29static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah) 29static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
30{ 30{
31 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 31 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
32 struct ath_common *common = ath9k_hw_common(ah);
32 u16 *eep_data; 33 u16 *eep_data;
33 int addr, eep_start_loc = AR9287_EEP_START_LOC; 34 int addr, eep_start_loc = AR9287_EEP_START_LOC;
34 eep_data = (u16 *)eep; 35 eep_data = (u16 *)eep;
35 36
36 if (!ath9k_hw_use_flash(ah)) { 37 if (!ath9k_hw_use_flash(ah)) {
37 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 38 ath_print(common, ATH_DBG_EEPROM,
38 "Reading from EEPROM, not flash\n"); 39 "Reading from EEPROM, not flash\n");
39 } 40 }
40 41
41 for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16); 42 for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
42 addr++) { 43 addr++) {
43 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { 44 if (!ath9k_hw_nvram_read(common,
44 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 45 addr + eep_start_loc, eep_data)) {
45 "Unable to read eeprom region \n"); 46 ath_print(common, ATH_DBG_EEPROM,
47 "Unable to read eeprom region \n");
46 return false; 48 return false;
47 } 49 }
48 eep_data++; 50 eep_data++;
@@ -57,17 +59,18 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
57 int i, addr; 59 int i, addr;
58 bool need_swap = false; 60 bool need_swap = false;
59 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 61 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
62 struct ath_common *common = ath9k_hw_common(ah);
60 63
61 if (!ath9k_hw_use_flash(ah)) { 64 if (!ath9k_hw_use_flash(ah)) {
62 if (!ath9k_hw_nvram_read 65 if (!ath9k_hw_nvram_read(common,
63 (ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { 66 AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
64 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 67 ath_print(common, ATH_DBG_FATAL,
65 "Reading Magic # failed\n"); 68 "Reading Magic # failed\n");
66 return false; 69 return false;
67 } 70 }
68 71
69 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 72 ath_print(common, ATH_DBG_EEPROM,
70 "Read Magic = 0x%04X\n", magic); 73 "Read Magic = 0x%04X\n", magic);
71 if (magic != AR5416_EEPROM_MAGIC) { 74 if (magic != AR5416_EEPROM_MAGIC) {
72 magic2 = swab16(magic); 75 magic2 = swab16(magic);
73 76
@@ -83,15 +86,15 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
83 eepdata++; 86 eepdata++;
84 } 87 }
85 } else { 88 } else {
86 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 89 ath_print(common, ATH_DBG_FATAL,
87 "Invalid EEPROM Magic. " 90 "Invalid EEPROM Magic. "
88 "endianness mismatch.\n"); 91 "endianness mismatch.\n");
89 return -EINVAL; 92 return -EINVAL;
90 } 93 }
91 } 94 }
92 } 95 }
93 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? 96 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ?
94 "True" : "False"); 97 "True" : "False");
95 98
96 if (need_swap) 99 if (need_swap)
97 el = swab16(ah->eeprom.map9287.baseEepHeader.length); 100 el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -148,9 +151,9 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
148 151
149 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER 152 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER
150 || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 153 || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
151 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 154 ath_print(common, ATH_DBG_FATAL,
152 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 155 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
153 sum, ah->eep_ops->get_eeprom_ver(ah)); 156 sum, ah->eep_ops->get_eeprom_ver(ah));
154 return -EINVAL; 157 return -EINVAL;
155 } 158 }
156 159
@@ -436,6 +439,7 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
436 struct ath9k_channel *chan, 439 struct ath9k_channel *chan,
437 int16_t *pTxPowerIndexOffset) 440 int16_t *pTxPowerIndexOffset)
438{ 441{
442 struct ath_common *common = ath9k_hw_common(ah);
439 struct cal_data_per_freq_ar9287 *pRawDataset; 443 struct cal_data_per_freq_ar9287 *pRawDataset;
440 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop; 444 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
441 u8 *pCalBChans = NULL; 445 u8 *pCalBChans = NULL;
@@ -564,24 +568,25 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
564 & 0xFF) << 24) ; 568 & 0xFF) << 24) ;
565 REG_WRITE(ah, regOffset, reg32); 569 REG_WRITE(ah, regOffset, reg32);
566 570
567 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 571 ath_print(common, ATH_DBG_EEPROM,
568 "PDADC (%d,%4x): %4.4x %8.8x\n", 572 "PDADC (%d,%4x): %4.4x "
569 i, regChainOffset, regOffset, 573 "%8.8x\n",
570 reg32); 574 i, regChainOffset, regOffset,
571 575 reg32);
572 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 576
573 "PDADC: Chain %d | " 577 ath_print(common, ATH_DBG_EEPROM,
574 "PDADC %3d Value %3d | " 578 "PDADC: Chain %d | "
575 "PDADC %3d Value %3d | " 579 "PDADC %3d Value %3d | "
576 "PDADC %3d Value %3d | " 580 "PDADC %3d Value %3d | "
577 "PDADC %3d Value %3d |\n", 581 "PDADC %3d Value %3d | "
578 i, 4 * j, pdadcValues[4 * j], 582 "PDADC %3d Value %3d |\n",
579 4 * j + 1, 583 i, 4 * j, pdadcValues[4 * j],
580 pdadcValues[4 * j + 1], 584 4 * j + 1,
581 4 * j + 2, 585 pdadcValues[4 * j + 1],
582 pdadcValues[4 * j + 2], 586 4 * j + 2,
583 4 * j + 3, 587 pdadcValues[4 * j + 2],
584 pdadcValues[4 * j + 3]); 588 4 * j + 3,
589 pdadcValues[4 * j + 3]);
585 590
586 regOffset += 4; 591 regOffset += 4;
587 } 592 }
@@ -831,6 +836,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
831{ 836{
832#define INCREASE_MAXPOW_BY_TWO_CHAIN 6 837#define INCREASE_MAXPOW_BY_TWO_CHAIN 6
833#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 838#define INCREASE_MAXPOW_BY_THREE_CHAIN 10
839 struct ath_common *common = ath9k_hw_common(ah);
834 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 840 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
835 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 841 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
836 struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader; 842 struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
@@ -966,8 +972,8 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
966 INCREASE_MAXPOW_BY_THREE_CHAIN; 972 INCREASE_MAXPOW_BY_THREE_CHAIN;
967 break; 973 break;
968 default: 974 default:
969 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 975 ath_print(common, ATH_DBG_EEPROM,
970 "Invalid chainmask configuration\n"); 976 "Invalid chainmask configuration\n");
971 break; 977 break;
972 } 978 }
973} 979}
@@ -1138,19 +1144,20 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
1138{ 1144{
1139#define EEP_MAP9287_SPURCHAN \ 1145#define EEP_MAP9287_SPURCHAN \
1140 (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan) 1146 (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
1147 struct ath_common *common = ath9k_hw_common(ah);
1141 u16 spur_val = AR_NO_SPUR; 1148 u16 spur_val = AR_NO_SPUR;
1142 1149
1143 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1150 ath_print(common, ATH_DBG_ANI,
1144 "Getting spur idx %d is2Ghz. %d val %x\n", 1151 "Getting spur idx %d is2Ghz. %d val %x\n",
1145 i, is2GHz, ah->config.spurchans[i][is2GHz]); 1152 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1146 1153
1147 switch (ah->config.spurmode) { 1154 switch (ah->config.spurmode) {
1148 case SPUR_DISABLE: 1155 case SPUR_DISABLE:
1149 break; 1156 break;
1150 case SPUR_ENABLE_IOCTL: 1157 case SPUR_ENABLE_IOCTL:
1151 spur_val = ah->config.spurchans[i][is2GHz]; 1158 spur_val = ah->config.spurchans[i][is2GHz];
1152 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1159 ath_print(common, ATH_DBG_ANI,
1153 "Getting spur val from new loc. %d\n", spur_val); 1160 "Getting spur val from new loc. %d\n", spur_val);
1154 break; 1161 break;
1155 case SPUR_ENABLE_EEPROM: 1162 case SPUR_ENABLE_EEPROM:
1156 spur_val = EEP_MAP9287_SPURCHAN; 1163 spur_val = EEP_MAP9287_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 4071fc91da0a..404a0341242c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static void ath9k_get_txgain_index(struct ath_hw *ah, 19static void ath9k_get_txgain_index(struct ath_hw *ah,
20 struct ath9k_channel *chan, 20 struct ath9k_channel *chan,
@@ -89,14 +89,15 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
89static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) 89static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
90{ 90{
91#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) 91#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
92 struct ath_common *common = ath9k_hw_common(ah);
92 u16 *eep_data = (u16 *)&ah->eeprom.def; 93 u16 *eep_data = (u16 *)&ah->eeprom.def;
93 int addr, ar5416_eep_start_loc = 0x100; 94 int addr, ar5416_eep_start_loc = 0x100;
94 95
95 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { 96 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
96 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, 97 if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc,
97 eep_data)) { 98 eep_data)) {
98 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 99 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
99 "Unable to read eeprom region\n"); 100 "Unable to read eeprom region\n");
100 return false; 101 return false;
101 } 102 }
102 eep_data++; 103 eep_data++;
@@ -109,19 +110,20 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
109{ 110{
110 struct ar5416_eeprom_def *eep = 111 struct ar5416_eeprom_def *eep =
111 (struct ar5416_eeprom_def *) &ah->eeprom.def; 112 (struct ar5416_eeprom_def *) &ah->eeprom.def;
113 struct ath_common *common = ath9k_hw_common(ah);
112 u16 *eepdata, temp, magic, magic2; 114 u16 *eepdata, temp, magic, magic2;
113 u32 sum = 0, el; 115 u32 sum = 0, el;
114 bool need_swap = false; 116 bool need_swap = false;
115 int i, addr, size; 117 int i, addr, size;
116 118
117 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { 119 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
118 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n"); 120 ath_print(common, ATH_DBG_FATAL, "Reading Magic # failed\n");
119 return false; 121 return false;
120 } 122 }
121 123
122 if (!ath9k_hw_use_flash(ah)) { 124 if (!ath9k_hw_use_flash(ah)) {
123 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 125 ath_print(common, ATH_DBG_EEPROM,
124 "Read Magic = 0x%04X\n", magic); 126 "Read Magic = 0x%04X\n", magic);
125 127
126 if (magic != AR5416_EEPROM_MAGIC) { 128 if (magic != AR5416_EEPROM_MAGIC) {
127 magic2 = swab16(magic); 129 magic2 = swab16(magic);
@@ -137,16 +139,16 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
137 eepdata++; 139 eepdata++;
138 } 140 }
139 } else { 141 } else {
140 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 142 ath_print(common, ATH_DBG_FATAL,
141 "Invalid EEPROM Magic. " 143 "Invalid EEPROM Magic. "
142 "Endianness mismatch.\n"); 144 "Endianness mismatch.\n");
143 return -EINVAL; 145 return -EINVAL;
144 } 146 }
145 } 147 }
146 } 148 }
147 149
148 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", 150 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
149 need_swap ? "True" : "False"); 151 need_swap ? "True" : "False");
150 152
151 if (need_swap) 153 if (need_swap)
152 el = swab16(ah->eeprom.def.baseEepHeader.length); 154 el = swab16(ah->eeprom.def.baseEepHeader.length);
@@ -167,8 +169,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
167 u32 integer, j; 169 u32 integer, j;
168 u16 word; 170 u16 word;
169 171
170 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 172 ath_print(common, ATH_DBG_EEPROM,
171 "EEPROM Endianness is not native.. Changing.\n"); 173 "EEPROM Endianness is not native.. Changing.\n");
172 174
173 word = swab16(eep->baseEepHeader.length); 175 word = swab16(eep->baseEepHeader.length);
174 eep->baseEepHeader.length = word; 176 eep->baseEepHeader.length = word;
@@ -214,8 +216,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
214 216
215 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 217 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
216 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 218 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
217 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 219 ath_print(common, ATH_DBG_FATAL,
218 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 220 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
219 sum, ah->eep_ops->get_eeprom_ver(ah)); 221 sum, ah->eep_ops->get_eeprom_ver(ah));
220 return -EINVAL; 222 return -EINVAL;
221 } 223 }
@@ -289,6 +291,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
289 return pBase->frac_n_5g; 291 return pBase->frac_n_5g;
290 else 292 else
291 return 0; 293 return 0;
294 case EEP_PWR_TABLE_OFFSET:
295 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21)
296 return pBase->pwr_table_offset;
297 else
298 return AR5416_PWR_TABLE_OFFSET_DB;
292 default: 299 default:
293 return 0; 300 return 0;
294 } 301 }
@@ -739,6 +746,76 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
739 return; 746 return;
740} 747}
741 748
749static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
750 u16 *gb,
751 u16 numXpdGain,
752 u16 pdGainOverlap_t2,
753 int8_t pwr_table_offset,
754 int16_t *diff)
755
756{
757 u16 k;
758
759 /* Prior to writing the boundaries or the pdadc vs. power table
760 * into the chip registers the default starting point on the pdadc
761 * vs. power table needs to be checked and the curve boundaries
762 * adjusted accordingly
763 */
764 if (AR_SREV_9280_20_OR_LATER(ah)) {
765 u16 gb_limit;
766
767 if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
768 /* get the difference in dB */
769 *diff = (u16)(pwr_table_offset - AR5416_PWR_TABLE_OFFSET_DB);
770 /* get the number of half dB steps */
771 *diff *= 2;
772 /* change the original gain boundary settings
773 * by the number of half dB steps
774 */
775 for (k = 0; k < numXpdGain; k++)
776 gb[k] = (u16)(gb[k] - *diff);
777 }
778 /* Because of a hardware limitation, ensure the gain boundary
779 * is not larger than (63 - overlap)
780 */
781 gb_limit = (u16)(AR5416_MAX_RATE_POWER - pdGainOverlap_t2);
782
783 for (k = 0; k < numXpdGain; k++)
784 gb[k] = (u16)min(gb_limit, gb[k]);
785 }
786
787 return *diff;
788}
789
790static void ath9k_adjust_pdadc_values(struct ath_hw *ah,
791 int8_t pwr_table_offset,
792 int16_t diff,
793 u8 *pdadcValues)
794{
795#define NUM_PDADC(diff) (AR5416_NUM_PDADC_VALUES - diff)
796 u16 k;
797
798 /* If this is a board that has a pwrTableOffset that differs from
799 * the default AR5416_PWR_TABLE_OFFSET_DB then the start of the
800 * pdadc vs pwr table needs to be adjusted prior to writing to the
801 * chip.
802 */
803 if (AR_SREV_9280_20_OR_LATER(ah)) {
804 if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
805 /* shift the table to start at the new offset */
806 for (k = 0; k < (u16)NUM_PDADC(diff); k++ ) {
807 pdadcValues[k] = pdadcValues[k + diff];
808 }
809
810 /* fill the back of the table */
811 for (k = (u16)NUM_PDADC(diff); k < NUM_PDADC(0); k++) {
812 pdadcValues[k] = pdadcValues[NUM_PDADC(diff)];
813 }
814 }
815 }
816#undef NUM_PDADC
817}
818
742static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, 819static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
743 struct ath9k_channel *chan, 820 struct ath9k_channel *chan,
744 int16_t *pTxPowerIndexOffset) 821 int16_t *pTxPowerIndexOffset)
@@ -746,7 +823,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
746#define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x) 823#define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x)
747#define SM_PDGAIN_B(x, y) \ 824#define SM_PDGAIN_B(x, y) \
748 SM((gainBoundaries[x]), AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##y) 825 SM((gainBoundaries[x]), AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##y)
749 826 struct ath_common *common = ath9k_hw_common(ah);
750 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; 827 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
751 struct cal_data_per_freq *pRawDataset; 828 struct cal_data_per_freq *pRawDataset;
752 u8 *pCalBChans = NULL; 829 u8 *pCalBChans = NULL;
@@ -754,15 +831,18 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
754 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; 831 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
755 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; 832 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
756 u16 numPiers, i, j; 833 u16 numPiers, i, j;
757 int16_t tMinCalPower; 834 int16_t tMinCalPower, diff = 0;
758 u16 numXpdGain, xpdMask; 835 u16 numXpdGain, xpdMask;
759 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 }; 836 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
760 u32 reg32, regOffset, regChainOffset; 837 u32 reg32, regOffset, regChainOffset;
761 int16_t modalIdx; 838 int16_t modalIdx;
839 int8_t pwr_table_offset;
762 840
763 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0; 841 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
764 xpdMask = pEepData->modalHeader[modalIdx].xpdGain; 842 xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
765 843
844 pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET);
845
766 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 846 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
767 AR5416_EEP_MINOR_VER_2) { 847 AR5416_EEP_MINOR_VER_2) {
768 pdGainOverlap_t2 = 848 pdGainOverlap_t2 =
@@ -842,6 +922,13 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
842 numXpdGain); 922 numXpdGain);
843 } 923 }
844 924
925 diff = ath9k_change_gain_boundary_setting(ah,
926 gainBoundaries,
927 numXpdGain,
928 pdGainOverlap_t2,
929 pwr_table_offset,
930 &diff);
931
845 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { 932 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
846 if (OLC_FOR_AR9280_20_LATER) { 933 if (OLC_FOR_AR9280_20_LATER) {
847 REG_WRITE(ah, 934 REG_WRITE(ah,
@@ -862,6 +949,10 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
862 } 949 }
863 } 950 }
864 951
952
953 ath9k_adjust_pdadc_values(ah, pwr_table_offset,
954 diff, pdadcValues);
955
865 regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; 956 regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
866 for (j = 0; j < 32; j++) { 957 for (j = 0; j < 32; j++) {
867 reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) | 958 reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
@@ -870,20 +961,20 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
870 ((pdadcValues[4 * j + 3] & 0xFF) << 24); 961 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
871 REG_WRITE(ah, regOffset, reg32); 962 REG_WRITE(ah, regOffset, reg32);
872 963
873 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 964 ath_print(common, ATH_DBG_EEPROM,
874 "PDADC (%d,%4x): %4.4x %8.8x\n", 965 "PDADC (%d,%4x): %4.4x %8.8x\n",
875 i, regChainOffset, regOffset, 966 i, regChainOffset, regOffset,
876 reg32); 967 reg32);
877 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 968 ath_print(common, ATH_DBG_EEPROM,
878 "PDADC: Chain %d | PDADC %3d " 969 "PDADC: Chain %d | PDADC %3d "
879 "Value %3d | PDADC %3d Value %3d | " 970 "Value %3d | PDADC %3d Value %3d | "
880 "PDADC %3d Value %3d | PDADC %3d " 971 "PDADC %3d Value %3d | PDADC %3d "
881 "Value %3d |\n", 972 "Value %3d |\n",
882 i, 4 * j, pdadcValues[4 * j], 973 i, 4 * j, pdadcValues[4 * j],
883 4 * j + 1, pdadcValues[4 * j + 1], 974 4 * j + 1, pdadcValues[4 * j + 1],
884 4 * j + 2, pdadcValues[4 * j + 2], 975 4 * j + 2, pdadcValues[4 * j + 2],
885 4 * j + 3, 976 4 * j + 3,
886 pdadcValues[4 * j + 3]); 977 pdadcValues[4 * j + 3]);
887 978
888 regOffset += 4; 979 regOffset += 4;
889 } 980 }
@@ -1197,8 +1288,13 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1197 } 1288 }
1198 1289
1199 if (AR_SREV_9280_10_OR_LATER(ah)) { 1290 if (AR_SREV_9280_10_OR_LATER(ah)) {
1200 for (i = 0; i < Ar5416RateSize; i++) 1291 for (i = 0; i < Ar5416RateSize; i++) {
1201 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2; 1292 int8_t pwr_table_offset;
1293
1294 pwr_table_offset = ah->eep_ops->get_eeprom(ah,
1295 EEP_PWR_TABLE_OFFSET);
1296 ratesArray[i] -= pwr_table_offset * 2;
1297 }
1202 } 1298 }
1203 1299
1204 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 1300 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
@@ -1297,7 +1393,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1297 1393
1298 if (AR_SREV_9280_10_OR_LATER(ah)) 1394 if (AR_SREV_9280_10_OR_LATER(ah))
1299 regulatory->max_power_level = 1395 regulatory->max_power_level =
1300 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2; 1396 ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
1301 else 1397 else
1302 regulatory->max_power_level = ratesArray[i]; 1398 regulatory->max_power_level = ratesArray[i];
1303 1399
@@ -1311,8 +1407,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1311 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; 1407 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1312 break; 1408 break;
1313 default: 1409 default:
1314 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1410 ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
1315 "Invalid chainmask configuration\n"); 1411 "Invalid chainmask configuration\n");
1316 break; 1412 break;
1317 } 1413 }
1318} 1414}
@@ -1349,20 +1445,21 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1349{ 1445{
1350#define EEP_DEF_SPURCHAN \ 1446#define EEP_DEF_SPURCHAN \
1351 (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan) 1447 (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
1448 struct ath_common *common = ath9k_hw_common(ah);
1352 1449
1353 u16 spur_val = AR_NO_SPUR; 1450 u16 spur_val = AR_NO_SPUR;
1354 1451
1355 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1452 ath_print(common, ATH_DBG_ANI,
1356 "Getting spur idx %d is2Ghz. %d val %x\n", 1453 "Getting spur idx %d is2Ghz. %d val %x\n",
1357 i, is2GHz, ah->config.spurchans[i][is2GHz]); 1454 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1358 1455
1359 switch (ah->config.spurmode) { 1456 switch (ah->config.spurmode) {
1360 case SPUR_DISABLE: 1457 case SPUR_DISABLE:
1361 break; 1458 break;
1362 case SPUR_ENABLE_IOCTL: 1459 case SPUR_ENABLE_IOCTL:
1363 spur_val = ah->config.spurchans[i][is2GHz]; 1460 spur_val = ah->config.spurchans[i][is2GHz];
1364 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1461 ath_print(common, ATH_DBG_ANI,
1365 "Getting spur val from new loc. %d\n", spur_val); 1462 "Getting spur val from new loc. %d\n", spur_val);
1366 break; 1463 break;
1367 case SPUR_ENABLE_EEPROM: 1464 case SPUR_ENABLE_EEPROM:
1368 spur_val = EEP_DEF_SPURCHAN; 1465 spur_val = EEP_DEF_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ca7694caf364..53a7b980d8f6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -16,9 +16,9 @@
16 16
17#include <linux/io.h> 17#include <linux/io.h>
18#include <asm/unaligned.h> 18#include <asm/unaligned.h>
19#include <linux/pci.h>
20 19
21#include "ath9k.h" 20#include "hw.h"
21#include "rc.h"
22#include "initvals.h" 22#include "initvals.h"
23 23
24#define ATH9K_CLOCK_RATE_CCK 22 24#define ATH9K_CLOCK_RATE_CCK 22
@@ -26,13 +26,27 @@
26#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 26#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
27 27
28static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 28static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
29static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan, 29static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
30 enum ath9k_ht_macmode macmode);
31static u32 ath9k_hw_ini_fixup(struct ath_hw *ah, 30static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
32 struct ar5416_eeprom_def *pEepData, 31 struct ar5416_eeprom_def *pEepData,
33 u32 reg, u32 value); 32 u32 reg, u32 value);
34static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan); 33
35static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan); 34MODULE_AUTHOR("Atheros Communications");
35MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
36MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
37MODULE_LICENSE("Dual BSD/GPL");
38
39static int __init ath9k_init(void)
40{
41 return 0;
42}
43module_init(ath9k_init);
44
45static void __exit ath9k_exit(void)
46{
47 return;
48}
49module_exit(ath9k_exit);
36 50
37/********************/ 51/********************/
38/* Helper Functions */ 52/* Helper Functions */
@@ -40,7 +54,7 @@ static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan
40 54
41static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks) 55static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
42{ 56{
43 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
44 58
45 if (!ah->curchan) /* should really check for CCK instead */ 59 if (!ah->curchan) /* should really check for CCK instead */
46 return clks / ATH9K_CLOCK_RATE_CCK; 60 return clks / ATH9K_CLOCK_RATE_CCK;
@@ -52,7 +66,7 @@ static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
52 66
53static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks) 67static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
54{ 68{
55 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 69 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
56 70
57 if (conf_is_ht40(conf)) 71 if (conf_is_ht40(conf))
58 return ath9k_hw_mac_usec(ah, clks) / 2; 72 return ath9k_hw_mac_usec(ah, clks) / 2;
@@ -62,7 +76,7 @@ static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
62 76
63static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs) 77static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
64{ 78{
65 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 79 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
66 80
67 if (!ah->curchan) /* should really check for CCK instead */ 81 if (!ah->curchan) /* should really check for CCK instead */
68 return usecs *ATH9K_CLOCK_RATE_CCK; 82 return usecs *ATH9K_CLOCK_RATE_CCK;
@@ -73,7 +87,7 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
73 87
74static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) 88static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
75{ 89{
76 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 90 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
77 91
78 if (conf_is_ht40(conf)) 92 if (conf_is_ht40(conf))
79 return ath9k_hw_mac_clks(ah, usecs) * 2; 93 return ath9k_hw_mac_clks(ah, usecs) * 2;
@@ -81,38 +95,6 @@ static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
81 return ath9k_hw_mac_clks(ah, usecs); 95 return ath9k_hw_mac_clks(ah, usecs);
82} 96}
83 97
84/*
85 * Read and write, they both share the same lock. We do this to serialize
86 * reads and writes on Atheros 802.11n PCI devices only. This is required
87 * as the FIFO on these devices can only accept sanely 2 requests. After
88 * that the device goes bananas. Serializing the reads/writes prevents this
89 * from happening.
90 */
91
92void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val)
93{
94 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
95 unsigned long flags;
96 spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
97 iowrite32(val, ah->ah_sc->mem + reg_offset);
98 spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
99 } else
100 iowrite32(val, ah->ah_sc->mem + reg_offset);
101}
102
103unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset)
104{
105 u32 val;
106 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
107 unsigned long flags;
108 spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
109 val = ioread32(ah->ah_sc->mem + reg_offset);
110 spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
111 } else
112 val = ioread32(ah->ah_sc->mem + reg_offset);
113 return val;
114}
115
116bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) 98bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
117{ 99{
118 int i; 100 int i;
@@ -126,12 +108,13 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
126 udelay(AH_TIME_QUANTUM); 108 udelay(AH_TIME_QUANTUM);
127 } 109 }
128 110
129 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 111 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY,
130 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", 112 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
131 timeout, reg, REG_READ(ah, reg), mask, val); 113 timeout, reg, REG_READ(ah, reg), mask, val);
132 114
133 return false; 115 return false;
134} 116}
117EXPORT_SYMBOL(ath9k_hw_wait);
135 118
136u32 ath9k_hw_reverse_bits(u32 val, u32 n) 119u32 ath9k_hw_reverse_bits(u32 val, u32 n)
137{ 120{
@@ -210,15 +193,16 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah,
210 } 193 }
211 break; 194 break;
212 default: 195 default:
213 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 196 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
214 "Unknown phy %u (rate ix %u)\n", 197 "Unknown phy %u (rate ix %u)\n",
215 rates->info[rateix].phy, rateix); 198 rates->info[rateix].phy, rateix);
216 txTime = 0; 199 txTime = 0;
217 break; 200 break;
218 } 201 }
219 202
220 return txTime; 203 return txTime;
221} 204}
205EXPORT_SYMBOL(ath9k_hw_computetxtime);
222 206
223void ath9k_hw_get_channel_centers(struct ath_hw *ah, 207void ath9k_hw_get_channel_centers(struct ath_hw *ah,
224 struct ath9k_channel *chan, 208 struct ath9k_channel *chan,
@@ -245,10 +229,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
245 229
246 centers->ctl_center = 230 centers->ctl_center =
247 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); 231 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
232 /* 25 MHz spacing is supported by hw but not on upper layers */
248 centers->ext_center = 233 centers->ext_center =
249 centers->synth_center + (extoff * 234 centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT);
250 ((ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ?
251 HT40_CHANNEL_CENTER_SHIFT : 15));
252} 235}
253 236
254/******************/ 237/******************/
@@ -317,6 +300,7 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
317 300
318static bool ath9k_hw_chip_test(struct ath_hw *ah) 301static bool ath9k_hw_chip_test(struct ath_hw *ah)
319{ 302{
303 struct ath_common *common = ath9k_hw_common(ah);
320 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; 304 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
321 u32 regHold[2]; 305 u32 regHold[2];
322 u32 patternData[4] = { 0x55555555, 306 u32 patternData[4] = { 0x55555555,
@@ -335,10 +319,11 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
335 REG_WRITE(ah, addr, wrData); 319 REG_WRITE(ah, addr, wrData);
336 rdData = REG_READ(ah, addr); 320 rdData = REG_READ(ah, addr);
337 if (rdData != wrData) { 321 if (rdData != wrData) {
338 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 322 ath_print(common, ATH_DBG_FATAL,
339 "address test failed " 323 "address test failed "
340 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 324 "addr: 0x%08x - wr:0x%08x != "
341 addr, wrData, rdData); 325 "rd:0x%08x\n",
326 addr, wrData, rdData);
342 return false; 327 return false;
343 } 328 }
344 } 329 }
@@ -347,10 +332,11 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
347 REG_WRITE(ah, addr, wrData); 332 REG_WRITE(ah, addr, wrData);
348 rdData = REG_READ(ah, addr); 333 rdData = REG_READ(ah, addr);
349 if (wrData != rdData) { 334 if (wrData != rdData) {
350 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 335 ath_print(common, ATH_DBG_FATAL,
351 "address test failed " 336 "address test failed "
352 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 337 "addr: 0x%08x - wr:0x%08x != "
353 addr, wrData, rdData); 338 "rd:0x%08x\n",
339 addr, wrData, rdData);
354 return false; 340 return false;
355 } 341 }
356 } 342 }
@@ -404,8 +390,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
404 ah->config.cck_trig_high = 200; 390 ah->config.cck_trig_high = 200;
405 ah->config.cck_trig_low = 100; 391 ah->config.cck_trig_low = 100;
406 ah->config.enable_ani = 1; 392 ah->config.enable_ani = 1;
407 ah->config.diversity_control = ATH9K_ANT_VARIABLE;
408 ah->config.antenna_switch_swap = 0;
409 393
410 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 394 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
411 ah->config.spurchans[i][0] = AR_NO_SPUR; 395 ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -433,6 +417,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
433 if (num_possible_cpus() > 1) 417 if (num_possible_cpus() > 1)
434 ah->config.serialize_regmode = SER_REG_MODE_AUTO; 418 ah->config.serialize_regmode = SER_REG_MODE_AUTO;
435} 419}
420EXPORT_SYMBOL(ath9k_hw_init);
436 421
437static void ath9k_hw_init_defaults(struct ath_hw *ah) 422static void ath9k_hw_init_defaults(struct ath_hw *ah)
438{ 423{
@@ -459,27 +444,9 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
459 ah->acktimeout = (u32) -1; 444 ah->acktimeout = (u32) -1;
460 ah->ctstimeout = (u32) -1; 445 ah->ctstimeout = (u32) -1;
461 ah->globaltxtimeout = (u32) -1; 446 ah->globaltxtimeout = (u32) -1;
462
463 ah->gbeacon_rate = 0;
464
465 ah->power_mode = ATH9K_PM_UNDEFINED; 447 ah->power_mode = ATH9K_PM_UNDEFINED;
466} 448}
467 449
468static int ath9k_hw_rfattach(struct ath_hw *ah)
469{
470 bool rfStatus = false;
471 int ecode = 0;
472
473 rfStatus = ath9k_hw_init_rf(ah, &ecode);
474 if (!rfStatus) {
475 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
476 "RF setup failed, status: %u\n", ecode);
477 return ecode;
478 }
479
480 return 0;
481}
482
483static int ath9k_hw_rf_claim(struct ath_hw *ah) 450static int ath9k_hw_rf_claim(struct ath_hw *ah)
484{ 451{
485 u32 val; 452 u32 val;
@@ -497,9 +464,9 @@ static int ath9k_hw_rf_claim(struct ath_hw *ah)
497 case AR_RAD2122_SREV_MAJOR: 464 case AR_RAD2122_SREV_MAJOR:
498 break; 465 break;
499 default: 466 default:
500 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 467 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
501 "Radio Chip Rev 0x%02X not supported\n", 468 "Radio Chip Rev 0x%02X not supported\n",
502 val & AR_RADIO_SREV_MAJOR); 469 val & AR_RADIO_SREV_MAJOR);
503 return -EOPNOTSUPP; 470 return -EOPNOTSUPP;
504 } 471 }
505 472
@@ -510,6 +477,7 @@ static int ath9k_hw_rf_claim(struct ath_hw *ah)
510 477
511static int ath9k_hw_init_macaddr(struct ath_hw *ah) 478static int ath9k_hw_init_macaddr(struct ath_hw *ah)
512{ 479{
480 struct ath_common *common = ath9k_hw_common(ah);
513 u32 sum; 481 u32 sum;
514 int i; 482 int i;
515 u16 eeval; 483 u16 eeval;
@@ -518,8 +486,8 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
518 for (i = 0; i < 3; i++) { 486 for (i = 0; i < 3; i++) {
519 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i)); 487 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
520 sum += eeval; 488 sum += eeval;
521 ah->macaddr[2 * i] = eeval >> 8; 489 common->macaddr[2 * i] = eeval >> 8;
522 ah->macaddr[2 * i + 1] = eeval & 0xff; 490 common->macaddr[2 * i + 1] = eeval & 0xff;
523 } 491 }
524 if (sum == 0 || sum == 0xffff * 3) 492 if (sum == 0 || sum == 0xffff * 3)
525 return -EADDRNOTAVAIL; 493 return -EADDRNOTAVAIL;
@@ -590,12 +558,20 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
590 if (ecode != 0) 558 if (ecode != 0)
591 return ecode; 559 return ecode;
592 560
593 DPRINTF(ah->ah_sc, ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n", 561 ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG,
594 ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); 562 "Eeprom VER: %d, REV: %d\n",
595 563 ah->eep_ops->get_eeprom_ver(ah),
596 ecode = ath9k_hw_rfattach(ah); 564 ah->eep_ops->get_eeprom_rev(ah));
597 if (ecode != 0) 565
598 return ecode; 566 if (!AR_SREV_9280_10_OR_LATER(ah)) {
567 ecode = ath9k_hw_rf_alloc_ext_banks(ah);
568 if (ecode) {
569 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
570 "Failed allocating banks for "
571 "external radio\n");
572 return ecode;
573 }
574 }
599 575
600 if (!AR_SREV_9100(ah)) { 576 if (!AR_SREV_9100(ah)) {
601 ath9k_hw_ani_setup(ah); 577 ath9k_hw_ani_setup(ah);
@@ -617,6 +593,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
617 case AR9285_DEVID_PCIE: 593 case AR9285_DEVID_PCIE:
618 case AR5416_DEVID_AR9287_PCI: 594 case AR5416_DEVID_AR9287_PCI:
619 case AR5416_DEVID_AR9287_PCIE: 595 case AR5416_DEVID_AR9287_PCIE:
596 case AR9271_USB:
620 return true; 597 return true;
621 default: 598 default:
622 break; 599 break;
@@ -634,9 +611,8 @@ static bool ath9k_hw_macversion_supported(u32 macversion)
634 case AR_SREV_VERSION_9280: 611 case AR_SREV_VERSION_9280:
635 case AR_SREV_VERSION_9285: 612 case AR_SREV_VERSION_9285:
636 case AR_SREV_VERSION_9287: 613 case AR_SREV_VERSION_9287:
637 return true;
638 /* Not yet */
639 case AR_SREV_VERSION_9271: 614 case AR_SREV_VERSION_9271:
615 return true;
640 default: 616 default:
641 break; 617 break;
642 } 618 }
@@ -670,10 +646,13 @@ static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
670static void ath9k_hw_init_mode_regs(struct ath_hw *ah) 646static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
671{ 647{
672 if (AR_SREV_9271(ah)) { 648 if (AR_SREV_9271(ah)) {
673 INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271_1_0, 649 INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
674 ARRAY_SIZE(ar9271Modes_9271_1_0), 6); 650 ARRAY_SIZE(ar9271Modes_9271), 6);
675 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271_1_0, 651 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
676 ARRAY_SIZE(ar9271Common_9271_1_0), 2); 652 ARRAY_SIZE(ar9271Common_9271), 2);
653 INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
654 ar9271Modes_9271_1_0_only,
655 ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
677 return; 656 return;
678 } 657 }
679 658
@@ -905,21 +884,27 @@ static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
905 884
906int ath9k_hw_init(struct ath_hw *ah) 885int ath9k_hw_init(struct ath_hw *ah)
907{ 886{
887 struct ath_common *common = ath9k_hw_common(ah);
908 int r = 0; 888 int r = 0;
909 889
910 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) 890 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
891 ath_print(common, ATH_DBG_FATAL,
892 "Unsupported device ID: 0x%0x\n",
893 ah->hw_version.devid);
911 return -EOPNOTSUPP; 894 return -EOPNOTSUPP;
895 }
912 896
913 ath9k_hw_init_defaults(ah); 897 ath9k_hw_init_defaults(ah);
914 ath9k_hw_init_config(ah); 898 ath9k_hw_init_config(ah);
915 899
916 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 900 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
917 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Couldn't reset chip\n"); 901 ath_print(common, ATH_DBG_FATAL,
902 "Couldn't reset chip\n");
918 return -EIO; 903 return -EIO;
919 } 904 }
920 905
921 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 906 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
922 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Couldn't wakeup chip\n"); 907 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
923 return -EIO; 908 return -EIO;
924 } 909 }
925 910
@@ -934,14 +919,14 @@ int ath9k_hw_init(struct ath_hw *ah)
934 } 919 }
935 } 920 }
936 921
937 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "serialize_regmode is %d\n", 922 ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
938 ah->config.serialize_regmode); 923 ah->config.serialize_regmode);
939 924
940 if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { 925 if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
941 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 926 ath_print(common, ATH_DBG_FATAL,
942 "Mac Chip Rev 0x%02x.%x is not supported by " 927 "Mac Chip Rev 0x%02x.%x is not supported by "
943 "this driver\n", ah->hw_version.macVersion, 928 "this driver\n", ah->hw_version.macVersion,
944 ah->hw_version.macRev); 929 ah->hw_version.macRev);
945 return -EOPNOTSUPP; 930 return -EOPNOTSUPP;
946 } 931 }
947 932
@@ -959,8 +944,14 @@ int ath9k_hw_init(struct ath_hw *ah)
959 ath9k_hw_init_cal_settings(ah); 944 ath9k_hw_init_cal_settings(ah);
960 945
961 ah->ani_function = ATH9K_ANI_ALL; 946 ah->ani_function = ATH9K_ANI_ALL;
962 if (AR_SREV_9280_10_OR_LATER(ah)) 947 if (AR_SREV_9280_10_OR_LATER(ah)) {
963 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 948 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
949 ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
950 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
951 } else {
952 ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
953 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
954 }
964 955
965 ath9k_hw_init_mode_regs(ah); 956 ath9k_hw_init_mode_regs(ah);
966 957
@@ -969,6 +960,16 @@ int ath9k_hw_init(struct ath_hw *ah)
969 else 960 else
970 ath9k_hw_disablepcie(ah); 961 ath9k_hw_disablepcie(ah);
971 962
963 /* Support for Japan ch.14 (2484) spread */
964 if (AR_SREV_9287_11_OR_LATER(ah)) {
965 INIT_INI_ARRAY(&ah->iniCckfirNormal,
966 ar9287Common_normal_cck_fir_coeff_92871_1,
967 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
968 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
969 ar9287Common_japan_2484_cck_fir_coeff_92871_1,
970 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
971 }
972
972 r = ath9k_hw_post_init(ah); 973 r = ath9k_hw_post_init(ah);
973 if (r) 974 if (r)
974 return r; 975 return r;
@@ -979,8 +980,8 @@ int ath9k_hw_init(struct ath_hw *ah)
979 980
980 r = ath9k_hw_init_macaddr(ah); 981 r = ath9k_hw_init_macaddr(ah);
981 if (r) { 982 if (r) {
982 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 983 ath_print(common, ATH_DBG_FATAL,
983 "Failed to initialize MAC address\n"); 984 "Failed to initialize MAC address\n");
984 return r; 985 return r;
985 } 986 }
986 987
@@ -991,6 +992,8 @@ int ath9k_hw_init(struct ath_hw *ah)
991 992
992 ath9k_init_nfcal_hist_buffer(ah); 993 ath9k_init_nfcal_hist_buffer(ah);
993 994
995 common->state = ATH_HW_INITIALIZED;
996
994 return 0; 997 return 0;
995} 998}
996 999
@@ -1027,6 +1030,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
1027 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 1030 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
1028} 1031}
1029 1032
1033static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
1034{
1035 u32 lcr;
1036 u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
1037
1038 lcr = REG_READ(ah , 0x5100c);
1039 lcr |= 0x80;
1040
1041 REG_WRITE(ah, 0x5100c, lcr);
1042 REG_WRITE(ah, 0x51004, (baud_divider >> 8));
1043 REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
1044
1045 lcr &= ~0x80;
1046 REG_WRITE(ah, 0x5100c, lcr);
1047}
1048
1030static void ath9k_hw_init_pll(struct ath_hw *ah, 1049static void ath9k_hw_init_pll(struct ath_hw *ah,
1031 struct ath9k_channel *chan) 1050 struct ath9k_channel *chan)
1032{ 1051{
@@ -1090,6 +1109,26 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
1090 } 1109 }
1091 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 1110 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
1092 1111
1112 /* Switch the core clock for ar9271 to 117Mhz */
1113 if (AR_SREV_9271(ah)) {
1114 if ((pll == 0x142c) || (pll == 0x2850) ) {
1115 udelay(500);
1116 /* set CLKOBS to output AHB clock */
1117 REG_WRITE(ah, 0x7020, 0xe);
1118 /*
1119 * 0x304: 117Mhz, ahb_ratio: 1x1
1120 * 0x306: 40Mhz, ahb_ratio: 1x1
1121 */
1122 REG_WRITE(ah, 0x50040, 0x304);
1123 /*
1124 * makes adjustments for the baud dividor to keep the
1125 * targetted baud rate based on the used core clock.
1126 */
1127 ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
1128 AR9271_TARGET_BAUD_RATE);
1129 }
1130 }
1131
1093 udelay(RTC_PLL_SETTLE_DELAY); 1132 udelay(RTC_PLL_SETTLE_DELAY);
1094 1133
1095 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 1134 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
@@ -1107,7 +1146,7 @@ static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
1107 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 1146 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
1108 AR_PHY_SWAP_ALT_CHAIN); 1147 AR_PHY_SWAP_ALT_CHAIN);
1109 case 0x3: 1148 case 0x3:
1110 if (((ah)->hw_version.macVersion <= AR_SREV_VERSION_9160)) { 1149 if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
1111 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); 1150 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
1112 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); 1151 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
1113 break; 1152 break;
@@ -1164,7 +1203,8 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1164static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) 1203static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1165{ 1204{
1166 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { 1205 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
1167 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad ack timeout %u\n", us); 1206 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1207 "bad ack timeout %u\n", us);
1168 ah->acktimeout = (u32) -1; 1208 ah->acktimeout = (u32) -1;
1169 return false; 1209 return false;
1170 } else { 1210 } else {
@@ -1178,7 +1218,8 @@ static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1178static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) 1218static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1179{ 1219{
1180 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { 1220 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
1181 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad cts timeout %u\n", us); 1221 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1222 "bad cts timeout %u\n", us);
1182 ah->ctstimeout = (u32) -1; 1223 ah->ctstimeout = (u32) -1;
1183 return false; 1224 return false;
1184 } else { 1225 } else {
@@ -1192,8 +1233,8 @@ static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1192static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) 1233static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1193{ 1234{
1194 if (tu > 0xFFFF) { 1235 if (tu > 0xFFFF) {
1195 DPRINTF(ah->ah_sc, ATH_DBG_XMIT, 1236 ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
1196 "bad global tx timeout %u\n", tu); 1237 "bad global tx timeout %u\n", tu);
1197 ah->globaltxtimeout = (u32) -1; 1238 ah->globaltxtimeout = (u32) -1;
1198 return false; 1239 return false;
1199 } else { 1240 } else {
@@ -1205,8 +1246,8 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1205 1246
1206static void ath9k_hw_init_user_settings(struct ath_hw *ah) 1247static void ath9k_hw_init_user_settings(struct ath_hw *ah)
1207{ 1248{
1208 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "ah->misc_mode 0x%x\n", 1249 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
1209 ah->misc_mode); 1250 ah->misc_mode);
1210 1251
1211 if (ah->misc_mode != 0) 1252 if (ah->misc_mode != 0)
1212 REG_WRITE(ah, AR_PCU_MISC, 1253 REG_WRITE(ah, AR_PCU_MISC,
@@ -1229,14 +1270,23 @@ const char *ath9k_hw_probe(u16 vendorid, u16 devid)
1229 1270
1230void ath9k_hw_detach(struct ath_hw *ah) 1271void ath9k_hw_detach(struct ath_hw *ah)
1231{ 1272{
1273 struct ath_common *common = ath9k_hw_common(ah);
1274
1275 if (common->state <= ATH_HW_INITIALIZED)
1276 goto free_hw;
1277
1232 if (!AR_SREV_9100(ah)) 1278 if (!AR_SREV_9100(ah))
1233 ath9k_hw_ani_disable(ah); 1279 ath9k_hw_ani_disable(ah);
1234 1280
1235 ath9k_hw_rf_free(ah);
1236 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 1281 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1282
1283free_hw:
1284 if (!AR_SREV_9280_10_OR_LATER(ah))
1285 ath9k_hw_rf_free_ext_banks(ah);
1237 kfree(ah); 1286 kfree(ah);
1238 ah = NULL; 1287 ah = NULL;
1239} 1288}
1289EXPORT_SYMBOL(ath9k_hw_detach);
1240 1290
1241/*******/ 1291/*******/
1242/* INI */ 1292/* INI */
@@ -1254,7 +1304,8 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
1254 * AR9271 1.1 1304 * AR9271 1.1
1255 */ 1305 */
1256 if (AR_SREV_9271_10(ah)) { 1306 if (AR_SREV_9271_10(ah)) {
1257 val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) | AR_PHY_SPECTRAL_SCAN_ENABLE; 1307 val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
1308 AR_PHY_SPECTRAL_SCAN_ENABLE;
1258 REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val); 1309 REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
1259 } 1310 }
1260 else if (AR_SREV_9271_11(ah)) 1311 else if (AR_SREV_9271_11(ah))
@@ -1298,28 +1349,29 @@ static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
1298 u32 reg, u32 value) 1349 u32 reg, u32 value)
1299{ 1350{
1300 struct base_eep_header *pBase = &(pEepData->baseEepHeader); 1351 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
1352 struct ath_common *common = ath9k_hw_common(ah);
1301 1353
1302 switch (ah->hw_version.devid) { 1354 switch (ah->hw_version.devid) {
1303 case AR9280_DEVID_PCI: 1355 case AR9280_DEVID_PCI:
1304 if (reg == 0x7894) { 1356 if (reg == 0x7894) {
1305 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1357 ath_print(common, ATH_DBG_EEPROM,
1306 "ini VAL: %x EEPROM: %x\n", value, 1358 "ini VAL: %x EEPROM: %x\n", value,
1307 (pBase->version & 0xff)); 1359 (pBase->version & 0xff));
1308 1360
1309 if ((pBase->version & 0xff) > 0x0a) { 1361 if ((pBase->version & 0xff) > 0x0a) {
1310 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1362 ath_print(common, ATH_DBG_EEPROM,
1311 "PWDCLKIND: %d\n", 1363 "PWDCLKIND: %d\n",
1312 pBase->pwdclkind); 1364 pBase->pwdclkind);
1313 value &= ~AR_AN_TOP2_PWDCLKIND; 1365 value &= ~AR_AN_TOP2_PWDCLKIND;
1314 value |= AR_AN_TOP2_PWDCLKIND & 1366 value |= AR_AN_TOP2_PWDCLKIND &
1315 (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S); 1367 (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
1316 } else { 1368 } else {
1317 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1369 ath_print(common, ATH_DBG_EEPROM,
1318 "PWDCLKIND Earlier Rev\n"); 1370 "PWDCLKIND Earlier Rev\n");
1319 } 1371 }
1320 1372
1321 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1373 ath_print(common, ATH_DBG_EEPROM,
1322 "final ini VAL: %x\n", value); 1374 "final ini VAL: %x\n", value);
1323 } 1375 }
1324 break; 1376 break;
1325 } 1377 }
@@ -1374,8 +1426,7 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
1374} 1426}
1375 1427
1376static int ath9k_hw_process_ini(struct ath_hw *ah, 1428static int ath9k_hw_process_ini(struct ath_hw *ah,
1377 struct ath9k_channel *chan, 1429 struct ath9k_channel *chan)
1378 enum ath9k_ht_macmode macmode)
1379{ 1430{
1380 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1431 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1381 int i, regWrites = 0; 1432 int i, regWrites = 0;
@@ -1469,7 +1520,11 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1469 DO_DELAY(regWrites); 1520 DO_DELAY(regWrites);
1470 } 1521 }
1471 1522
1472 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites); 1523 ath9k_hw_write_regs(ah, freqIndex, regWrites);
1524
1525 if (AR_SREV_9271_10(ah))
1526 REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
1527 modesIndex, regWrites);
1473 1528
1474 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) { 1529 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
1475 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, 1530 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
@@ -1477,7 +1532,7 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1477 } 1532 }
1478 1533
1479 ath9k_hw_override_ini(ah, chan); 1534 ath9k_hw_override_ini(ah, chan);
1480 ath9k_hw_set_regs(ah, chan, macmode); 1535 ath9k_hw_set_regs(ah, chan);
1481 ath9k_hw_init_chain_masks(ah); 1536 ath9k_hw_init_chain_masks(ah);
1482 1537
1483 if (OLC_FOR_AR9280_20_LATER) 1538 if (OLC_FOR_AR9280_20_LATER)
@@ -1491,8 +1546,8 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1491 (u32) regulatory->power_limit)); 1546 (u32) regulatory->power_limit));
1492 1547
1493 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { 1548 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
1494 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1549 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1495 "ar5416SetRfRegs failed\n"); 1550 "ar5416SetRfRegs failed\n");
1496 return -EIO; 1551 return -EIO;
1497 } 1552 }
1498 1553
@@ -1697,16 +1752,14 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1697 1752
1698 REG_WRITE(ah, AR_RTC_RC, 0); 1753 REG_WRITE(ah, AR_RTC_RC, 0);
1699 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { 1754 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
1700 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 1755 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1701 "RTC stuck in MAC reset\n"); 1756 "RTC stuck in MAC reset\n");
1702 return false; 1757 return false;
1703 } 1758 }
1704 1759
1705 if (!AR_SREV_9100(ah)) 1760 if (!AR_SREV_9100(ah))
1706 REG_WRITE(ah, AR_RC, 0); 1761 REG_WRITE(ah, AR_RC, 0);
1707 1762
1708 ath9k_hw_init_pll(ah, NULL);
1709
1710 if (AR_SREV_9100(ah)) 1763 if (AR_SREV_9100(ah))
1711 udelay(50); 1764 udelay(50);
1712 1765
@@ -1734,7 +1787,8 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1734 AR_RTC_STATUS_M, 1787 AR_RTC_STATUS_M,
1735 AR_RTC_STATUS_ON, 1788 AR_RTC_STATUS_ON,
1736 AH_WAIT_TIMEOUT)) { 1789 AH_WAIT_TIMEOUT)) {
1737 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "RTC not waking up\n"); 1790 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1791 "RTC not waking up\n");
1738 return false; 1792 return false;
1739 } 1793 }
1740 1794
@@ -1759,8 +1813,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1759 } 1813 }
1760} 1814}
1761 1815
1762static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan, 1816static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
1763 enum ath9k_ht_macmode macmode)
1764{ 1817{
1765 u32 phymode; 1818 u32 phymode;
1766 u32 enableDacFifo = 0; 1819 u32 enableDacFifo = 0;
@@ -1779,12 +1832,10 @@ static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
1779 (chan->chanmode == CHANNEL_G_HT40PLUS)) 1832 (chan->chanmode == CHANNEL_G_HT40PLUS))
1780 phymode |= AR_PHY_FC_DYN2040_PRI_CH; 1833 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1781 1834
1782 if (ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1783 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1784 } 1835 }
1785 REG_WRITE(ah, AR_PHY_TURBO, phymode); 1836 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1786 1837
1787 ath9k_hw_set11nmac2040(ah, macmode); 1838 ath9k_hw_set11nmac2040(ah);
1788 1839
1789 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); 1840 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1790 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); 1841 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
@@ -1810,17 +1861,19 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1810} 1861}
1811 1862
1812static bool ath9k_hw_channel_change(struct ath_hw *ah, 1863static bool ath9k_hw_channel_change(struct ath_hw *ah,
1813 struct ath9k_channel *chan, 1864 struct ath9k_channel *chan)
1814 enum ath9k_ht_macmode macmode)
1815{ 1865{
1816 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1866 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1867 struct ath_common *common = ath9k_hw_common(ah);
1817 struct ieee80211_channel *channel = chan->chan; 1868 struct ieee80211_channel *channel = chan->chan;
1818 u32 synthDelay, qnum; 1869 u32 synthDelay, qnum;
1870 int r;
1819 1871
1820 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1872 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
1821 if (ath9k_hw_numtxpending(ah, qnum)) { 1873 if (ath9k_hw_numtxpending(ah, qnum)) {
1822 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 1874 ath_print(common, ATH_DBG_QUEUE,
1823 "Transmit frames pending on queue %d\n", qnum); 1875 "Transmit frames pending on "
1876 "queue %d\n", qnum);
1824 return false; 1877 return false;
1825 } 1878 }
1826 } 1879 }
@@ -1828,21 +1881,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1828 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); 1881 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
1829 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN, 1882 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
1830 AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) { 1883 AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
1831 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1884 ath_print(common, ATH_DBG_FATAL,
1832 "Could not kill baseband RX\n"); 1885 "Could not kill baseband RX\n");
1833 return false; 1886 return false;
1834 } 1887 }
1835 1888
1836 ath9k_hw_set_regs(ah, chan, macmode); 1889 ath9k_hw_set_regs(ah, chan);
1837 1890
1838 if (AR_SREV_9280_10_OR_LATER(ah)) { 1891 r = ah->ath9k_hw_rf_set_freq(ah, chan);
1839 ath9k_hw_ar9280_set_channel(ah, chan); 1892 if (r) {
1840 } else { 1893 ath_print(common, ATH_DBG_FATAL,
1841 if (!(ath9k_hw_set_channel(ah, chan))) { 1894 "Failed to set channel\n");
1842 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1895 return false;
1843 "Failed to set channel\n");
1844 return false;
1845 }
1846 } 1896 }
1847 1897
1848 ah->eep_ops->set_txpower(ah, chan, 1898 ah->eep_ops->set_txpower(ah, chan,
@@ -1865,10 +1915,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1865 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1915 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1866 ath9k_hw_set_delta_slope(ah, chan); 1916 ath9k_hw_set_delta_slope(ah, chan);
1867 1917
1868 if (AR_SREV_9280_10_OR_LATER(ah)) 1918 ah->ath9k_hw_spur_mitigate_freq(ah, chan);
1869 ath9k_hw_9280_spur_mitigate(ah, chan);
1870 else
1871 ath9k_hw_spur_mitigate(ah, chan);
1872 1919
1873 if (!chan->oneTimeCalsDone) 1920 if (!chan->oneTimeCalsDone)
1874 chan->oneTimeCalsDone = true; 1921 chan->oneTimeCalsDone = true;
@@ -1876,457 +1923,6 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1876 return true; 1923 return true;
1877} 1924}
1878 1925
1879static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
1880{
1881 int bb_spur = AR_NO_SPUR;
1882 int freq;
1883 int bin, cur_bin;
1884 int bb_spur_off, spur_subchannel_sd;
1885 int spur_freq_sd;
1886 int spur_delta_phase;
1887 int denominator;
1888 int upper, lower, cur_vit_mask;
1889 int tmp, newVal;
1890 int i;
1891 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
1892 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
1893 };
1894 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
1895 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
1896 };
1897 int inc[4] = { 0, 100, 0, 0 };
1898 struct chan_centers centers;
1899
1900 int8_t mask_m[123];
1901 int8_t mask_p[123];
1902 int8_t mask_amt;
1903 int tmp_mask;
1904 int cur_bb_spur;
1905 bool is2GHz = IS_CHAN_2GHZ(chan);
1906
1907 memset(&mask_m, 0, sizeof(int8_t) * 123);
1908 memset(&mask_p, 0, sizeof(int8_t) * 123);
1909
1910 ath9k_hw_get_channel_centers(ah, chan, &centers);
1911 freq = centers.synth_center;
1912
1913 ah->config.spurmode = SPUR_ENABLE_EEPROM;
1914 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
1915 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
1916
1917 if (is2GHz)
1918 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
1919 else
1920 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
1921
1922 if (AR_NO_SPUR == cur_bb_spur)
1923 break;
1924 cur_bb_spur = cur_bb_spur - freq;
1925
1926 if (IS_CHAN_HT40(chan)) {
1927 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
1928 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
1929 bb_spur = cur_bb_spur;
1930 break;
1931 }
1932 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
1933 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
1934 bb_spur = cur_bb_spur;
1935 break;
1936 }
1937 }
1938
1939 if (AR_NO_SPUR == bb_spur) {
1940 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
1941 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
1942 return;
1943 } else {
1944 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
1945 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
1946 }
1947
1948 bin = bb_spur * 320;
1949
1950 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
1951
1952 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
1953 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
1954 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
1955 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
1956 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
1957
1958 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
1959 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
1960 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
1961 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
1962 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
1963 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
1964
1965 if (IS_CHAN_HT40(chan)) {
1966 if (bb_spur < 0) {
1967 spur_subchannel_sd = 1;
1968 bb_spur_off = bb_spur + 10;
1969 } else {
1970 spur_subchannel_sd = 0;
1971 bb_spur_off = bb_spur - 10;
1972 }
1973 } else {
1974 spur_subchannel_sd = 0;
1975 bb_spur_off = bb_spur;
1976 }
1977
1978 if (IS_CHAN_HT40(chan))
1979 spur_delta_phase =
1980 ((bb_spur * 262144) /
1981 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
1982 else
1983 spur_delta_phase =
1984 ((bb_spur * 524288) /
1985 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
1986
1987 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
1988 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
1989
1990 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
1991 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
1992 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
1993 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
1994
1995 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
1996 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
1997
1998 cur_bin = -6000;
1999 upper = bin + 100;
2000 lower = bin - 100;
2001
2002 for (i = 0; i < 4; i++) {
2003 int pilot_mask = 0;
2004 int chan_mask = 0;
2005 int bp = 0;
2006 for (bp = 0; bp < 30; bp++) {
2007 if ((cur_bin > lower) && (cur_bin < upper)) {
2008 pilot_mask = pilot_mask | 0x1 << bp;
2009 chan_mask = chan_mask | 0x1 << bp;
2010 }
2011 cur_bin += 100;
2012 }
2013 cur_bin += inc[i];
2014 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
2015 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
2016 }
2017
2018 cur_vit_mask = 6100;
2019 upper = bin + 120;
2020 lower = bin - 120;
2021
2022 for (i = 0; i < 123; i++) {
2023 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
2024
2025 /* workaround for gcc bug #37014 */
2026 volatile int tmp_v = abs(cur_vit_mask - bin);
2027
2028 if (tmp_v < 75)
2029 mask_amt = 1;
2030 else
2031 mask_amt = 0;
2032 if (cur_vit_mask < 0)
2033 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
2034 else
2035 mask_p[cur_vit_mask / 100] = mask_amt;
2036 }
2037 cur_vit_mask -= 100;
2038 }
2039
2040 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
2041 | (mask_m[48] << 26) | (mask_m[49] << 24)
2042 | (mask_m[50] << 22) | (mask_m[51] << 20)
2043 | (mask_m[52] << 18) | (mask_m[53] << 16)
2044 | (mask_m[54] << 14) | (mask_m[55] << 12)
2045 | (mask_m[56] << 10) | (mask_m[57] << 8)
2046 | (mask_m[58] << 6) | (mask_m[59] << 4)
2047 | (mask_m[60] << 2) | (mask_m[61] << 0);
2048 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
2049 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
2050
2051 tmp_mask = (mask_m[31] << 28)
2052 | (mask_m[32] << 26) | (mask_m[33] << 24)
2053 | (mask_m[34] << 22) | (mask_m[35] << 20)
2054 | (mask_m[36] << 18) | (mask_m[37] << 16)
2055 | (mask_m[48] << 14) | (mask_m[39] << 12)
2056 | (mask_m[40] << 10) | (mask_m[41] << 8)
2057 | (mask_m[42] << 6) | (mask_m[43] << 4)
2058 | (mask_m[44] << 2) | (mask_m[45] << 0);
2059 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
2060 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
2061
2062 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
2063 | (mask_m[18] << 26) | (mask_m[18] << 24)
2064 | (mask_m[20] << 22) | (mask_m[20] << 20)
2065 | (mask_m[22] << 18) | (mask_m[22] << 16)
2066 | (mask_m[24] << 14) | (mask_m[24] << 12)
2067 | (mask_m[25] << 10) | (mask_m[26] << 8)
2068 | (mask_m[27] << 6) | (mask_m[28] << 4)
2069 | (mask_m[29] << 2) | (mask_m[30] << 0);
2070 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
2071 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
2072
2073 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
2074 | (mask_m[2] << 26) | (mask_m[3] << 24)
2075 | (mask_m[4] << 22) | (mask_m[5] << 20)
2076 | (mask_m[6] << 18) | (mask_m[7] << 16)
2077 | (mask_m[8] << 14) | (mask_m[9] << 12)
2078 | (mask_m[10] << 10) | (mask_m[11] << 8)
2079 | (mask_m[12] << 6) | (mask_m[13] << 4)
2080 | (mask_m[14] << 2) | (mask_m[15] << 0);
2081 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
2082 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
2083
2084 tmp_mask = (mask_p[15] << 28)
2085 | (mask_p[14] << 26) | (mask_p[13] << 24)
2086 | (mask_p[12] << 22) | (mask_p[11] << 20)
2087 | (mask_p[10] << 18) | (mask_p[9] << 16)
2088 | (mask_p[8] << 14) | (mask_p[7] << 12)
2089 | (mask_p[6] << 10) | (mask_p[5] << 8)
2090 | (mask_p[4] << 6) | (mask_p[3] << 4)
2091 | (mask_p[2] << 2) | (mask_p[1] << 0);
2092 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
2093 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
2094
2095 tmp_mask = (mask_p[30] << 28)
2096 | (mask_p[29] << 26) | (mask_p[28] << 24)
2097 | (mask_p[27] << 22) | (mask_p[26] << 20)
2098 | (mask_p[25] << 18) | (mask_p[24] << 16)
2099 | (mask_p[23] << 14) | (mask_p[22] << 12)
2100 | (mask_p[21] << 10) | (mask_p[20] << 8)
2101 | (mask_p[19] << 6) | (mask_p[18] << 4)
2102 | (mask_p[17] << 2) | (mask_p[16] << 0);
2103 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
2104 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
2105
2106 tmp_mask = (mask_p[45] << 28)
2107 | (mask_p[44] << 26) | (mask_p[43] << 24)
2108 | (mask_p[42] << 22) | (mask_p[41] << 20)
2109 | (mask_p[40] << 18) | (mask_p[39] << 16)
2110 | (mask_p[38] << 14) | (mask_p[37] << 12)
2111 | (mask_p[36] << 10) | (mask_p[35] << 8)
2112 | (mask_p[34] << 6) | (mask_p[33] << 4)
2113 | (mask_p[32] << 2) | (mask_p[31] << 0);
2114 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
2115 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
2116
2117 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
2118 | (mask_p[59] << 26) | (mask_p[58] << 24)
2119 | (mask_p[57] << 22) | (mask_p[56] << 20)
2120 | (mask_p[55] << 18) | (mask_p[54] << 16)
2121 | (mask_p[53] << 14) | (mask_p[52] << 12)
2122 | (mask_p[51] << 10) | (mask_p[50] << 8)
2123 | (mask_p[49] << 6) | (mask_p[48] << 4)
2124 | (mask_p[47] << 2) | (mask_p[46] << 0);
2125 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
2126 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
2127}
2128
2129static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
2130{
2131 int bb_spur = AR_NO_SPUR;
2132 int bin, cur_bin;
2133 int spur_freq_sd;
2134 int spur_delta_phase;
2135 int denominator;
2136 int upper, lower, cur_vit_mask;
2137 int tmp, new;
2138 int i;
2139 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
2140 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
2141 };
2142 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
2143 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
2144 };
2145 int inc[4] = { 0, 100, 0, 0 };
2146
2147 int8_t mask_m[123];
2148 int8_t mask_p[123];
2149 int8_t mask_amt;
2150 int tmp_mask;
2151 int cur_bb_spur;
2152 bool is2GHz = IS_CHAN_2GHZ(chan);
2153
2154 memset(&mask_m, 0, sizeof(int8_t) * 123);
2155 memset(&mask_p, 0, sizeof(int8_t) * 123);
2156
2157 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
2158 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
2159 if (AR_NO_SPUR == cur_bb_spur)
2160 break;
2161 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
2162 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
2163 bb_spur = cur_bb_spur;
2164 break;
2165 }
2166 }
2167
2168 if (AR_NO_SPUR == bb_spur)
2169 return;
2170
2171 bin = bb_spur * 32;
2172
2173 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
2174 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
2175 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
2176 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
2177 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
2178
2179 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
2180
2181 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
2182 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
2183 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
2184 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
2185 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
2186 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
2187
2188 spur_delta_phase = ((bb_spur * 524288) / 100) &
2189 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
2190
2191 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
2192 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
2193
2194 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
2195 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
2196 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
2197 REG_WRITE(ah, AR_PHY_TIMING11, new);
2198
2199 cur_bin = -6000;
2200 upper = bin + 100;
2201 lower = bin - 100;
2202
2203 for (i = 0; i < 4; i++) {
2204 int pilot_mask = 0;
2205 int chan_mask = 0;
2206 int bp = 0;
2207 for (bp = 0; bp < 30; bp++) {
2208 if ((cur_bin > lower) && (cur_bin < upper)) {
2209 pilot_mask = pilot_mask | 0x1 << bp;
2210 chan_mask = chan_mask | 0x1 << bp;
2211 }
2212 cur_bin += 100;
2213 }
2214 cur_bin += inc[i];
2215 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
2216 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
2217 }
2218
2219 cur_vit_mask = 6100;
2220 upper = bin + 120;
2221 lower = bin - 120;
2222
2223 for (i = 0; i < 123; i++) {
2224 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
2225
2226 /* workaround for gcc bug #37014 */
2227 volatile int tmp_v = abs(cur_vit_mask - bin);
2228
2229 if (tmp_v < 75)
2230 mask_amt = 1;
2231 else
2232 mask_amt = 0;
2233 if (cur_vit_mask < 0)
2234 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
2235 else
2236 mask_p[cur_vit_mask / 100] = mask_amt;
2237 }
2238 cur_vit_mask -= 100;
2239 }
2240
2241 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
2242 | (mask_m[48] << 26) | (mask_m[49] << 24)
2243 | (mask_m[50] << 22) | (mask_m[51] << 20)
2244 | (mask_m[52] << 18) | (mask_m[53] << 16)
2245 | (mask_m[54] << 14) | (mask_m[55] << 12)
2246 | (mask_m[56] << 10) | (mask_m[57] << 8)
2247 | (mask_m[58] << 6) | (mask_m[59] << 4)
2248 | (mask_m[60] << 2) | (mask_m[61] << 0);
2249 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
2250 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
2251
2252 tmp_mask = (mask_m[31] << 28)
2253 | (mask_m[32] << 26) | (mask_m[33] << 24)
2254 | (mask_m[34] << 22) | (mask_m[35] << 20)
2255 | (mask_m[36] << 18) | (mask_m[37] << 16)
2256 | (mask_m[48] << 14) | (mask_m[39] << 12)
2257 | (mask_m[40] << 10) | (mask_m[41] << 8)
2258 | (mask_m[42] << 6) | (mask_m[43] << 4)
2259 | (mask_m[44] << 2) | (mask_m[45] << 0);
2260 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
2261 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
2262
2263 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
2264 | (mask_m[18] << 26) | (mask_m[18] << 24)
2265 | (mask_m[20] << 22) | (mask_m[20] << 20)
2266 | (mask_m[22] << 18) | (mask_m[22] << 16)
2267 | (mask_m[24] << 14) | (mask_m[24] << 12)
2268 | (mask_m[25] << 10) | (mask_m[26] << 8)
2269 | (mask_m[27] << 6) | (mask_m[28] << 4)
2270 | (mask_m[29] << 2) | (mask_m[30] << 0);
2271 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
2272 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
2273
2274 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
2275 | (mask_m[2] << 26) | (mask_m[3] << 24)
2276 | (mask_m[4] << 22) | (mask_m[5] << 20)
2277 | (mask_m[6] << 18) | (mask_m[7] << 16)
2278 | (mask_m[8] << 14) | (mask_m[9] << 12)
2279 | (mask_m[10] << 10) | (mask_m[11] << 8)
2280 | (mask_m[12] << 6) | (mask_m[13] << 4)
2281 | (mask_m[14] << 2) | (mask_m[15] << 0);
2282 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
2283 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
2284
2285 tmp_mask = (mask_p[15] << 28)
2286 | (mask_p[14] << 26) | (mask_p[13] << 24)
2287 | (mask_p[12] << 22) | (mask_p[11] << 20)
2288 | (mask_p[10] << 18) | (mask_p[9] << 16)
2289 | (mask_p[8] << 14) | (mask_p[7] << 12)
2290 | (mask_p[6] << 10) | (mask_p[5] << 8)
2291 | (mask_p[4] << 6) | (mask_p[3] << 4)
2292 | (mask_p[2] << 2) | (mask_p[1] << 0);
2293 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
2294 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
2295
2296 tmp_mask = (mask_p[30] << 28)
2297 | (mask_p[29] << 26) | (mask_p[28] << 24)
2298 | (mask_p[27] << 22) | (mask_p[26] << 20)
2299 | (mask_p[25] << 18) | (mask_p[24] << 16)
2300 | (mask_p[23] << 14) | (mask_p[22] << 12)
2301 | (mask_p[21] << 10) | (mask_p[20] << 8)
2302 | (mask_p[19] << 6) | (mask_p[18] << 4)
2303 | (mask_p[17] << 2) | (mask_p[16] << 0);
2304 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
2305 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
2306
2307 tmp_mask = (mask_p[45] << 28)
2308 | (mask_p[44] << 26) | (mask_p[43] << 24)
2309 | (mask_p[42] << 22) | (mask_p[41] << 20)
2310 | (mask_p[40] << 18) | (mask_p[39] << 16)
2311 | (mask_p[38] << 14) | (mask_p[37] << 12)
2312 | (mask_p[36] << 10) | (mask_p[35] << 8)
2313 | (mask_p[34] << 6) | (mask_p[33] << 4)
2314 | (mask_p[32] << 2) | (mask_p[31] << 0);
2315 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
2316 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
2317
2318 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
2319 | (mask_p[59] << 26) | (mask_p[58] << 24)
2320 | (mask_p[57] << 22) | (mask_p[56] << 20)
2321 | (mask_p[55] << 18) | (mask_p[54] << 16)
2322 | (mask_p[53] << 14) | (mask_p[52] << 12)
2323 | (mask_p[51] << 10) | (mask_p[50] << 8)
2324 | (mask_p[49] << 6) | (mask_p[48] << 4)
2325 | (mask_p[47] << 2) | (mask_p[46] << 0);
2326 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
2327 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
2328}
2329
2330static void ath9k_enable_rfkill(struct ath_hw *ah) 1926static void ath9k_enable_rfkill(struct ath_hw *ah)
2331{ 1927{
2332 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, 1928 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
@@ -2342,17 +1938,16 @@ static void ath9k_enable_rfkill(struct ath_hw *ah)
2342int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1938int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2343 bool bChannelChange) 1939 bool bChannelChange)
2344{ 1940{
1941 struct ath_common *common = ath9k_hw_common(ah);
2345 u32 saveLedState; 1942 u32 saveLedState;
2346 struct ath_softc *sc = ah->ah_sc;
2347 struct ath9k_channel *curchan = ah->curchan; 1943 struct ath9k_channel *curchan = ah->curchan;
2348 u32 saveDefAntenna; 1944 u32 saveDefAntenna;
2349 u32 macStaId1; 1945 u32 macStaId1;
2350 u64 tsf = 0; 1946 u64 tsf = 0;
2351 int i, rx_chainmask, r; 1947 int i, rx_chainmask, r;
2352 1948
2353 ah->extprotspacing = sc->ht_extprotspacing; 1949 ah->txchainmask = common->tx_chainmask;
2354 ah->txchainmask = sc->tx_chainmask; 1950 ah->rxchainmask = common->rx_chainmask;
2355 ah->rxchainmask = sc->rx_chainmask;
2356 1951
2357 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1952 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
2358 return -EIO; 1953 return -EIO;
@@ -2369,7 +1964,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2369 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) || 1964 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
2370 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) { 1965 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
2371 1966
2372 if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) { 1967 if (ath9k_hw_channel_change(ah, chan)) {
2373 ath9k_hw_loadnf(ah, ah->curchan); 1968 ath9k_hw_loadnf(ah, ah->curchan);
2374 ath9k_hw_start_nfcal(ah); 1969 ath9k_hw_start_nfcal(ah);
2375 return 0; 1970 return 0;
@@ -2400,7 +1995,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2400 } 1995 }
2401 1996
2402 if (!ath9k_hw_chip_reset(ah, chan)) { 1997 if (!ath9k_hw_chip_reset(ah, chan)) {
2403 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Chip reset failed\n"); 1998 ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n");
2404 return -EINVAL; 1999 return -EINVAL;
2405 } 2000 }
2406 2001
@@ -2429,7 +2024,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2429 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, 2024 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
2430 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); 2025 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
2431 } 2026 }
2432 r = ath9k_hw_process_ini(ah, chan, sc->tx_chan_width); 2027 r = ath9k_hw_process_ini(ah, chan);
2433 if (r) 2028 if (r)
2434 return r; 2029 return r;
2435 2030
@@ -2453,17 +2048,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2453 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 2048 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
2454 ath9k_hw_set_delta_slope(ah, chan); 2049 ath9k_hw_set_delta_slope(ah, chan);
2455 2050
2456 if (AR_SREV_9280_10_OR_LATER(ah)) 2051 ah->ath9k_hw_spur_mitigate_freq(ah, chan);
2457 ath9k_hw_9280_spur_mitigate(ah, chan);
2458 else
2459 ath9k_hw_spur_mitigate(ah, chan);
2460
2461 ah->eep_ops->set_board_values(ah, chan); 2052 ah->eep_ops->set_board_values(ah, chan);
2462 2053
2463 ath9k_hw_decrease_chain_power(ah, chan); 2054 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
2464 2055 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
2465 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ah->macaddr));
2466 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ah->macaddr + 4)
2467 | macStaId1 2056 | macStaId1
2468 | AR_STA_ID1_RTS_USE_DEF 2057 | AR_STA_ID1_RTS_USE_DEF
2469 | (ah->config. 2058 | (ah->config.
@@ -2471,24 +2060,19 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2471 | ah->sta_id1_defaults); 2060 | ah->sta_id1_defaults);
2472 ath9k_hw_set_operating_mode(ah, ah->opmode); 2061 ath9k_hw_set_operating_mode(ah, ah->opmode);
2473 2062
2474 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask)); 2063 ath_hw_setbssidmask(common);
2475 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
2476 2064
2477 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); 2065 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
2478 2066
2479 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid)); 2067 ath9k_hw_write_associd(ah);
2480 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
2481 ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
2482 2068
2483 REG_WRITE(ah, AR_ISR, ~0); 2069 REG_WRITE(ah, AR_ISR, ~0);
2484 2070
2485 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 2071 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
2486 2072
2487 if (AR_SREV_9280_10_OR_LATER(ah)) 2073 r = ah->ath9k_hw_rf_set_freq(ah, chan);
2488 ath9k_hw_ar9280_set_channel(ah, chan); 2074 if (r)
2489 else 2075 return r;
2490 if (!(ath9k_hw_set_channel(ah, chan)))
2491 return -EIO;
2492 2076
2493 for (i = 0; i < AR_NUM_DCU; i++) 2077 for (i = 0; i < AR_NUM_DCU; i++)
2494 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); 2078 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
@@ -2558,13 +2142,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2558 u32 mask; 2142 u32 mask;
2559 mask = REG_READ(ah, AR_CFG); 2143 mask = REG_READ(ah, AR_CFG);
2560 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { 2144 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
2561 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 2145 ath_print(common, ATH_DBG_RESET,
2562 "CFG Byte Swap Set 0x%x\n", mask); 2146 "CFG Byte Swap Set 0x%x\n", mask);
2563 } else { 2147 } else {
2564 mask = 2148 mask =
2565 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; 2149 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
2566 REG_WRITE(ah, AR_CFG, mask); 2150 REG_WRITE(ah, AR_CFG, mask);
2567 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 2151 ath_print(common, ATH_DBG_RESET,
2568 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); 2152 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
2569 } 2153 }
2570 } else { 2154 } else {
@@ -2577,11 +2161,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2577#endif 2161#endif
2578 } 2162 }
2579 2163
2580 if (ah->ah_sc->sc_flags & SC_OP_BTCOEX_ENABLED) 2164 if (ah->btcoex_hw.enabled)
2581 ath9k_hw_btcoex_enable(ah); 2165 ath9k_hw_btcoex_enable(ah);
2582 2166
2583 return 0; 2167 return 0;
2584} 2168}
2169EXPORT_SYMBOL(ath9k_hw_reset);
2585 2170
2586/************************/ 2171/************************/
2587/* Key Cache Management */ 2172/* Key Cache Management */
@@ -2592,8 +2177,8 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
2592 u32 keyType; 2177 u32 keyType;
2593 2178
2594 if (entry >= ah->caps.keycache_size) { 2179 if (entry >= ah->caps.keycache_size) {
2595 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2180 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
2596 "keychache entry %u out of range\n", entry); 2181 "keychache entry %u out of range\n", entry);
2597 return false; 2182 return false;
2598 } 2183 }
2599 2184
@@ -2620,14 +2205,15 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
2620 2205
2621 return true; 2206 return true;
2622} 2207}
2208EXPORT_SYMBOL(ath9k_hw_keyreset);
2623 2209
2624bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) 2210bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
2625{ 2211{
2626 u32 macHi, macLo; 2212 u32 macHi, macLo;
2627 2213
2628 if (entry >= ah->caps.keycache_size) { 2214 if (entry >= ah->caps.keycache_size) {
2629 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2215 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
2630 "keychache entry %u out of range\n", entry); 2216 "keychache entry %u out of range\n", entry);
2631 return false; 2217 return false;
2632 } 2218 }
2633 2219
@@ -2648,18 +2234,20 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
2648 2234
2649 return true; 2235 return true;
2650} 2236}
2237EXPORT_SYMBOL(ath9k_hw_keysetmac);
2651 2238
2652bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, 2239bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2653 const struct ath9k_keyval *k, 2240 const struct ath9k_keyval *k,
2654 const u8 *mac) 2241 const u8 *mac)
2655{ 2242{
2656 const struct ath9k_hw_capabilities *pCap = &ah->caps; 2243 const struct ath9k_hw_capabilities *pCap = &ah->caps;
2244 struct ath_common *common = ath9k_hw_common(ah);
2657 u32 key0, key1, key2, key3, key4; 2245 u32 key0, key1, key2, key3, key4;
2658 u32 keyType; 2246 u32 keyType;
2659 2247
2660 if (entry >= pCap->keycache_size) { 2248 if (entry >= pCap->keycache_size) {
2661 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2249 ath_print(common, ATH_DBG_FATAL,
2662 "keycache entry %u out of range\n", entry); 2250 "keycache entry %u out of range\n", entry);
2663 return false; 2251 return false;
2664 } 2252 }
2665 2253
@@ -2669,9 +2257,9 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2669 break; 2257 break;
2670 case ATH9K_CIPHER_AES_CCM: 2258 case ATH9K_CIPHER_AES_CCM:
2671 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) { 2259 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
2672 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2260 ath_print(common, ATH_DBG_ANY,
2673 "AES-CCM not supported by mac rev 0x%x\n", 2261 "AES-CCM not supported by mac rev 0x%x\n",
2674 ah->hw_version.macRev); 2262 ah->hw_version.macRev);
2675 return false; 2263 return false;
2676 } 2264 }
2677 keyType = AR_KEYTABLE_TYPE_CCM; 2265 keyType = AR_KEYTABLE_TYPE_CCM;
@@ -2680,15 +2268,15 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2680 keyType = AR_KEYTABLE_TYPE_TKIP; 2268 keyType = AR_KEYTABLE_TYPE_TKIP;
2681 if (ATH9K_IS_MIC_ENABLED(ah) 2269 if (ATH9K_IS_MIC_ENABLED(ah)
2682 && entry + 64 >= pCap->keycache_size) { 2270 && entry + 64 >= pCap->keycache_size) {
2683 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2271 ath_print(common, ATH_DBG_ANY,
2684 "entry %u inappropriate for TKIP\n", entry); 2272 "entry %u inappropriate for TKIP\n", entry);
2685 return false; 2273 return false;
2686 } 2274 }
2687 break; 2275 break;
2688 case ATH9K_CIPHER_WEP: 2276 case ATH9K_CIPHER_WEP:
2689 if (k->kv_len < WLAN_KEY_LEN_WEP40) { 2277 if (k->kv_len < WLAN_KEY_LEN_WEP40) {
2690 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2278 ath_print(common, ATH_DBG_ANY,
2691 "WEP key length %u too small\n", k->kv_len); 2279 "WEP key length %u too small\n", k->kv_len);
2692 return false; 2280 return false;
2693 } 2281 }
2694 if (k->kv_len <= WLAN_KEY_LEN_WEP40) 2282 if (k->kv_len <= WLAN_KEY_LEN_WEP40)
@@ -2702,8 +2290,8 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2702 keyType = AR_KEYTABLE_TYPE_CLR; 2290 keyType = AR_KEYTABLE_TYPE_CLR;
2703 break; 2291 break;
2704 default: 2292 default:
2705 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2293 ath_print(common, ATH_DBG_FATAL,
2706 "cipher %u not supported\n", k->kv_type); 2294 "cipher %u not supported\n", k->kv_type);
2707 return false; 2295 return false;
2708 } 2296 }
2709 2297
@@ -2845,6 +2433,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2845 2433
2846 return true; 2434 return true;
2847} 2435}
2436EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
2848 2437
2849bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry) 2438bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
2850{ 2439{
@@ -2855,6 +2444,7 @@ bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
2855 } 2444 }
2856 return false; 2445 return false;
2857} 2446}
2447EXPORT_SYMBOL(ath9k_hw_keyisvalid);
2858 2448
2859/******************************/ 2449/******************************/
2860/* Power Management (Chipset) */ 2450/* Power Management (Chipset) */
@@ -2869,8 +2459,9 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2869 if (!AR_SREV_9100(ah)) 2459 if (!AR_SREV_9100(ah))
2870 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 2460 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2871 2461
2872 REG_CLR_BIT(ah, (AR_RTC_RESET), 2462 if(!AR_SREV_5416(ah))
2873 AR_RTC_RESET_EN); 2463 REG_CLR_BIT(ah, (AR_RTC_RESET),
2464 AR_RTC_RESET_EN);
2874 } 2465 }
2875} 2466}
2876 2467
@@ -2902,6 +2493,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2902 ATH9K_RESET_POWER_ON) != true) { 2493 ATH9K_RESET_POWER_ON) != true) {
2903 return false; 2494 return false;
2904 } 2495 }
2496 ath9k_hw_init_pll(ah, NULL);
2905 } 2497 }
2906 if (AR_SREV_9100(ah)) 2498 if (AR_SREV_9100(ah))
2907 REG_SET_BIT(ah, AR_RTC_RESET, 2499 REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2920,8 +2512,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2920 AR_RTC_FORCE_WAKE_EN); 2512 AR_RTC_FORCE_WAKE_EN);
2921 } 2513 }
2922 if (i == 0) { 2514 if (i == 0) {
2923 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2515 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
2924 "Failed to wakeup in %uus\n", POWER_UP_TIME / 20); 2516 "Failed to wakeup in %uus\n",
2517 POWER_UP_TIME / 20);
2925 return false; 2518 return false;
2926 } 2519 }
2927 } 2520 }
@@ -2931,9 +2524,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2931 return true; 2524 return true;
2932} 2525}
2933 2526
2934static bool ath9k_hw_setpower_nolock(struct ath_hw *ah, 2527bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2935 enum ath9k_power_mode mode)
2936{ 2528{
2529 struct ath_common *common = ath9k_hw_common(ah);
2937 int status = true, setChip = true; 2530 int status = true, setChip = true;
2938 static const char *modes[] = { 2531 static const char *modes[] = {
2939 "AWAKE", 2532 "AWAKE",
@@ -2945,8 +2538,8 @@ static bool ath9k_hw_setpower_nolock(struct ath_hw *ah,
2945 if (ah->power_mode == mode) 2538 if (ah->power_mode == mode)
2946 return status; 2539 return status;
2947 2540
2948 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s -> %s\n", 2541 ath_print(common, ATH_DBG_RESET, "%s -> %s\n",
2949 modes[ah->power_mode], modes[mode]); 2542 modes[ah->power_mode], modes[mode]);
2950 2543
2951 switch (mode) { 2544 switch (mode) {
2952 case ATH9K_PM_AWAKE: 2545 case ATH9K_PM_AWAKE:
@@ -2960,59 +2553,15 @@ static bool ath9k_hw_setpower_nolock(struct ath_hw *ah,
2960 ath9k_set_power_network_sleep(ah, setChip); 2553 ath9k_set_power_network_sleep(ah, setChip);
2961 break; 2554 break;
2962 default: 2555 default:
2963 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2556 ath_print(common, ATH_DBG_FATAL,
2964 "Unknown power mode %u\n", mode); 2557 "Unknown power mode %u\n", mode);
2965 return false; 2558 return false;
2966 } 2559 }
2967 ah->power_mode = mode; 2560 ah->power_mode = mode;
2968 2561
2969 return status; 2562 return status;
2970} 2563}
2971 2564EXPORT_SYMBOL(ath9k_hw_setpower);
2972bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2973{
2974 unsigned long flags;
2975 bool ret;
2976
2977 spin_lock_irqsave(&ah->ah_sc->sc_pm_lock, flags);
2978 ret = ath9k_hw_setpower_nolock(ah, mode);
2979 spin_unlock_irqrestore(&ah->ah_sc->sc_pm_lock, flags);
2980
2981 return ret;
2982}
2983
2984void ath9k_ps_wakeup(struct ath_softc *sc)
2985{
2986 unsigned long flags;
2987
2988 spin_lock_irqsave(&sc->sc_pm_lock, flags);
2989 if (++sc->ps_usecount != 1)
2990 goto unlock;
2991
2992 ath9k_hw_setpower_nolock(sc->sc_ah, ATH9K_PM_AWAKE);
2993
2994 unlock:
2995 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2996}
2997
2998void ath9k_ps_restore(struct ath_softc *sc)
2999{
3000 unsigned long flags;
3001
3002 spin_lock_irqsave(&sc->sc_pm_lock, flags);
3003 if (--sc->ps_usecount != 0)
3004 goto unlock;
3005
3006 if (sc->ps_enabled &&
3007 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
3008 SC_OP_WAIT_FOR_CAB |
3009 SC_OP_WAIT_FOR_PSPOLL_DATA |
3010 SC_OP_WAIT_FOR_TX_ACK)))
3011 ath9k_hw_setpower_nolock(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
3012
3013 unlock:
3014 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
3015}
3016 2565
3017/* 2566/*
3018 * Helper for ASPM support. 2567 * Helper for ASPM support.
@@ -3145,6 +2694,7 @@ void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
3145 } 2694 }
3146 } 2695 }
3147} 2696}
2697EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
3148 2698
3149/**********************/ 2699/**********************/
3150/* Interrupt Handling */ 2700/* Interrupt Handling */
@@ -3168,6 +2718,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
3168 2718
3169 return false; 2719 return false;
3170} 2720}
2721EXPORT_SYMBOL(ath9k_hw_intrpend);
3171 2722
3172bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked) 2723bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3173{ 2724{
@@ -3176,6 +2727,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3176 struct ath9k_hw_capabilities *pCap = &ah->caps; 2727 struct ath9k_hw_capabilities *pCap = &ah->caps;
3177 u32 sync_cause = 0; 2728 u32 sync_cause = 0;
3178 bool fatal_int = false; 2729 bool fatal_int = false;
2730 struct ath_common *common = ath9k_hw_common(ah);
3179 2731
3180 if (!AR_SREV_9100(ah)) { 2732 if (!AR_SREV_9100(ah)) {
3181 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { 2733 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
@@ -3249,8 +2801,8 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3249 } 2801 }
3250 2802
3251 if (isr & AR_ISR_RXORN) { 2803 if (isr & AR_ISR_RXORN) {
3252 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2804 ath_print(common, ATH_DBG_INTERRUPT,
3253 "receive FIFO overrun interrupt\n"); 2805 "receive FIFO overrun interrupt\n");
3254 } 2806 }
3255 2807
3256 if (!AR_SREV_9100(ah)) { 2808 if (!AR_SREV_9100(ah)) {
@@ -3292,25 +2844,25 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3292 2844
3293 if (fatal_int) { 2845 if (fatal_int) {
3294 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { 2846 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
3295 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2847 ath_print(common, ATH_DBG_ANY,
3296 "received PCI FATAL interrupt\n"); 2848 "received PCI FATAL interrupt\n");
3297 } 2849 }
3298 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { 2850 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
3299 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2851 ath_print(common, ATH_DBG_ANY,
3300 "received PCI PERR interrupt\n"); 2852 "received PCI PERR interrupt\n");
3301 } 2853 }
3302 *masked |= ATH9K_INT_FATAL; 2854 *masked |= ATH9K_INT_FATAL;
3303 } 2855 }
3304 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 2856 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
3305 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2857 ath_print(common, ATH_DBG_INTERRUPT,
3306 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); 2858 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
3307 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 2859 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
3308 REG_WRITE(ah, AR_RC, 0); 2860 REG_WRITE(ah, AR_RC, 0);
3309 *masked |= ATH9K_INT_FATAL; 2861 *masked |= ATH9K_INT_FATAL;
3310 } 2862 }
3311 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { 2863 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
3312 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2864 ath_print(common, ATH_DBG_INTERRUPT,
3313 "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); 2865 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
3314 } 2866 }
3315 2867
3316 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); 2868 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -3319,17 +2871,19 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3319 2871
3320 return true; 2872 return true;
3321} 2873}
2874EXPORT_SYMBOL(ath9k_hw_getisr);
3322 2875
3323enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) 2876enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3324{ 2877{
3325 u32 omask = ah->mask_reg; 2878 u32 omask = ah->mask_reg;
3326 u32 mask, mask2; 2879 u32 mask, mask2;
3327 struct ath9k_hw_capabilities *pCap = &ah->caps; 2880 struct ath9k_hw_capabilities *pCap = &ah->caps;
2881 struct ath_common *common = ath9k_hw_common(ah);
3328 2882
3329 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 2883 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
3330 2884
3331 if (omask & ATH9K_INT_GLOBAL) { 2885 if (omask & ATH9K_INT_GLOBAL) {
3332 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "disable IER\n"); 2886 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
3333 REG_WRITE(ah, AR_IER, AR_IER_DISABLE); 2887 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
3334 (void) REG_READ(ah, AR_IER); 2888 (void) REG_READ(ah, AR_IER);
3335 if (!AR_SREV_9100(ah)) { 2889 if (!AR_SREV_9100(ah)) {
@@ -3386,7 +2940,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3386 mask2 |= AR_IMR_S2_CST; 2940 mask2 |= AR_IMR_S2_CST;
3387 } 2941 }
3388 2942
3389 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); 2943 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
3390 REG_WRITE(ah, AR_IMR, mask); 2944 REG_WRITE(ah, AR_IMR, mask);
3391 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM | 2945 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
3392 AR_IMR_S2_DTIM | 2946 AR_IMR_S2_DTIM |
@@ -3406,7 +2960,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3406 } 2960 }
3407 2961
3408 if (ints & ATH9K_INT_GLOBAL) { 2962 if (ints & ATH9K_INT_GLOBAL) {
3409 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "enable IER\n"); 2963 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
3410 REG_WRITE(ah, AR_IER, AR_IER_ENABLE); 2964 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
3411 if (!AR_SREV_9100(ah)) { 2965 if (!AR_SREV_9100(ah)) {
3412 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 2966 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
@@ -3419,12 +2973,13 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3419 REG_WRITE(ah, AR_INTR_SYNC_MASK, 2973 REG_WRITE(ah, AR_INTR_SYNC_MASK,
3420 AR_INTR_SYNC_DEFAULT); 2974 AR_INTR_SYNC_DEFAULT);
3421 } 2975 }
3422 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", 2976 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
3423 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); 2977 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
3424 } 2978 }
3425 2979
3426 return omask; 2980 return omask;
3427} 2981}
2982EXPORT_SYMBOL(ath9k_hw_set_interrupts);
3428 2983
3429/*******************/ 2984/*******************/
3430/* Beacon Handling */ 2985/* Beacon Handling */
@@ -3467,9 +3022,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3467 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 3022 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
3468 break; 3023 break;
3469 default: 3024 default:
3470 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, 3025 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
3471 "%s: unsupported opmode: %d\n", 3026 "%s: unsupported opmode: %d\n",
3472 __func__, ah->opmode); 3027 __func__, ah->opmode);
3473 return; 3028 return;
3474 break; 3029 break;
3475 } 3030 }
@@ -3481,18 +3036,19 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3481 3036
3482 beacon_period &= ~ATH9K_BEACON_ENA; 3037 beacon_period &= ~ATH9K_BEACON_ENA;
3483 if (beacon_period & ATH9K_BEACON_RESET_TSF) { 3038 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
3484 beacon_period &= ~ATH9K_BEACON_RESET_TSF;
3485 ath9k_hw_reset_tsf(ah); 3039 ath9k_hw_reset_tsf(ah);
3486 } 3040 }
3487 3041
3488 REG_SET_BIT(ah, AR_TIMER_MODE, flags); 3042 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
3489} 3043}
3044EXPORT_SYMBOL(ath9k_hw_beaconinit);
3490 3045
3491void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 3046void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3492 const struct ath9k_beacon_state *bs) 3047 const struct ath9k_beacon_state *bs)
3493{ 3048{
3494 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; 3049 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
3495 struct ath9k_hw_capabilities *pCap = &ah->caps; 3050 struct ath9k_hw_capabilities *pCap = &ah->caps;
3051 struct ath_common *common = ath9k_hw_common(ah);
3496 3052
3497 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); 3053 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
3498 3054
@@ -3518,10 +3074,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3518 else 3074 else
3519 nextTbtt = bs->bs_nexttbtt; 3075 nextTbtt = bs->bs_nexttbtt;
3520 3076
3521 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim); 3077 ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
3522 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt); 3078 ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
3523 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); 3079 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
3524 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); 3080 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
3525 3081
3526 REG_WRITE(ah, AR_NEXT_DTIM, 3082 REG_WRITE(ah, AR_NEXT_DTIM,
3527 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); 3083 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
@@ -3549,6 +3105,7 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3549 /* TSF Out of Range Threshold */ 3105 /* TSF Out of Range Threshold */
3550 REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold); 3106 REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
3551} 3107}
3108EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
3552 3109
3553/*******************/ 3110/*******************/
3554/* HW Capabilities */ 3111/* HW Capabilities */
@@ -3558,7 +3115,8 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3558{ 3115{
3559 struct ath9k_hw_capabilities *pCap = &ah->caps; 3116 struct ath9k_hw_capabilities *pCap = &ah->caps;
3560 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 3117 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
3561 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 3118 struct ath_common *common = ath9k_hw_common(ah);
3119 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
3562 3120
3563 u16 capField = 0, eeval; 3121 u16 capField = 0, eeval;
3564 3122
@@ -3579,8 +3137,8 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3579 regulatory->current_rd += 5; 3137 regulatory->current_rd += 5;
3580 else if (regulatory->current_rd == 0x41) 3138 else if (regulatory->current_rd == 0x41)
3581 regulatory->current_rd = 0x43; 3139 regulatory->current_rd = 0x43;
3582 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 3140 ath_print(common, ATH_DBG_REGULATORY,
3583 "regdomain mapped to 0x%x\n", regulatory->current_rd); 3141 "regdomain mapped to 0x%x\n", regulatory->current_rd);
3584 } 3142 }
3585 3143
3586 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); 3144 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
@@ -3719,7 +3277,10 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3719 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN; 3277 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
3720 } 3278 }
3721 3279
3722 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND; 3280 /* Advertise midband for AR5416 with FCC midband set in eeprom */
3281 if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
3282 AR_SREV_5416(ah))
3283 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
3723 3284
3724 pCap->num_antcfg_5ghz = 3285 pCap->num_antcfg_5ghz =
3725 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ); 3286 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
@@ -3727,18 +3288,18 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3727 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ); 3288 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
3728 3289
3729 if (AR_SREV_9280_10_OR_LATER(ah) && 3290 if (AR_SREV_9280_10_OR_LATER(ah) &&
3730 ath_btcoex_supported(ah->hw_version.subsysid)) { 3291 ath9k_hw_btcoex_supported(ah)) {
3731 btcoex_info->btactive_gpio = ATH_BTACTIVE_GPIO; 3292 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
3732 btcoex_info->wlanactive_gpio = ATH_WLANACTIVE_GPIO; 3293 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
3733 3294
3734 if (AR_SREV_9285(ah)) { 3295 if (AR_SREV_9285(ah)) {
3735 btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_3WIRE; 3296 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
3736 btcoex_info->btpriority_gpio = ATH_BTPRIORITY_GPIO; 3297 btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO;
3737 } else { 3298 } else {
3738 btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_2WIRE; 3299 btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
3739 } 3300 }
3740 } else { 3301 } else {
3741 btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_NONE; 3302 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
3742 } 3303 }
3743} 3304}
3744 3305
@@ -3812,6 +3373,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3812 return false; 3373 return false;
3813 } 3374 }
3814} 3375}
3376EXPORT_SYMBOL(ath9k_hw_getcapability);
3815 3377
3816bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, 3378bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3817 u32 capability, u32 setting, int *status) 3379 u32 capability, u32 setting, int *status)
@@ -3845,6 +3407,7 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3845 return false; 3407 return false;
3846 } 3408 }
3847} 3409}
3410EXPORT_SYMBOL(ath9k_hw_setcapability);
3848 3411
3849/****************************/ 3412/****************************/
3850/* GPIO / RFKILL / Antennae */ 3413/* GPIO / RFKILL / Antennae */
@@ -3882,7 +3445,7 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
3882{ 3445{
3883 u32 gpio_shift; 3446 u32 gpio_shift;
3884 3447
3885 ASSERT(gpio < ah->caps.num_gpio_pins); 3448 BUG_ON(gpio >= ah->caps.num_gpio_pins);
3886 3449
3887 gpio_shift = gpio << 1; 3450 gpio_shift = gpio << 1;
3888 3451
@@ -3891,6 +3454,7 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
3891 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), 3454 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
3892 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 3455 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3893} 3456}
3457EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
3894 3458
3895u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) 3459u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3896{ 3460{
@@ -3909,6 +3473,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3909 else 3473 else
3910 return MS_REG_READ(AR, gpio) != 0; 3474 return MS_REG_READ(AR, gpio) != 0;
3911} 3475}
3476EXPORT_SYMBOL(ath9k_hw_gpio_get);
3912 3477
3913void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, 3478void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
3914 u32 ah_signal_type) 3479 u32 ah_signal_type)
@@ -3924,67 +3489,26 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
3924 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), 3489 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
3925 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 3490 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3926} 3491}
3492EXPORT_SYMBOL(ath9k_hw_cfg_output);
3927 3493
3928void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) 3494void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
3929{ 3495{
3930 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 3496 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
3931 AR_GPIO_BIT(gpio)); 3497 AR_GPIO_BIT(gpio));
3932} 3498}
3499EXPORT_SYMBOL(ath9k_hw_set_gpio);
3933 3500
3934u32 ath9k_hw_getdefantenna(struct ath_hw *ah) 3501u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
3935{ 3502{
3936 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; 3503 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
3937} 3504}
3505EXPORT_SYMBOL(ath9k_hw_getdefantenna);
3938 3506
3939void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) 3507void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
3940{ 3508{
3941 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); 3509 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
3942} 3510}
3943 3511EXPORT_SYMBOL(ath9k_hw_setantenna);
3944bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
3945 enum ath9k_ant_setting settings,
3946 struct ath9k_channel *chan,
3947 u8 *tx_chainmask,
3948 u8 *rx_chainmask,
3949 u8 *antenna_cfgd)
3950{
3951 static u8 tx_chainmask_cfg, rx_chainmask_cfg;
3952
3953 if (AR_SREV_9280(ah)) {
3954 if (!tx_chainmask_cfg) {
3955
3956 tx_chainmask_cfg = *tx_chainmask;
3957 rx_chainmask_cfg = *rx_chainmask;
3958 }
3959
3960 switch (settings) {
3961 case ATH9K_ANT_FIXED_A:
3962 *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
3963 *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK;
3964 *antenna_cfgd = true;
3965 break;
3966 case ATH9K_ANT_FIXED_B:
3967 if (ah->caps.tx_chainmask >
3968 ATH9K_ANTENNA1_CHAINMASK) {
3969 *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
3970 }
3971 *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK;
3972 *antenna_cfgd = true;
3973 break;
3974 case ATH9K_ANT_VARIABLE:
3975 *tx_chainmask = tx_chainmask_cfg;
3976 *rx_chainmask = rx_chainmask_cfg;
3977 *antenna_cfgd = true;
3978 break;
3979 default:
3980 break;
3981 }
3982 } else {
3983 ah->config.diversity_control = settings;
3984 }
3985
3986 return true;
3987}
3988 3512
3989/*********************/ 3513/*********************/
3990/* General Operation */ 3514/* General Operation */
@@ -4002,6 +3526,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
4002 3526
4003 return bits; 3527 return bits;
4004} 3528}
3529EXPORT_SYMBOL(ath9k_hw_getrxfilter);
4005 3530
4006void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits) 3531void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
4007{ 3532{
@@ -4023,19 +3548,30 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
4023 REG_WRITE(ah, AR_RXCFG, 3548 REG_WRITE(ah, AR_RXCFG,
4024 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); 3549 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
4025} 3550}
3551EXPORT_SYMBOL(ath9k_hw_setrxfilter);
4026 3552
4027bool ath9k_hw_phy_disable(struct ath_hw *ah) 3553bool ath9k_hw_phy_disable(struct ath_hw *ah)
4028{ 3554{
4029 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM); 3555 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
3556 return false;
3557
3558 ath9k_hw_init_pll(ah, NULL);
3559 return true;
4030} 3560}
3561EXPORT_SYMBOL(ath9k_hw_phy_disable);
4031 3562
4032bool ath9k_hw_disable(struct ath_hw *ah) 3563bool ath9k_hw_disable(struct ath_hw *ah)
4033{ 3564{
4034 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 3565 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
4035 return false; 3566 return false;
4036 3567
4037 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD); 3568 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
3569 return false;
3570
3571 ath9k_hw_init_pll(ah, NULL);
3572 return true;
4038} 3573}
3574EXPORT_SYMBOL(ath9k_hw_disable);
4039 3575
4040void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit) 3576void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
4041{ 3577{
@@ -4052,35 +3588,36 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
4052 min((u32) MAX_RATE_POWER, 3588 min((u32) MAX_RATE_POWER,
4053 (u32) regulatory->power_limit)); 3589 (u32) regulatory->power_limit));
4054} 3590}
3591EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
4055 3592
4056void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac) 3593void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
4057{ 3594{
4058 memcpy(ah->macaddr, mac, ETH_ALEN); 3595 memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
4059} 3596}
3597EXPORT_SYMBOL(ath9k_hw_setmac);
4060 3598
4061void ath9k_hw_setopmode(struct ath_hw *ah) 3599void ath9k_hw_setopmode(struct ath_hw *ah)
4062{ 3600{
4063 ath9k_hw_set_operating_mode(ah, ah->opmode); 3601 ath9k_hw_set_operating_mode(ah, ah->opmode);
4064} 3602}
3603EXPORT_SYMBOL(ath9k_hw_setopmode);
4065 3604
4066void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1) 3605void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
4067{ 3606{
4068 REG_WRITE(ah, AR_MCAST_FIL0, filter0); 3607 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
4069 REG_WRITE(ah, AR_MCAST_FIL1, filter1); 3608 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
4070} 3609}
3610EXPORT_SYMBOL(ath9k_hw_setmcastfilter);
4071 3611
4072void ath9k_hw_setbssidmask(struct ath_softc *sc) 3612void ath9k_hw_write_associd(struct ath_hw *ah)
4073{ 3613{
4074 REG_WRITE(sc->sc_ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask)); 3614 struct ath_common *common = ath9k_hw_common(ah);
4075 REG_WRITE(sc->sc_ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
4076}
4077 3615
4078void ath9k_hw_write_associd(struct ath_softc *sc) 3616 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
4079{ 3617 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
4080 REG_WRITE(sc->sc_ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid)); 3618 ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
4081 REG_WRITE(sc->sc_ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
4082 ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
4083} 3619}
3620EXPORT_SYMBOL(ath9k_hw_write_associd);
4084 3621
4085u64 ath9k_hw_gettsf64(struct ath_hw *ah) 3622u64 ath9k_hw_gettsf64(struct ath_hw *ah)
4086{ 3623{
@@ -4091,24 +3628,25 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah)
4091 3628
4092 return tsf; 3629 return tsf;
4093} 3630}
3631EXPORT_SYMBOL(ath9k_hw_gettsf64);
4094 3632
4095void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64) 3633void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
4096{ 3634{
4097 REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); 3635 REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
4098 REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); 3636 REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
4099} 3637}
3638EXPORT_SYMBOL(ath9k_hw_settsf64);
4100 3639
4101void ath9k_hw_reset_tsf(struct ath_hw *ah) 3640void ath9k_hw_reset_tsf(struct ath_hw *ah)
4102{ 3641{
4103 ath9k_ps_wakeup(ah->ah_sc);
4104 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, 3642 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
4105 AH_TSF_WRITE_TIMEOUT)) 3643 AH_TSF_WRITE_TIMEOUT))
4106 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 3644 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
4107 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); 3645 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
4108 3646
4109 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); 3647 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
4110 ath9k_ps_restore(ah->ah_sc);
4111} 3648}
3649EXPORT_SYMBOL(ath9k_hw_reset_tsf);
4112 3650
4113void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting) 3651void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
4114{ 3652{
@@ -4117,11 +3655,28 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
4117 else 3655 else
4118 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF; 3656 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
4119} 3657}
3658EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
3659
3660/*
3661 * Extend 15-bit time stamp from rx descriptor to
3662 * a full 64-bit TSF using the current h/w TSF.
3663*/
3664u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
3665{
3666 u64 tsf;
3667
3668 tsf = ath9k_hw_gettsf64(ah);
3669 if ((tsf & 0x7fff) < rstamp)
3670 tsf -= 0x8000;
3671 return (tsf & ~0x7fff) | rstamp;
3672}
3673EXPORT_SYMBOL(ath9k_hw_extend_tsf);
4120 3674
4121bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us) 3675bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
4122{ 3676{
4123 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) { 3677 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
4124 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad slot time %u\n", us); 3678 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
3679 "bad slot time %u\n", us);
4125 ah->slottime = (u32) -1; 3680 ah->slottime = (u32) -1;
4126 return false; 3681 return false;
4127 } else { 3682 } else {
@@ -4130,13 +3685,14 @@ bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
4130 return true; 3685 return true;
4131 } 3686 }
4132} 3687}
3688EXPORT_SYMBOL(ath9k_hw_setslottime);
4133 3689
4134void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode) 3690void ath9k_hw_set11nmac2040(struct ath_hw *ah)
4135{ 3691{
3692 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
4136 u32 macmode; 3693 u32 macmode;
4137 3694
4138 if (mode == ATH9K_HT_MACMODE_2040 && 3695 if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
4139 !ah->config.cwm_ignore_extcca)
4140 macmode = AR_2040_JOINED_RX_CLEAR; 3696 macmode = AR_2040_JOINED_RX_CLEAR;
4141 else 3697 else
4142 macmode = 0; 3698 macmode = 0;
@@ -4193,6 +3749,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah)
4193{ 3749{
4194 return REG_READ(ah, AR_TSF_L32); 3750 return REG_READ(ah, AR_TSF_L32);
4195} 3751}
3752EXPORT_SYMBOL(ath9k_hw_gettsf32);
4196 3753
4197struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 3754struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
4198 void (*trigger)(void *), 3755 void (*trigger)(void *),
@@ -4206,8 +3763,9 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
4206 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); 3763 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
4207 3764
4208 if (timer == NULL) { 3765 if (timer == NULL) {
4209 printk(KERN_DEBUG "Failed to allocate memory" 3766 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
4210 "for hw timer[%d]\n", timer_index); 3767 "Failed to allocate memory"
3768 "for hw timer[%d]\n", timer_index);
4211 return NULL; 3769 return NULL;
4212 } 3770 }
4213 3771
@@ -4220,10 +3778,12 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
4220 3778
4221 return timer; 3779 return timer;
4222} 3780}
3781EXPORT_SYMBOL(ath_gen_timer_alloc);
4223 3782
4224void ath_gen_timer_start(struct ath_hw *ah, 3783void ath9k_hw_gen_timer_start(struct ath_hw *ah,
4225 struct ath_gen_timer *timer, 3784 struct ath_gen_timer *timer,
4226 u32 timer_next, u32 timer_period) 3785 u32 timer_next,
3786 u32 timer_period)
4227{ 3787{
4228 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3788 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
4229 u32 tsf; 3789 u32 tsf;
@@ -4234,8 +3794,9 @@ void ath_gen_timer_start(struct ath_hw *ah,
4234 3794
4235 tsf = ath9k_hw_gettsf32(ah); 3795 tsf = ath9k_hw_gettsf32(ah);
4236 3796
4237 DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER, "curent tsf %x period %x" 3797 ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
4238 "timer_next %x\n", tsf, timer_period, timer_next); 3798 "curent tsf %x period %x"
3799 "timer_next %x\n", tsf, timer_period, timer_next);
4239 3800
4240 /* 3801 /*
4241 * Pull timer_next forward if the current TSF already passed it 3802 * Pull timer_next forward if the current TSF already passed it
@@ -4258,15 +3819,10 @@ void ath_gen_timer_start(struct ath_hw *ah,
4258 REG_SET_BIT(ah, AR_IMR_S5, 3819 REG_SET_BIT(ah, AR_IMR_S5,
4259 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | 3820 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
4260 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); 3821 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
4261
4262 if ((ah->ah_sc->imask & ATH9K_INT_GENTIMER) == 0) {
4263 ath9k_hw_set_interrupts(ah, 0);
4264 ah->ah_sc->imask |= ATH9K_INT_GENTIMER;
4265 ath9k_hw_set_interrupts(ah, ah->ah_sc->imask);
4266 }
4267} 3822}
3823EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
4268 3824
4269void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) 3825void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
4270{ 3826{
4271 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3827 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
4272 3828
@@ -4285,14 +3841,8 @@ void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
4285 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); 3841 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
4286 3842
4287 clear_bit(timer->index, &timer_table->timer_mask.timer_bits); 3843 clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
4288
4289 /* if no timer is enabled, turn off interrupt mask */
4290 if (timer_table->timer_mask.val == 0) {
4291 ath9k_hw_set_interrupts(ah, 0);
4292 ah->ah_sc->imask &= ~ATH9K_INT_GENTIMER;
4293 ath9k_hw_set_interrupts(ah, ah->ah_sc->imask);
4294 }
4295} 3844}
3845EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
4296 3846
4297void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer) 3847void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
4298{ 3848{
@@ -4302,6 +3852,7 @@ void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
4302 timer_table->timers[timer->index] = NULL; 3852 timer_table->timers[timer->index] = NULL;
4303 kfree(timer); 3853 kfree(timer);
4304} 3854}
3855EXPORT_SYMBOL(ath_gen_timer_free);
4305 3856
4306/* 3857/*
4307 * Generic Timer Interrupts handling 3858 * Generic Timer Interrupts handling
@@ -4310,6 +3861,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
4310{ 3861{
4311 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3862 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
4312 struct ath_gen_timer *timer; 3863 struct ath_gen_timer *timer;
3864 struct ath_common *common = ath9k_hw_common(ah);
4313 u32 trigger_mask, thresh_mask, index; 3865 u32 trigger_mask, thresh_mask, index;
4314 3866
4315 /* get hardware generic timer interrupt status */ 3867 /* get hardware generic timer interrupt status */
@@ -4324,8 +3876,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
4324 index = rightmost_index(timer_table, &thresh_mask); 3876 index = rightmost_index(timer_table, &thresh_mask);
4325 timer = timer_table->timers[index]; 3877 timer = timer_table->timers[index];
4326 BUG_ON(!timer); 3878 BUG_ON(!timer);
4327 DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER, 3879 ath_print(common, ATH_DBG_HWTIMER,
4328 "TSF overflow for Gen timer %d\n", index); 3880 "TSF overflow for Gen timer %d\n", index);
4329 timer->overflow(timer->arg); 3881 timer->overflow(timer->arg);
4330 } 3882 }
4331 3883
@@ -4333,21 +3885,95 @@ void ath_gen_timer_isr(struct ath_hw *ah)
4333 index = rightmost_index(timer_table, &trigger_mask); 3885 index = rightmost_index(timer_table, &trigger_mask);
4334 timer = timer_table->timers[index]; 3886 timer = timer_table->timers[index];
4335 BUG_ON(!timer); 3887 BUG_ON(!timer);
4336 DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER, 3888 ath_print(common, ATH_DBG_HWTIMER,
4337 "Gen timer[%d] trigger\n", index); 3889 "Gen timer[%d] trigger\n", index);
4338 timer->trigger(timer->arg); 3890 timer->trigger(timer->arg);
4339 } 3891 }
4340} 3892}
3893EXPORT_SYMBOL(ath_gen_timer_isr);
3894
3895static struct {
3896 u32 version;
3897 const char * name;
3898} ath_mac_bb_names[] = {
3899 /* Devices with external radios */
3900 { AR_SREV_VERSION_5416_PCI, "5416" },
3901 { AR_SREV_VERSION_5416_PCIE, "5418" },
3902 { AR_SREV_VERSION_9100, "9100" },
3903 { AR_SREV_VERSION_9160, "9160" },
3904 /* Single-chip solutions */
3905 { AR_SREV_VERSION_9280, "9280" },
3906 { AR_SREV_VERSION_9285, "9285" },
3907 { AR_SREV_VERSION_9287, "9287" },
3908 { AR_SREV_VERSION_9271, "9271" },
3909};
3910
3911/* For devices with external radios */
3912static struct {
3913 u16 version;
3914 const char * name;
3915} ath_rf_names[] = {
3916 { 0, "5133" },
3917 { AR_RAD5133_SREV_MAJOR, "5133" },
3918 { AR_RAD5122_SREV_MAJOR, "5122" },
3919 { AR_RAD2133_SREV_MAJOR, "2133" },
3920 { AR_RAD2122_SREV_MAJOR, "2122" }
3921};
3922
3923/*
3924 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
3925 */
3926static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
3927{
3928 int i;
3929
3930 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
3931 if (ath_mac_bb_names[i].version == mac_bb_version) {
3932 return ath_mac_bb_names[i].name;
3933 }
3934 }
3935
3936 return "????";
3937}
4341 3938
4342/* 3939/*
4343 * Primitive to disable ASPM 3940 * Return the RF name. "????" is returned if the RF is unknown.
3941 * Used for devices with external radios.
4344 */ 3942 */
4345void ath_pcie_aspm_disable(struct ath_softc *sc) 3943static const char *ath9k_hw_rf_name(u16 rf_version)
4346{ 3944{
4347 struct pci_dev *pdev = to_pci_dev(sc->dev); 3945 int i;
4348 u8 aspm; 3946
3947 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
3948 if (ath_rf_names[i].version == rf_version) {
3949 return ath_rf_names[i].name;
3950 }
3951 }
3952
3953 return "????";
3954}
3955
3956void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
3957{
3958 int used;
3959
3960 /* chipsets >= AR9280 are single-chip */
3961 if (AR_SREV_9280_10_OR_LATER(ah)) {
3962 used = snprintf(hw_name, len,
3963 "Atheros AR%s Rev:%x",
3964 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
3965 ah->hw_version.macRev);
3966 }
3967 else {
3968 used = snprintf(hw_name, len,
3969 "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
3970 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
3971 ah->hw_version.macRev,
3972 ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
3973 AR_RADIO_SREV_MAJOR)),
3974 ah->hw_version.phyRev);
3975 }
4349 3976
4350 pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm); 3977 hw_name[used] = '\0';
4351 aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
4352 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
4353} 3978}
3979EXPORT_SYMBOL(ath9k_hw_name);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b89234571829..f8f5e997162c 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -27,17 +27,24 @@
27#include "calib.h" 27#include "calib.h"
28#include "reg.h" 28#include "reg.h"
29#include "phy.h" 29#include "phy.h"
30#include "btcoex.h"
30 31
31#include "../regd.h" 32#include "../regd.h"
33#include "../debug.h"
32 34
33#define ATHEROS_VENDOR_ID 0x168c 35#define ATHEROS_VENDOR_ID 0x168c
36
34#define AR5416_DEVID_PCI 0x0023 37#define AR5416_DEVID_PCI 0x0023
35#define AR5416_DEVID_PCIE 0x0024 38#define AR5416_DEVID_PCIE 0x0024
36#define AR9160_DEVID_PCI 0x0027 39#define AR9160_DEVID_PCI 0x0027
37#define AR9280_DEVID_PCI 0x0029 40#define AR9280_DEVID_PCI 0x0029
38#define AR9280_DEVID_PCIE 0x002a 41#define AR9280_DEVID_PCIE 0x002a
39#define AR9285_DEVID_PCIE 0x002b 42#define AR9285_DEVID_PCIE 0x002b
43
40#define AR5416_AR9100_DEVID 0x000b 44#define AR5416_AR9100_DEVID 0x000b
45
46#define AR9271_USB 0x9271
47
41#define AR_SUBVENDOR_ID_NOG 0x0e11 48#define AR_SUBVENDOR_ID_NOG 0x0e11
42#define AR_SUBVENDOR_ID_NEW_A 0x7065 49#define AR_SUBVENDOR_ID_NEW_A 0x7065
43#define AR5416_MAGIC 0x19641014 50#define AR5416_MAGIC 0x19641014
@@ -49,9 +56,18 @@
49#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa 56#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
50#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab 57#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
51 58
59#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
60
61#define ATH_DEFAULT_NOISE_FLOOR -95
62
63#define ATH9K_RSSI_BAD 0x80
64
52/* Register read/write primitives */ 65/* Register read/write primitives */
53#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val)) 66#define REG_WRITE(_ah, _reg, _val) \
54#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg)) 67 ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
68
69#define REG_READ(_ah, _reg) \
70 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
55 71
56#define SM(_v, _f) (((_v) << _f##_S) & _f) 72#define SM(_v, _f) (((_v) << _f##_S) & _f)
57#define MS(_v, _f) (((_v) & _f) >> _f##_S) 73#define MS(_v, _f) (((_v) & _f) >> _f##_S)
@@ -91,7 +107,7 @@
91#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) 107#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
92 108
93#define BASE_ACTIVATE_DELAY 100 109#define BASE_ACTIVATE_DELAY 100
94#define RTC_PLL_SETTLE_DELAY 1000 110#define RTC_PLL_SETTLE_DELAY 100
95#define COEF_SCALE_S 24 111#define COEF_SCALE_S 24
96#define HT40_CHANNEL_CENTER_SHIFT 10 112#define HT40_CHANNEL_CENTER_SHIFT 10
97 113
@@ -132,12 +148,6 @@ enum wireless_mode {
132 ATH9K_MODE_MAX, 148 ATH9K_MODE_MAX,
133}; 149};
134 150
135enum ath9k_ant_setting {
136 ATH9K_ANT_VARIABLE = 0,
137 ATH9K_ANT_FIXED_A,
138 ATH9K_ANT_FIXED_B
139};
140
141enum ath9k_hw_caps { 151enum ath9k_hw_caps {
142 ATH9K_HW_CAP_MIC_AESCCM = BIT(0), 152 ATH9K_HW_CAP_MIC_AESCCM = BIT(0),
143 ATH9K_HW_CAP_MIC_CKIP = BIT(1), 153 ATH9K_HW_CAP_MIC_CKIP = BIT(1),
@@ -201,8 +211,6 @@ struct ath9k_ops_config {
201 u32 cck_trig_high; 211 u32 cck_trig_high;
202 u32 cck_trig_low; 212 u32 cck_trig_low;
203 u32 enable_ani; 213 u32 enable_ani;
204 enum ath9k_ant_setting diversity_control;
205 u16 antenna_switch_swap;
206 int serialize_regmode; 214 int serialize_regmode;
207 bool intr_mitigation; 215 bool intr_mitigation;
208#define SPUR_DISABLE 0 216#define SPUR_DISABLE 0
@@ -407,7 +415,7 @@ struct ath9k_hw_version {
407 * Using de Bruijin sequence to to look up 1's index in a 32 bit number 415 * Using de Bruijin sequence to to look up 1's index in a 32 bit number
408 * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001 416 * debruijn32 = 0000 0111 0111 1100 1011 0101 0011 0001
409 */ 417 */
410#define debruijn32 0x077CB531UL 418#define debruijn32 0x077CB531U
411 419
412struct ath_gen_timer_configuration { 420struct ath_gen_timer_configuration {
413 u32 next_addr; 421 u32 next_addr;
@@ -433,7 +441,8 @@ struct ath_gen_timer_table {
433}; 441};
434 442
435struct ath_hw { 443struct ath_hw {
436 struct ath_softc *ah_sc; 444 struct ieee80211_hw *hw;
445 struct ath_common common;
437 struct ath9k_hw_version hw_version; 446 struct ath9k_hw_version hw_version;
438 struct ath9k_ops_config config; 447 struct ath9k_ops_config config;
439 struct ath9k_hw_capabilities caps; 448 struct ath9k_hw_capabilities caps;
@@ -450,7 +459,6 @@ struct ath_hw {
450 459
451 bool sw_mgmt_crypto; 460 bool sw_mgmt_crypto;
452 bool is_pciexpress; 461 bool is_pciexpress;
453 u8 macaddr[ETH_ALEN];
454 u16 tx_trig_level; 462 u16 tx_trig_level;
455 u16 rfsilent; 463 u16 rfsilent;
456 u32 rfkill_gpio; 464 u32 rfkill_gpio;
@@ -523,7 +531,14 @@ struct ath_hw {
523 DONT_USE_32KHZ, 531 DONT_USE_32KHZ,
524 } enable_32kHz_clock; 532 } enable_32kHz_clock;
525 533
526 /* RF */ 534 /* Callback for radio frequency change */
535 int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan);
536
537 /* Callback for baseband spur frequency */
538 void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
539 struct ath9k_channel *chan);
540
541 /* Used to program the radio on non single-chip devices */
527 u32 *analogBank0Data; 542 u32 *analogBank0Data;
528 u32 *analogBank1Data; 543 u32 *analogBank1Data;
529 u32 *analogBank2Data; 544 u32 *analogBank2Data;
@@ -540,7 +555,6 @@ struct ath_hw {
540 u32 acktimeout; 555 u32 acktimeout;
541 u32 ctstimeout; 556 u32 ctstimeout;
542 u32 globaltxtimeout; 557 u32 globaltxtimeout;
543 u8 gbeacon_rate;
544 558
545 /* ANI */ 559 /* ANI */
546 u32 proc_phyerr; 560 u32 proc_phyerr;
@@ -553,8 +567,10 @@ struct ath_hw {
553 int firpwr[5]; 567 int firpwr[5];
554 enum ath9k_ani_cmd ani_function; 568 enum ath9k_ani_cmd ani_function;
555 569
570 /* Bluetooth coexistance */
571 struct ath_btcoex_hw btcoex_hw;
572
556 u32 intr_txqs; 573 u32 intr_txqs;
557 enum ath9k_ht_extprotspacing extprotspacing;
558 u8 txchainmask; 574 u8 txchainmask;
559 u8 rxchainmask; 575 u8 rxchainmask;
560 576
@@ -578,17 +594,29 @@ struct ath_hw {
578 struct ar5416IniArray iniModesAdditional; 594 struct ar5416IniArray iniModesAdditional;
579 struct ar5416IniArray iniModesRxGain; 595 struct ar5416IniArray iniModesRxGain;
580 struct ar5416IniArray iniModesTxGain; 596 struct ar5416IniArray iniModesTxGain;
597 struct ar5416IniArray iniModes_9271_1_0_only;
598 struct ar5416IniArray iniCckfirNormal;
599 struct ar5416IniArray iniCckfirJapan2484;
581 600
582 u32 intr_gen_timer_trigger; 601 u32 intr_gen_timer_trigger;
583 u32 intr_gen_timer_thresh; 602 u32 intr_gen_timer_thresh;
584 struct ath_gen_timer_table hw_gen_timers; 603 struct ath_gen_timer_table hw_gen_timers;
585}; 604};
586 605
606static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
607{
608 return &ah->common;
609}
610
611static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
612{
613 return &(ath9k_hw_common(ah)->regulatory);
614}
615
587/* Initialization, Detach, Reset */ 616/* Initialization, Detach, Reset */
588const char *ath9k_hw_probe(u16 vendorid, u16 devid); 617const char *ath9k_hw_probe(u16 vendorid, u16 devid);
589void ath9k_hw_detach(struct ath_hw *ah); 618void ath9k_hw_detach(struct ath_hw *ah);
590int ath9k_hw_init(struct ath_hw *ah); 619int ath9k_hw_init(struct ath_hw *ah);
591void ath9k_hw_rf_free(struct ath_hw *ah);
592int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 620int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
593 bool bChannelChange); 621 bool bChannelChange);
594void ath9k_hw_fill_cap_info(struct ath_hw *ah); 622void ath9k_hw_fill_cap_info(struct ath_hw *ah);
@@ -613,11 +641,6 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
613void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val); 641void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
614u32 ath9k_hw_getdefantenna(struct ath_hw *ah); 642u32 ath9k_hw_getdefantenna(struct ath_hw *ah);
615void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 643void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
616bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
617 enum ath9k_ant_setting settings,
618 struct ath9k_channel *chan,
619 u8 *tx_chainmask, u8 *rx_chainmask,
620 u8 *antenna_cfgd);
621 644
622/* General Operation */ 645/* General Operation */
623bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 646bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
@@ -637,19 +660,21 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
637void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac); 660void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
638void ath9k_hw_setopmode(struct ath_hw *ah); 661void ath9k_hw_setopmode(struct ath_hw *ah);
639void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); 662void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
640void ath9k_hw_setbssidmask(struct ath_softc *sc); 663void ath9k_hw_setbssidmask(struct ath_hw *ah);
641void ath9k_hw_write_associd(struct ath_softc *sc); 664void ath9k_hw_write_associd(struct ath_hw *ah);
642u64 ath9k_hw_gettsf64(struct ath_hw *ah); 665u64 ath9k_hw_gettsf64(struct ath_hw *ah);
643void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); 666void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
644void ath9k_hw_reset_tsf(struct ath_hw *ah); 667void ath9k_hw_reset_tsf(struct ath_hw *ah);
645void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 668void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
669u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
646bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us); 670bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
647void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode); 671void ath9k_hw_set11nmac2040(struct ath_hw *ah);
648void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 672void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
649void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 673void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
650 const struct ath9k_beacon_state *bs); 674 const struct ath9k_beacon_state *bs);
651bool ath9k_hw_setpower(struct ath_hw *ah, 675
652 enum ath9k_power_mode mode); 676bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
677
653void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off); 678void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
654 679
655/* Interrupt Handling */ 680/* Interrupt Handling */
@@ -663,16 +688,20 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
663 void (*overflow)(void *), 688 void (*overflow)(void *),
664 void *arg, 689 void *arg,
665 u8 timer_index); 690 u8 timer_index);
666void ath_gen_timer_start(struct ath_hw *ah, struct ath_gen_timer *timer, 691void ath9k_hw_gen_timer_start(struct ath_hw *ah,
667 u32 timer_next, u32 timer_period); 692 struct ath_gen_timer *timer,
668void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer); 693 u32 timer_next,
694 u32 timer_period);
695void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
696
669void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer); 697void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
670void ath_gen_timer_isr(struct ath_hw *hw); 698void ath_gen_timer_isr(struct ath_hw *hw);
671u32 ath9k_hw_gettsf32(struct ath_hw *ah); 699u32 ath9k_hw_gettsf32(struct ath_hw *ah);
672 700
701void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
702
673#define ATH_PCIE_CAP_LINK_CTRL 0x70 703#define ATH_PCIE_CAP_LINK_CTRL 0x70
674#define ATH_PCIE_CAP_LINK_L0S 1 704#define ATH_PCIE_CAP_LINK_L0S 1
675#define ATH_PCIE_CAP_LINK_L1 2 705#define ATH_PCIE_CAP_LINK_L1 2
676 706
677void ath_pcie_aspm_disable(struct ath_softc *sc);
678#endif 707#endif
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h
index 8622265a030a..8a3bf3ab998d 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/initvals.h
@@ -21,6 +21,8 @@ static const u32 ar5416Modes[][6] = {
21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, 21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, 23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
24 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
25 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
24 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 26 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
25 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 27 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
26 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 28 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
@@ -31,11 +33,11 @@ static const u32 ar5416Modes[][6] = {
31 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 33 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
32 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 34 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
33 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 35 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x00009850, 0x6c48b4e0, 0x6c48b4e0, 0x6c48b0de, 0x6c48b0de, 0x6c48b0de }, 36 { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
35 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, 37 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
36 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e }, 38 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
37 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 }, 39 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
38 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 40 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
39 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 }, 41 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
40 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, 42 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
41 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 43 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
@@ -46,10 +48,10 @@ static const u32 ar5416Modes[][6] = {
46 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 48 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
47 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 49 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
48 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 }, 50 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
49 { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 }, 51 { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
50 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, 52 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
51 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 53 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
52 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c }, 54 { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
53 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 55 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
54 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 56 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
55 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 57 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -199,7 +201,6 @@ static const u32 ar5416Common[][2] = {
199 { 0x00008110, 0x00000168 }, 201 { 0x00008110, 0x00000168 },
200 { 0x00008118, 0x000100aa }, 202 { 0x00008118, 0x000100aa },
201 { 0x0000811c, 0x00003210 }, 203 { 0x0000811c, 0x00003210 },
202 { 0x00008120, 0x08f04800 },
203 { 0x00008124, 0x00000000 }, 204 { 0x00008124, 0x00000000 },
204 { 0x00008128, 0x00000000 }, 205 { 0x00008128, 0x00000000 },
205 { 0x0000812c, 0x00000000 }, 206 { 0x0000812c, 0x00000000 },
@@ -215,7 +216,6 @@ static const u32 ar5416Common[][2] = {
215 { 0x00008178, 0x00000100 }, 216 { 0x00008178, 0x00000100 },
216 { 0x0000817c, 0x00000000 }, 217 { 0x0000817c, 0x00000000 },
217 { 0x000081c4, 0x00000000 }, 218 { 0x000081c4, 0x00000000 },
218 { 0x000081d0, 0x00003210 },
219 { 0x000081ec, 0x00000000 }, 219 { 0x000081ec, 0x00000000 },
220 { 0x000081f0, 0x00000000 }, 220 { 0x000081f0, 0x00000000 },
221 { 0x000081f4, 0x00000000 }, 221 { 0x000081f4, 0x00000000 },
@@ -246,6 +246,7 @@ static const u32 ar5416Common[][2] = {
246 { 0x00008258, 0x00000000 }, 246 { 0x00008258, 0x00000000 },
247 { 0x0000825c, 0x400000ff }, 247 { 0x0000825c, 0x400000ff },
248 { 0x00008260, 0x00080922 }, 248 { 0x00008260, 0x00080922 },
249 { 0x00008264, 0xa8000010 },
249 { 0x00008270, 0x00000000 }, 250 { 0x00008270, 0x00000000 },
250 { 0x00008274, 0x40000000 }, 251 { 0x00008274, 0x40000000 },
251 { 0x00008278, 0x003e4180 }, 252 { 0x00008278, 0x003e4180 },
@@ -406,9 +407,9 @@ static const u32 ar5416Common[][2] = {
406 { 0x0000a25c, 0x0f0f0f01 }, 407 { 0x0000a25c, 0x0f0f0f01 },
407 { 0x0000a260, 0xdfa91f01 }, 408 { 0x0000a260, 0xdfa91f01 },
408 { 0x0000a268, 0x00000000 }, 409 { 0x0000a268, 0x00000000 },
409 { 0x0000a26c, 0x0ebae9c6 }, 410 { 0x0000a26c, 0x0e79e5c6 },
410 { 0x0000b26c, 0x0ebae9c6 }, 411 { 0x0000b26c, 0x0e79e5c6 },
411 { 0x0000c26c, 0x0ebae9c6 }, 412 { 0x0000c26c, 0x0e79e5c6 },
412 { 0x0000d270, 0x00820820 }, 413 { 0x0000d270, 0x00820820 },
413 { 0x0000a278, 0x1ce739ce }, 414 { 0x0000a278, 0x1ce739ce },
414 { 0x0000a27c, 0x051701ce }, 415 { 0x0000a27c, 0x051701ce },
@@ -2551,26 +2552,27 @@ static const u32 ar9280Modes_9280_2[][6] = {
2551 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 2552 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
2552 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 2553 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2553 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 2554 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2554 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 2555 { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e },
2555 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 2556 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2556 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 2557 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2557 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 2558 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2558 { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e }, 2559 { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e },
2559 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, 2560 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
2560 { 0x00009850, 0x6c4000e2, 0x6c4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, 2561 { 0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 },
2561 { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 2562 { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
2562 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x3139605e, 0x31395d5e, 0x31395d5e }, 2563 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
2563 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, 2564 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
2564 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 2565 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2565 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 2566 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
2566 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 2567 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2567 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 2568 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
2568 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 2569 { 0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016 },
2569 { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, 2570 { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2570 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 }, 2571 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 },
2571 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 2572 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2572 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 2573 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2573 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, 2574 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2575 { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
2574 { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c }, 2576 { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c },
2575 { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 }, 2577 { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 },
2576 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 2578 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
@@ -2585,8 +2587,10 @@ static const u32 ar9280Modes_9280_2[][6] = {
2585 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, 2587 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2586 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 2588 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
2587 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 2589 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2590 { 0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000 },
2588 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 }, 2591 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 },
2589 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 2592 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2593 { 0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000 },
2590 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2594 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2591 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 }, 2595 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
2592}; 2596};
@@ -2813,7 +2817,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2813 { 0x00009958, 0x2108ecff }, 2817 { 0x00009958, 0x2108ecff },
2814 { 0x00009940, 0x14750604 }, 2818 { 0x00009940, 0x14750604 },
2815 { 0x0000c95c, 0x004b6a8e }, 2819 { 0x0000c95c, 0x004b6a8e },
2816 { 0x0000c968, 0x000003ce },
2817 { 0x00009970, 0x190fb515 }, 2820 { 0x00009970, 0x190fb515 },
2818 { 0x00009974, 0x00000000 }, 2821 { 0x00009974, 0x00000000 },
2819 { 0x00009978, 0x00000001 }, 2822 { 0x00009978, 0x00000001 },
@@ -2849,7 +2852,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2849 { 0x0000a22c, 0x233f7180 }, 2852 { 0x0000a22c, 0x233f7180 },
2850 { 0x0000a234, 0x20202020 }, 2853 { 0x0000a234, 0x20202020 },
2851 { 0x0000a238, 0x20202020 }, 2854 { 0x0000a238, 0x20202020 },
2852 { 0x0000a23c, 0x13c88000 },
2853 { 0x0000a240, 0x38490a20 }, 2855 { 0x0000a240, 0x38490a20 },
2854 { 0x0000a244, 0x00007bb6 }, 2856 { 0x0000a244, 0x00007bb6 },
2855 { 0x0000a248, 0x0fff3ffc }, 2857 { 0x0000a248, 0x0fff3ffc },
@@ -2859,8 +2861,8 @@ static const u32 ar9280Common_9280_2[][2] = {
2859 { 0x0000a25c, 0x0f0f0f01 }, 2861 { 0x0000a25c, 0x0f0f0f01 },
2860 { 0x0000a260, 0xdfa91f01 }, 2862 { 0x0000a260, 0xdfa91f01 },
2861 { 0x0000a268, 0x00000000 }, 2863 { 0x0000a268, 0x00000000 },
2862 { 0x0000a26c, 0x0ebae9c6 }, 2864 { 0x0000a26c, 0x0e79e5c6 },
2863 { 0x0000b26c, 0x0ebae9c6 }, 2865 { 0x0000b26c, 0x0e79e5c6 },
2864 { 0x0000d270, 0x00820820 }, 2866 { 0x0000d270, 0x00820820 },
2865 { 0x0000a278, 0x1ce739ce }, 2867 { 0x0000a278, 0x1ce739ce },
2866 { 0x0000d35c, 0x07ffffef }, 2868 { 0x0000d35c, 0x07ffffef },
@@ -2874,7 +2876,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2874 { 0x0000d37c, 0x7fffffe2 }, 2876 { 0x0000d37c, 0x7fffffe2 },
2875 { 0x0000d380, 0x7f3c7bba }, 2877 { 0x0000d380, 0x7f3c7bba },
2876 { 0x0000d384, 0xf3307ff0 }, 2878 { 0x0000d384, 0xf3307ff0 },
2877 { 0x0000a388, 0x0c000000 },
2878 { 0x0000a38c, 0x20202020 }, 2879 { 0x0000a38c, 0x20202020 },
2879 { 0x0000a390, 0x20202020 }, 2880 { 0x0000a390, 0x20202020 },
2880 { 0x0000a394, 0x1ce739ce }, 2881 { 0x0000a394, 0x1ce739ce },
@@ -2940,7 +2941,7 @@ static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
2940 { 0x0000801c, 0x148ec02b, 0x148ec057 }, 2941 { 0x0000801c, 0x148ec02b, 0x148ec057 },
2941 { 0x00008318, 0x000044c0, 0x00008980 }, 2942 { 0x00008318, 0x000044c0, 0x00008980 },
2942 { 0x00009820, 0x02020200, 0x02020200 }, 2943 { 0x00009820, 0x02020200, 0x02020200 },
2943 { 0x00009824, 0x00000f0f, 0x00000f0f }, 2944 { 0x00009824, 0x01000f0f, 0x01000f0f },
2944 { 0x00009828, 0x0b020001, 0x0b020001 }, 2945 { 0x00009828, 0x0b020001, 0x0b020001 },
2945 { 0x00009834, 0x00000f0f, 0x00000f0f }, 2946 { 0x00009834, 0x00000f0f, 0x00000f0f },
2946 { 0x00009844, 0x03721821, 0x03721821 }, 2947 { 0x00009844, 0x03721821, 0x03721821 },
@@ -3348,6 +3349,8 @@ static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = {
3348}; 3349};
3349 3350
3350static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = { 3351static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
3352 { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3353 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
3351 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3354 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3352 { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 }, 3355 { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 },
3353 { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 }, 3356 { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 },
@@ -3376,11 +3379,11 @@ static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
3376 { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, 3379 { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 },
3377 { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, 3380 { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
3378 { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, 3381 { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
3379 { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3380 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
3381}; 3382};
3382 3383
3383static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = { 3384static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
3385 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3386 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
3384 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3387 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3385 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, 3388 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
3386 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, 3389 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
@@ -3409,8 +3412,6 @@ static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
3409 { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, 3412 { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 },
3410 { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, 3413 { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
3411 { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, 3414 { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
3412 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3413 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
3414}; 3415};
3415 3416
3416static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = { 3417static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
@@ -5918,9 +5919,6 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
5918 { 0x000099ec, 0x0cc80caa }, 5919 { 0x000099ec, 0x0cc80caa },
5919 { 0x000099f0, 0x00000000 }, 5920 { 0x000099f0, 0x00000000 },
5920 { 0x000099fc, 0x00001042 }, 5921 { 0x000099fc, 0x00001042 },
5921 { 0x0000a1f4, 0x00fffeff },
5922 { 0x0000a1f8, 0x00f5f9ff },
5923 { 0x0000a1fc, 0xb79f6427 },
5924 { 0x0000a208, 0x803e4788 }, 5922 { 0x0000a208, 0x803e4788 },
5925 { 0x0000a210, 0x4080a333 }, 5923 { 0x0000a210, 0x4080a333 },
5926 { 0x0000a214, 0x40206c10 }, 5924 { 0x0000a214, 0x40206c10 },
@@ -5980,7 +5978,7 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
5980 { 0x0000b3f4, 0x00000000 }, 5978 { 0x0000b3f4, 0x00000000 },
5981 { 0x0000a7d8, 0x000003f1 }, 5979 { 0x0000a7d8, 0x000003f1 },
5982 { 0x00007800, 0x00000800 }, 5980 { 0x00007800, 0x00000800 },
5983 { 0x00007804, 0x6c35ffc2 }, 5981 { 0x00007804, 0x6c35ffd2 },
5984 { 0x00007808, 0x6db6c000 }, 5982 { 0x00007808, 0x6db6c000 },
5985 { 0x0000780c, 0x6db6cb30 }, 5983 { 0x0000780c, 0x6db6cb30 },
5986 { 0x00007810, 0x6db6cb6c }, 5984 { 0x00007810, 0x6db6cb6c },
@@ -6000,7 +5998,7 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
6000 { 0x00007848, 0x934934a8 }, 5998 { 0x00007848, 0x934934a8 },
6001 { 0x00007850, 0x00000000 }, 5999 { 0x00007850, 0x00000000 },
6002 { 0x00007854, 0x00000800 }, 6000 { 0x00007854, 0x00000800 },
6003 { 0x00007858, 0x6c35ffc2 }, 6001 { 0x00007858, 0x6c35ffd2 },
6004 { 0x0000785c, 0x6db6c000 }, 6002 { 0x0000785c, 0x6db6c000 },
6005 { 0x00007860, 0x6db6cb30 }, 6003 { 0x00007860, 0x6db6cb30 },
6006 { 0x00007864, 0x6db6cb6c }, 6004 { 0x00007864, 0x6db6cb6c },
@@ -6027,6 +6025,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
6027 { 0x000078b8, 0x2a850160 }, 6025 { 0x000078b8, 0x2a850160 },
6028}; 6026};
6029 6027
6028/*
6029 * For Japanese regulatory requirements, 2484 MHz requires the following three
6030 * registers be programmed differently from the channel between 2412 and 2472 MHz.
6031 */
6032static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
6033 { 0x0000a1f4, 0x00fffeff },
6034 { 0x0000a1f8, 0x00f5f9ff },
6035 { 0x0000a1fc, 0xb79f6427 },
6036};
6037
6038static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
6039 { 0x0000a1f4, 0x00000000 },
6040 { 0x0000a1f8, 0xefff0301 },
6041 { 0x0000a1fc, 0xca9228ee },
6042};
6043
6030static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = { 6044static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
6031 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 6045 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
6032 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 6046 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -6365,8 +6379,8 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
6365}; 6379};
6366 6380
6367 6381
6368/* AR9271 initialization values automaticaly created: 03/23/09 */ 6382/* AR9271 initialization values automaticaly created: 06/04/09 */
6369static const u_int32_t ar9271Modes_9271_1_0[][6] = { 6383static const u_int32_t ar9271Modes_9271[][6] = {
6370 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 6384 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
6371 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 6385 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
6372 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 6386 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6376,8 +6390,8 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6376 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 6390 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
6377 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 6391 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
6378 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 6392 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
6379 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 6393 { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e },
6380 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 6394 { 0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001 },
6381 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 6395 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
6382 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 6396 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
6383 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, 6397 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
@@ -6391,6 +6405,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6391 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 6405 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
6392 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 6406 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
6393 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 6407 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
6408 { 0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310 },
6394 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 6409 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
6395 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 6410 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
6396 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, 6411 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
@@ -6401,7 +6416,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6401 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, 6416 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
6402 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 6417 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
6403 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 6418 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
6404 { 0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329 }, 6419 { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f },
6405 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 6420 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
6406 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 6421 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
6407 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 6422 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -6690,7 +6705,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6690 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 6705 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
6691}; 6706};
6692 6707
6693static const u_int32_t ar9271Common_9271_1_0[][2] = { 6708static const u_int32_t ar9271Common_9271[][2] = {
6694 { 0x0000000c, 0x00000000 }, 6709 { 0x0000000c, 0x00000000 },
6695 { 0x00000030, 0x00020045 }, 6710 { 0x00000030, 0x00020045 },
6696 { 0x00000034, 0x00000005 }, 6711 { 0x00000034, 0x00000005 },
@@ -6786,7 +6801,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6786 { 0x0000803c, 0x00000000 }, 6801 { 0x0000803c, 0x00000000 },
6787 { 0x00008048, 0x00000000 }, 6802 { 0x00008048, 0x00000000 },
6788 { 0x00008054, 0x00000000 }, 6803 { 0x00008054, 0x00000000 },
6789 { 0x00008058, 0x02000000 }, 6804 { 0x00008058, 0x00000000 },
6790 { 0x0000805c, 0x000fc78f }, 6805 { 0x0000805c, 0x000fc78f },
6791 { 0x00008060, 0x0000000f }, 6806 { 0x00008060, 0x0000000f },
6792 { 0x00008064, 0x00000000 }, 6807 { 0x00008064, 0x00000000 },
@@ -6817,7 +6832,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6817 { 0x00008110, 0x00000168 }, 6832 { 0x00008110, 0x00000168 },
6818 { 0x00008118, 0x000100aa }, 6833 { 0x00008118, 0x000100aa },
6819 { 0x0000811c, 0x00003210 }, 6834 { 0x0000811c, 0x00003210 },
6820 { 0x00008120, 0x08f04814 }, 6835 { 0x00008120, 0x08f04810 },
6821 { 0x00008124, 0x00000000 }, 6836 { 0x00008124, 0x00000000 },
6822 { 0x00008128, 0x00000000 }, 6837 { 0x00008128, 0x00000000 },
6823 { 0x0000812c, 0x00000000 }, 6838 { 0x0000812c, 0x00000000 },
@@ -6864,7 +6879,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6864 { 0x00008258, 0x00000000 }, 6879 { 0x00008258, 0x00000000 },
6865 { 0x0000825c, 0x400000ff }, 6880 { 0x0000825c, 0x400000ff },
6866 { 0x00008260, 0x00080922 }, 6881 { 0x00008260, 0x00080922 },
6867 { 0x00008264, 0xa8a00010 }, 6882 { 0x00008264, 0x88a00010 },
6868 { 0x00008270, 0x00000000 }, 6883 { 0x00008270, 0x00000000 },
6869 { 0x00008274, 0x40000000 }, 6884 { 0x00008274, 0x40000000 },
6870 { 0x00008278, 0x003e4180 }, 6885 { 0x00008278, 0x003e4180 },
@@ -6896,7 +6911,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6896 { 0x00007814, 0x924934a8 }, 6911 { 0x00007814, 0x924934a8 },
6897 { 0x0000781c, 0x00000000 }, 6912 { 0x0000781c, 0x00000000 },
6898 { 0x00007820, 0x00000c04 }, 6913 { 0x00007820, 0x00000c04 },
6899 { 0x00007824, 0x00d86bff }, 6914 { 0x00007824, 0x00d8abff },
6900 { 0x00007828, 0x66964300 }, 6915 { 0x00007828, 0x66964300 },
6901 { 0x0000782c, 0x8db6d961 }, 6916 { 0x0000782c, 0x8db6d961 },
6902 { 0x00007830, 0x8db6d96c }, 6917 { 0x00007830, 0x8db6d96c },
@@ -6930,7 +6945,6 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6930 { 0x00009904, 0x00000000 }, 6945 { 0x00009904, 0x00000000 },
6931 { 0x00009908, 0x00000000 }, 6946 { 0x00009908, 0x00000000 },
6932 { 0x0000990c, 0x00000000 }, 6947 { 0x0000990c, 0x00000000 },
6933 { 0x00009910, 0x30002310 },
6934 { 0x0000991c, 0x10000fff }, 6948 { 0x0000991c, 0x10000fff },
6935 { 0x00009920, 0x04900000 }, 6949 { 0x00009920, 0x04900000 },
6936 { 0x00009928, 0x00000001 }, 6950 { 0x00009928, 0x00000001 },
@@ -6944,7 +6958,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6944 { 0x00009954, 0x5f3ca3de }, 6958 { 0x00009954, 0x5f3ca3de },
6945 { 0x00009958, 0x0108ecff }, 6959 { 0x00009958, 0x0108ecff },
6946 { 0x00009968, 0x000003ce }, 6960 { 0x00009968, 0x000003ce },
6947 { 0x00009970, 0x192bb515 }, 6961 { 0x00009970, 0x192bb514 },
6948 { 0x00009974, 0x00000000 }, 6962 { 0x00009974, 0x00000000 },
6949 { 0x00009978, 0x00000001 }, 6963 { 0x00009978, 0x00000001 },
6950 { 0x0000997c, 0x00000000 }, 6964 { 0x0000997c, 0x00000000 },
@@ -7031,3 +7045,8 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
7031 { 0x0000d380, 0x7f3c7bba }, 7045 { 0x0000d380, 0x7f3c7bba },
7032 { 0x0000d384, 0xf3307ff0 }, 7046 { 0x0000d384, 0xf3307ff0 },
7033}; 7047};
7048
7049static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
7050 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
7051 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
7052};
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 800bfab94635..46466ffebcb0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -14,16 +14,16 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, 19static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
20 struct ath9k_tx_queue_info *qi) 20 struct ath9k_tx_queue_info *qi)
21{ 21{
22 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 22 ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
23 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", 23 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
24 ah->txok_interrupt_mask, ah->txerr_interrupt_mask, 24 ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, 25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
26 ah->txurn_interrupt_mask); 26 ah->txurn_interrupt_mask);
27 27
28 REG_WRITE(ah, AR_IMR_S0, 28 REG_WRITE(ah, AR_IMR_S0,
29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) 29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
@@ -39,17 +39,21 @@ u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
39{ 39{
40 return REG_READ(ah, AR_QTXDP(q)); 40 return REG_READ(ah, AR_QTXDP(q));
41} 41}
42EXPORT_SYMBOL(ath9k_hw_gettxbuf);
42 43
43void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) 44void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
44{ 45{
45 REG_WRITE(ah, AR_QTXDP(q), txdp); 46 REG_WRITE(ah, AR_QTXDP(q), txdp);
46} 47}
48EXPORT_SYMBOL(ath9k_hw_puttxbuf);
47 49
48void ath9k_hw_txstart(struct ath_hw *ah, u32 q) 50void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
49{ 51{
50 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Enable TXE on queue: %u\n", q); 52 ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
53 "Enable TXE on queue: %u\n", q);
51 REG_WRITE(ah, AR_Q_TXE, 1 << q); 54 REG_WRITE(ah, AR_Q_TXE, 1 << q);
52} 55}
56EXPORT_SYMBOL(ath9k_hw_txstart);
53 57
54u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) 58u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
55{ 59{
@@ -64,6 +68,7 @@ u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
64 68
65 return npend; 69 return npend;
66} 70}
71EXPORT_SYMBOL(ath9k_hw_numtxpending);
67 72
68bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) 73bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
69{ 74{
@@ -93,27 +98,28 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
93 98
94 return newLevel != curLevel; 99 return newLevel != curLevel;
95} 100}
101EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
96 102
97bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) 103bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
98{ 104{
99#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */ 105#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
100#define ATH9K_TIME_QUANTUM 100 /* usec */ 106#define ATH9K_TIME_QUANTUM 100 /* usec */
101 107 struct ath_common *common = ath9k_hw_common(ah);
102 struct ath9k_hw_capabilities *pCap = &ah->caps; 108 struct ath9k_hw_capabilities *pCap = &ah->caps;
103 struct ath9k_tx_queue_info *qi; 109 struct ath9k_tx_queue_info *qi;
104 u32 tsfLow, j, wait; 110 u32 tsfLow, j, wait;
105 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; 111 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
106 112
107 if (q >= pCap->total_queues) { 113 if (q >= pCap->total_queues) {
108 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, " 114 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
109 "invalid queue: %u\n", q); 115 "invalid queue: %u\n", q);
110 return false; 116 return false;
111 } 117 }
112 118
113 qi = &ah->txq[q]; 119 qi = &ah->txq[q];
114 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 120 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
115 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, " 121 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
116 "inactive queue: %u\n", q); 122 "inactive queue: %u\n", q);
117 return false; 123 return false;
118 } 124 }
119 125
@@ -126,9 +132,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
126 } 132 }
127 133
128 if (ath9k_hw_numtxpending(ah, q)) { 134 if (ath9k_hw_numtxpending(ah, q)) {
129 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 135 ath_print(common, ATH_DBG_QUEUE,
130 "%s: Num of pending TX Frames %d on Q %d\n", 136 "%s: Num of pending TX Frames %d on Q %d\n",
131 __func__, ath9k_hw_numtxpending(ah, q), q); 137 __func__, ath9k_hw_numtxpending(ah, q), q);
132 138
133 for (j = 0; j < 2; j++) { 139 for (j = 0; j < 2; j++) {
134 tsfLow = REG_READ(ah, AR_TSF_L32); 140 tsfLow = REG_READ(ah, AR_TSF_L32);
@@ -142,9 +148,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
142 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) 148 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
143 break; 149 break;
144 150
145 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 151 ath_print(common, ATH_DBG_QUEUE,
146 "TSF has moved while trying to set " 152 "TSF has moved while trying to set "
147 "quiet time TSF: 0x%08x\n", tsfLow); 153 "quiet time TSF: 0x%08x\n", tsfLow);
148 } 154 }
149 155
150 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 156 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
@@ -155,9 +161,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
155 wait = wait_time; 161 wait = wait_time;
156 while (ath9k_hw_numtxpending(ah, q)) { 162 while (ath9k_hw_numtxpending(ah, q)) {
157 if ((--wait) == 0) { 163 if ((--wait) == 0) {
158 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 164 ath_print(common, ATH_DBG_QUEUE,
159 "Failed to stop TX DMA in 100 " 165 "Failed to stop TX DMA in 100 "
160 "msec after killing last frame\n"); 166 "msec after killing last frame\n");
161 break; 167 break;
162 } 168 }
163 udelay(ATH9K_TIME_QUANTUM); 169 udelay(ATH9K_TIME_QUANTUM);
@@ -172,6 +178,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
172#undef ATH9K_TX_STOP_DMA_TIMEOUT 178#undef ATH9K_TX_STOP_DMA_TIMEOUT
173#undef ATH9K_TIME_QUANTUM 179#undef ATH9K_TIME_QUANTUM
174} 180}
181EXPORT_SYMBOL(ath9k_hw_stoptxdma);
175 182
176void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds, 183void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
177 u32 segLen, bool firstSeg, 184 u32 segLen, bool firstSeg,
@@ -198,6 +205,7 @@ void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
198 ads->ds_txstatus6 = ads->ds_txstatus7 = 0; 205 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
199 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 206 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
200} 207}
208EXPORT_SYMBOL(ath9k_hw_filltxdesc);
201 209
202void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds) 210void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
203{ 211{
@@ -209,6 +217,7 @@ void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
209 ads->ds_txstatus6 = ads->ds_txstatus7 = 0; 217 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
210 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 218 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
211} 219}
220EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
212 221
213int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds) 222int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
214{ 223{
@@ -284,6 +293,7 @@ int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
284 293
285 return 0; 294 return 0;
286} 295}
296EXPORT_SYMBOL(ath9k_hw_txprocdesc);
287 297
288void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds, 298void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
289 u32 pktLen, enum ath9k_pkt_type type, u32 txPower, 299 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
@@ -319,6 +329,7 @@ void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
319 ads->ds_ctl11 = 0; 329 ads->ds_ctl11 = 0;
320 } 330 }
321} 331}
332EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
322 333
323void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds, 334void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
324 struct ath_desc *lastds, 335 struct ath_desc *lastds,
@@ -374,6 +385,7 @@ void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
374 last_ads->ds_ctl2 = ads->ds_ctl2; 385 last_ads->ds_ctl2 = ads->ds_ctl2;
375 last_ads->ds_ctl3 = ads->ds_ctl3; 386 last_ads->ds_ctl3 = ads->ds_ctl3;
376} 387}
388EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
377 389
378void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds, 390void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
379 u32 aggrLen) 391 u32 aggrLen)
@@ -384,6 +396,7 @@ void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
384 ads->ds_ctl6 &= ~AR_AggrLen; 396 ads->ds_ctl6 &= ~AR_AggrLen;
385 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen); 397 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
386} 398}
399EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
387 400
388void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds, 401void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
389 u32 numDelims) 402 u32 numDelims)
@@ -398,6 +411,7 @@ void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
398 ctl6 |= SM(numDelims, AR_PadDelim); 411 ctl6 |= SM(numDelims, AR_PadDelim);
399 ads->ds_ctl6 = ctl6; 412 ads->ds_ctl6 = ctl6;
400} 413}
414EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
401 415
402void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds) 416void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
403{ 417{
@@ -407,6 +421,7 @@ void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
407 ads->ds_ctl1 &= ~AR_MoreAggr; 421 ads->ds_ctl1 &= ~AR_MoreAggr;
408 ads->ds_ctl6 &= ~AR_PadDelim; 422 ads->ds_ctl6 &= ~AR_PadDelim;
409} 423}
424EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
410 425
411void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds) 426void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
412{ 427{
@@ -414,6 +429,7 @@ void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
414 429
415 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr); 430 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
416} 431}
432EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
417 433
418void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds, 434void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
419 u32 burstDuration) 435 u32 burstDuration)
@@ -423,6 +439,7 @@ void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
423 ads->ds_ctl2 &= ~AR_BurstDur; 439 ads->ds_ctl2 &= ~AR_BurstDur;
424 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur); 440 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
425} 441}
442EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
426 443
427void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds, 444void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
428 u32 vmf) 445 u32 vmf)
@@ -440,28 +457,30 @@ void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
440 *txqs &= ah->intr_txqs; 457 *txqs &= ah->intr_txqs;
441 ah->intr_txqs &= ~(*txqs); 458 ah->intr_txqs &= ~(*txqs);
442} 459}
460EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
443 461
444bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, 462bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
445 const struct ath9k_tx_queue_info *qinfo) 463 const struct ath9k_tx_queue_info *qinfo)
446{ 464{
447 u32 cw; 465 u32 cw;
466 struct ath_common *common = ath9k_hw_common(ah);
448 struct ath9k_hw_capabilities *pCap = &ah->caps; 467 struct ath9k_hw_capabilities *pCap = &ah->caps;
449 struct ath9k_tx_queue_info *qi; 468 struct ath9k_tx_queue_info *qi;
450 469
451 if (q >= pCap->total_queues) { 470 if (q >= pCap->total_queues) {
452 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, " 471 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
453 "invalid queue: %u\n", q); 472 "invalid queue: %u\n", q);
454 return false; 473 return false;
455 } 474 }
456 475
457 qi = &ah->txq[q]; 476 qi = &ah->txq[q];
458 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 477 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
459 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, " 478 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
460 "inactive queue: %u\n", q); 479 "inactive queue: %u\n", q);
461 return false; 480 return false;
462 } 481 }
463 482
464 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); 483 ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
465 484
466 qi->tqi_ver = qinfo->tqi_ver; 485 qi->tqi_ver = qinfo->tqi_ver;
467 qi->tqi_subtype = qinfo->tqi_subtype; 486 qi->tqi_subtype = qinfo->tqi_subtype;
@@ -510,23 +529,25 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
510 529
511 return true; 530 return true;
512} 531}
532EXPORT_SYMBOL(ath9k_hw_set_txq_props);
513 533
514bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, 534bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
515 struct ath9k_tx_queue_info *qinfo) 535 struct ath9k_tx_queue_info *qinfo)
516{ 536{
537 struct ath_common *common = ath9k_hw_common(ah);
517 struct ath9k_hw_capabilities *pCap = &ah->caps; 538 struct ath9k_hw_capabilities *pCap = &ah->caps;
518 struct ath9k_tx_queue_info *qi; 539 struct ath9k_tx_queue_info *qi;
519 540
520 if (q >= pCap->total_queues) { 541 if (q >= pCap->total_queues) {
521 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, " 542 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
522 "invalid queue: %u\n", q); 543 "invalid queue: %u\n", q);
523 return false; 544 return false;
524 } 545 }
525 546
526 qi = &ah->txq[q]; 547 qi = &ah->txq[q];
527 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 548 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
528 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, " 549 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
529 "inactive queue: %u\n", q); 550 "inactive queue: %u\n", q);
530 return false; 551 return false;
531 } 552 }
532 553
@@ -547,10 +568,12 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
547 568
548 return true; 569 return true;
549} 570}
571EXPORT_SYMBOL(ath9k_hw_get_txq_props);
550 572
551int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, 573int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
552 const struct ath9k_tx_queue_info *qinfo) 574 const struct ath9k_tx_queue_info *qinfo)
553{ 575{
576 struct ath_common *common = ath9k_hw_common(ah);
554 struct ath9k_tx_queue_info *qi; 577 struct ath9k_tx_queue_info *qi;
555 struct ath9k_hw_capabilities *pCap = &ah->caps; 578 struct ath9k_hw_capabilities *pCap = &ah->caps;
556 int q; 579 int q;
@@ -574,23 +597,23 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
574 ATH9K_TX_QUEUE_INACTIVE) 597 ATH9K_TX_QUEUE_INACTIVE)
575 break; 598 break;
576 if (q == pCap->total_queues) { 599 if (q == pCap->total_queues) {
577 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 600 ath_print(common, ATH_DBG_FATAL,
578 "No available TX queue\n"); 601 "No available TX queue\n");
579 return -1; 602 return -1;
580 } 603 }
581 break; 604 break;
582 default: 605 default:
583 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Invalid TX queue type: %u\n", 606 ath_print(common, ATH_DBG_FATAL,
584 type); 607 "Invalid TX queue type: %u\n", type);
585 return -1; 608 return -1;
586 } 609 }
587 610
588 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); 611 ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
589 612
590 qi = &ah->txq[q]; 613 qi = &ah->txq[q];
591 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { 614 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
592 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 615 ath_print(common, ATH_DBG_FATAL,
593 "TX queue: %u already active\n", q); 616 "TX queue: %u already active\n", q);
594 return -1; 617 return -1;
595 } 618 }
596 memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); 619 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -613,25 +636,27 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
613 636
614 return q; 637 return q;
615} 638}
639EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
616 640
617bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) 641bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
618{ 642{
619 struct ath9k_hw_capabilities *pCap = &ah->caps; 643 struct ath9k_hw_capabilities *pCap = &ah->caps;
644 struct ath_common *common = ath9k_hw_common(ah);
620 struct ath9k_tx_queue_info *qi; 645 struct ath9k_tx_queue_info *qi;
621 646
622 if (q >= pCap->total_queues) { 647 if (q >= pCap->total_queues) {
623 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, " 648 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
624 "invalid queue: %u\n", q); 649 "invalid queue: %u\n", q);
625 return false; 650 return false;
626 } 651 }
627 qi = &ah->txq[q]; 652 qi = &ah->txq[q];
628 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 653 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
629 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, " 654 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
630 "inactive queue: %u\n", q); 655 "inactive queue: %u\n", q);
631 return false; 656 return false;
632 } 657 }
633 658
634 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); 659 ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
635 660
636 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; 661 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
637 ah->txok_interrupt_mask &= ~(1 << q); 662 ah->txok_interrupt_mask &= ~(1 << q);
@@ -643,28 +668,30 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
643 668
644 return true; 669 return true;
645} 670}
671EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
646 672
647bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) 673bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
648{ 674{
649 struct ath9k_hw_capabilities *pCap = &ah->caps; 675 struct ath9k_hw_capabilities *pCap = &ah->caps;
676 struct ath_common *common = ath9k_hw_common(ah);
650 struct ath9k_channel *chan = ah->curchan; 677 struct ath9k_channel *chan = ah->curchan;
651 struct ath9k_tx_queue_info *qi; 678 struct ath9k_tx_queue_info *qi;
652 u32 cwMin, chanCwMin, value; 679 u32 cwMin, chanCwMin, value;
653 680
654 if (q >= pCap->total_queues) { 681 if (q >= pCap->total_queues) {
655 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, " 682 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
656 "invalid queue: %u\n", q); 683 "invalid queue: %u\n", q);
657 return false; 684 return false;
658 } 685 }
659 686
660 qi = &ah->txq[q]; 687 qi = &ah->txq[q];
661 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 688 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
662 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, " 689 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
663 "inactive queue: %u\n", q); 690 "inactive queue: %u\n", q);
664 return true; 691 return true;
665 } 692 }
666 693
667 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); 694 ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
668 695
669 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { 696 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
670 if (chan && IS_CHAN_B(chan)) 697 if (chan && IS_CHAN_B(chan))
@@ -799,6 +826,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
799 826
800 return true; 827 return true;
801} 828}
829EXPORT_SYMBOL(ath9k_hw_resettxqueue);
802 830
803int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 831int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
804 u32 pa, struct ath_desc *nds, u64 tsf) 832 u32 pa, struct ath_desc *nds, u64 tsf)
@@ -880,6 +908,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
880 908
881 return 0; 909 return 0;
882} 910}
911EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
883 912
884void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, 913void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
885 u32 size, u32 flags) 914 u32 size, u32 flags)
@@ -895,6 +924,7 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
895 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 924 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
896 memset(&(ads->u), 0, sizeof(ads->u)); 925 memset(&(ads->u), 0, sizeof(ads->u));
897} 926}
927EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
898 928
899bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set) 929bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
900{ 930{
@@ -911,8 +941,9 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
911 AR_DIAG_RX_ABORT)); 941 AR_DIAG_RX_ABORT));
912 942
913 reg = REG_READ(ah, AR_OBS_BUS_1); 943 reg = REG_READ(ah, AR_OBS_BUS_1);
914 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 944 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
915 "RX failed to go idle in 10 ms RXSM=0x%x\n", reg); 945 "RX failed to go idle in 10 ms RXSM=0x%x\n",
946 reg);
916 947
917 return false; 948 return false;
918 } 949 }
@@ -923,16 +954,19 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
923 954
924 return true; 955 return true;
925} 956}
957EXPORT_SYMBOL(ath9k_hw_setrxabort);
926 958
927void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp) 959void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
928{ 960{
929 REG_WRITE(ah, AR_RXDP, rxdp); 961 REG_WRITE(ah, AR_RXDP, rxdp);
930} 962}
963EXPORT_SYMBOL(ath9k_hw_putrxbuf);
931 964
932void ath9k_hw_rxena(struct ath_hw *ah) 965void ath9k_hw_rxena(struct ath_hw *ah)
933{ 966{
934 REG_WRITE(ah, AR_CR, AR_CR_RXE); 967 REG_WRITE(ah, AR_CR, AR_CR_RXE);
935} 968}
969EXPORT_SYMBOL(ath9k_hw_rxena);
936 970
937void ath9k_hw_startpcureceive(struct ath_hw *ah) 971void ath9k_hw_startpcureceive(struct ath_hw *ah)
938{ 972{
@@ -942,6 +976,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah)
942 976
943 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 977 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
944} 978}
979EXPORT_SYMBOL(ath9k_hw_startpcureceive);
945 980
946void ath9k_hw_stoppcurecv(struct ath_hw *ah) 981void ath9k_hw_stoppcurecv(struct ath_hw *ah)
947{ 982{
@@ -949,12 +984,13 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
949 984
950 ath9k_hw_disable_mib_counters(ah); 985 ath9k_hw_disable_mib_counters(ah);
951} 986}
987EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
952 988
953bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 989bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
954{ 990{
955#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 991#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
956#define AH_RX_TIME_QUANTUM 100 /* usec */ 992#define AH_RX_TIME_QUANTUM 100 /* usec */
957 993 struct ath_common *common = ath9k_hw_common(ah);
958 int i; 994 int i;
959 995
960 REG_WRITE(ah, AR_CR, AR_CR_RXD); 996 REG_WRITE(ah, AR_CR, AR_CR_RXD);
@@ -967,12 +1003,12 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
967 } 1003 }
968 1004
969 if (i == 0) { 1005 if (i == 0) {
970 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1006 ath_print(common, ATH_DBG_FATAL,
971 "DMA failed to stop in %d ms " 1007 "DMA failed to stop in %d ms "
972 "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 1008 "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
973 AH_RX_STOP_DMA_TIMEOUT / 1000, 1009 AH_RX_STOP_DMA_TIMEOUT / 1000,
974 REG_READ(ah, AR_CR), 1010 REG_READ(ah, AR_CR),
975 REG_READ(ah, AR_DIAG_SW)); 1011 REG_READ(ah, AR_DIAG_SW));
976 return false; 1012 return false;
977 } else { 1013 } else {
978 return true; 1014 return true;
@@ -981,3 +1017,17 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
981#undef AH_RX_TIME_QUANTUM 1017#undef AH_RX_TIME_QUANTUM
982#undef AH_RX_STOP_DMA_TIMEOUT 1018#undef AH_RX_STOP_DMA_TIMEOUT
983} 1019}
1020EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
1021
1022int ath9k_hw_beaconq_setup(struct ath_hw *ah)
1023{
1024 struct ath9k_tx_queue_info qi;
1025
1026 memset(&qi, 0, sizeof(qi));
1027 qi.tqi_aifs = 1;
1028 qi.tqi_cwmin = 0;
1029 qi.tqi_cwmax = 0;
1030 /* NB: don't enable any interrupts */
1031 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
1032}
1033EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index f56e77da6c3e..fefb65dafb1c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -614,16 +614,6 @@ enum ath9k_cipher {
614 ATH9K_CIPHER_MIC = 127 614 ATH9K_CIPHER_MIC = 127
615}; 615};
616 616
617enum ath9k_ht_macmode {
618 ATH9K_HT_MACMODE_20 = 0,
619 ATH9K_HT_MACMODE_2040 = 1,
620};
621
622enum ath9k_ht_extprotspacing {
623 ATH9K_HT_EXTPROTSPACING_20 = 0,
624 ATH9K_HT_EXTPROTSPACING_25 = 1,
625};
626
627struct ath_hw; 617struct ath_hw;
628struct ath9k_channel; 618struct ath9k_channel;
629struct ath_rate_table; 619struct ath_rate_table;
@@ -677,5 +667,6 @@ void ath9k_hw_rxena(struct ath_hw *ah);
677void ath9k_hw_startpcureceive(struct ath_hw *ah); 667void ath9k_hw_startpcureceive(struct ath_hw *ah);
678void ath9k_hw_stoppcurecv(struct ath_hw *ah); 668void ath9k_hw_stoppcurecv(struct ath_hw *ah);
679bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 669bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
670int ath9k_hw_beaconq_setup(struct ath_hw *ah);
680 671
681#endif /* MAC_H */ 672#endif /* MAC_H */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 43d2be9867fc..cbf5d2a1bb26 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include "ath9k.h" 18#include "ath9k.h"
19#include "btcoex.h"
19 20
20static char *dev_info = "ath9k"; 21static char *dev_info = "ath9k";
21 22
@@ -28,6 +29,10 @@ static int modparam_nohwcrypt;
28module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 29module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
29MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 30MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
30 31
32static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
33module_param_named(debug, ath9k_debug, uint, 0);
34MODULE_PARM_DESC(debug, "Debugging mask");
35
31/* We use the hw_value as an index into our private channel structure */ 36/* We use the hw_value as an index into our private channel structure */
32 37
33#define CHAN2G(_freq, _idx) { \ 38#define CHAN2G(_freq, _idx) { \
@@ -224,8 +229,9 @@ static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
224 } 229 }
225 sband->n_bitrates++; 230 sband->n_bitrates++;
226 231
227 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n", 232 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
228 rate[i].bitrate / 10, rate[i].hw_value); 233 "Rate: %2dMbps, ratecode: %2d\n",
234 rate[i].bitrate / 10, rate[i].hw_value);
229 } 235 }
230} 236}
231 237
@@ -242,6 +248,51 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
242 return channel; 248 return channel;
243} 249}
244 250
251static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
252{
253 unsigned long flags;
254 bool ret;
255
256 spin_lock_irqsave(&sc->sc_pm_lock, flags);
257 ret = ath9k_hw_setpower(sc->sc_ah, mode);
258 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
259
260 return ret;
261}
262
263void ath9k_ps_wakeup(struct ath_softc *sc)
264{
265 unsigned long flags;
266
267 spin_lock_irqsave(&sc->sc_pm_lock, flags);
268 if (++sc->ps_usecount != 1)
269 goto unlock;
270
271 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
272
273 unlock:
274 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
275}
276
277void ath9k_ps_restore(struct ath_softc *sc)
278{
279 unsigned long flags;
280
281 spin_lock_irqsave(&sc->sc_pm_lock, flags);
282 if (--sc->ps_usecount != 0)
283 goto unlock;
284
285 if (sc->ps_enabled &&
286 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
287 SC_OP_WAIT_FOR_CAB |
288 SC_OP_WAIT_FOR_PSPOLL_DATA |
289 SC_OP_WAIT_FOR_TX_ACK)))
290 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
291
292 unlock:
293 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
294}
295
245/* 296/*
246 * Set/change channels. If the channel is really being changed, it's done 297 * Set/change channels. If the channel is really being changed, it's done
247 * by reseting the chip. To accomplish this we must first cleanup any pending 298 * by reseting the chip. To accomplish this we must first cleanup any pending
@@ -251,6 +302,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
251 struct ath9k_channel *hchan) 302 struct ath9k_channel *hchan)
252{ 303{
253 struct ath_hw *ah = sc->sc_ah; 304 struct ath_hw *ah = sc->sc_ah;
305 struct ath_common *common = ath9k_hw_common(ah);
306 struct ieee80211_conf *conf = &common->hw->conf;
254 bool fastcc = true, stopped; 307 bool fastcc = true, stopped;
255 struct ieee80211_channel *channel = hw->conf.channel; 308 struct ieee80211_channel *channel = hw->conf.channel;
256 int r; 309 int r;
@@ -280,19 +333,19 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
280 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET)) 333 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
281 fastcc = false; 334 fastcc = false;
282 335
283 DPRINTF(sc, ATH_DBG_CONFIG, 336 ath_print(common, ATH_DBG_CONFIG,
284 "(%u MHz) -> (%u MHz), chanwidth: %d\n", 337 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n",
285 sc->sc_ah->curchan->channel, 338 sc->sc_ah->curchan->channel,
286 channel->center_freq, sc->tx_chan_width); 339 channel->center_freq, conf_is_ht40(conf));
287 340
288 spin_lock_bh(&sc->sc_resetlock); 341 spin_lock_bh(&sc->sc_resetlock);
289 342
290 r = ath9k_hw_reset(ah, hchan, fastcc); 343 r = ath9k_hw_reset(ah, hchan, fastcc);
291 if (r) { 344 if (r) {
292 DPRINTF(sc, ATH_DBG_FATAL, 345 ath_print(common, ATH_DBG_FATAL,
293 "Unable to reset channel (%u Mhz) " 346 "Unable to reset channel (%u Mhz) "
294 "reset status %d\n", 347 "reset status %d\n",
295 channel->center_freq, r); 348 channel->center_freq, r);
296 spin_unlock_bh(&sc->sc_resetlock); 349 spin_unlock_bh(&sc->sc_resetlock);
297 goto ps_restore; 350 goto ps_restore;
298 } 351 }
@@ -301,8 +354,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
301 sc->sc_flags &= ~SC_OP_FULL_RESET; 354 sc->sc_flags &= ~SC_OP_FULL_RESET;
302 355
303 if (ath_startrecv(sc) != 0) { 356 if (ath_startrecv(sc) != 0) {
304 DPRINTF(sc, ATH_DBG_FATAL, 357 ath_print(common, ATH_DBG_FATAL,
305 "Unable to restart recv logic\n"); 358 "Unable to restart recv logic\n");
306 r = -EIO; 359 r = -EIO;
307 goto ps_restore; 360 goto ps_restore;
308 } 361 }
@@ -327,6 +380,7 @@ static void ath_ani_calibrate(unsigned long data)
327{ 380{
328 struct ath_softc *sc = (struct ath_softc *)data; 381 struct ath_softc *sc = (struct ath_softc *)data;
329 struct ath_hw *ah = sc->sc_ah; 382 struct ath_hw *ah = sc->sc_ah;
383 struct ath_common *common = ath9k_hw_common(ah);
330 bool longcal = false; 384 bool longcal = false;
331 bool shortcal = false; 385 bool shortcal = false;
332 bool aniflag = false; 386 bool aniflag = false;
@@ -351,33 +405,34 @@ static void ath_ani_calibrate(unsigned long data)
351 ath9k_ps_wakeup(sc); 405 ath9k_ps_wakeup(sc);
352 406
353 /* Long calibration runs independently of short calibration. */ 407 /* Long calibration runs independently of short calibration. */
354 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { 408 if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
355 longcal = true; 409 longcal = true;
356 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies); 410 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
357 sc->ani.longcal_timer = timestamp; 411 common->ani.longcal_timer = timestamp;
358 } 412 }
359 413
360 /* Short calibration applies only while caldone is false */ 414 /* Short calibration applies only while caldone is false */
361 if (!sc->ani.caldone) { 415 if (!common->ani.caldone) {
362 if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) { 416 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
363 shortcal = true; 417 shortcal = true;
364 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies); 418 ath_print(common, ATH_DBG_ANI,
365 sc->ani.shortcal_timer = timestamp; 419 "shortcal @%lu\n", jiffies);
366 sc->ani.resetcal_timer = timestamp; 420 common->ani.shortcal_timer = timestamp;
421 common->ani.resetcal_timer = timestamp;
367 } 422 }
368 } else { 423 } else {
369 if ((timestamp - sc->ani.resetcal_timer) >= 424 if ((timestamp - common->ani.resetcal_timer) >=
370 ATH_RESTART_CALINTERVAL) { 425 ATH_RESTART_CALINTERVAL) {
371 sc->ani.caldone = ath9k_hw_reset_calvalid(ah); 426 common->ani.caldone = ath9k_hw_reset_calvalid(ah);
372 if (sc->ani.caldone) 427 if (common->ani.caldone)
373 sc->ani.resetcal_timer = timestamp; 428 common->ani.resetcal_timer = timestamp;
374 } 429 }
375 } 430 }
376 431
377 /* Verify whether we must check ANI */ 432 /* Verify whether we must check ANI */
378 if ((timestamp - sc->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) { 433 if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
379 aniflag = true; 434 aniflag = true;
380 sc->ani.checkani_timer = timestamp; 435 common->ani.checkani_timer = timestamp;
381 } 436 }
382 437
383 /* Skip all processing if there's nothing to do. */ 438 /* Skip all processing if there's nothing to do. */
@@ -388,16 +443,21 @@ static void ath_ani_calibrate(unsigned long data)
388 443
389 /* Perform calibration if necessary */ 444 /* Perform calibration if necessary */
390 if (longcal || shortcal) { 445 if (longcal || shortcal) {
391 sc->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan, 446 common->ani.caldone =
392 sc->rx_chainmask, longcal); 447 ath9k_hw_calibrate(ah,
448 ah->curchan,
449 common->rx_chainmask,
450 longcal);
393 451
394 if (longcal) 452 if (longcal)
395 sc->ani.noise_floor = ath9k_hw_getchan_noise(ah, 453 common->ani.noise_floor = ath9k_hw_getchan_noise(ah,
396 ah->curchan); 454 ah->curchan);
397 455
398 DPRINTF(sc, ATH_DBG_ANI," calibrate chan %u/%x nf: %d\n", 456 ath_print(common, ATH_DBG_ANI,
399 ah->curchan->channel, ah->curchan->channelFlags, 457 " calibrate chan %u/%x nf: %d\n",
400 sc->ani.noise_floor); 458 ah->curchan->channel,
459 ah->curchan->channelFlags,
460 common->ani.noise_floor);
401 } 461 }
402 } 462 }
403 463
@@ -413,21 +473,21 @@ set_timer:
413 cal_interval = ATH_LONG_CALINTERVAL; 473 cal_interval = ATH_LONG_CALINTERVAL;
414 if (sc->sc_ah->config.enable_ani) 474 if (sc->sc_ah->config.enable_ani)
415 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL); 475 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
416 if (!sc->ani.caldone) 476 if (!common->ani.caldone)
417 cal_interval = min(cal_interval, (u32)short_cal_interval); 477 cal_interval = min(cal_interval, (u32)short_cal_interval);
418 478
419 mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); 479 mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
420} 480}
421 481
422static void ath_start_ani(struct ath_softc *sc) 482static void ath_start_ani(struct ath_common *common)
423{ 483{
424 unsigned long timestamp = jiffies_to_msecs(jiffies); 484 unsigned long timestamp = jiffies_to_msecs(jiffies);
425 485
426 sc->ani.longcal_timer = timestamp; 486 common->ani.longcal_timer = timestamp;
427 sc->ani.shortcal_timer = timestamp; 487 common->ani.shortcal_timer = timestamp;
428 sc->ani.checkani_timer = timestamp; 488 common->ani.checkani_timer = timestamp;
429 489
430 mod_timer(&sc->ani.timer, 490 mod_timer(&common->ani.timer,
431 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL)); 491 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
432} 492}
433 493
@@ -439,17 +499,22 @@ static void ath_start_ani(struct ath_softc *sc)
439 */ 499 */
440void ath_update_chainmask(struct ath_softc *sc, int is_ht) 500void ath_update_chainmask(struct ath_softc *sc, int is_ht)
441{ 501{
502 struct ath_hw *ah = sc->sc_ah;
503 struct ath_common *common = ath9k_hw_common(ah);
504
442 if ((sc->sc_flags & SC_OP_SCANNING) || is_ht || 505 if ((sc->sc_flags & SC_OP_SCANNING) || is_ht ||
443 (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE)) { 506 (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
444 sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask; 507 common->tx_chainmask = ah->caps.tx_chainmask;
445 sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask; 508 common->rx_chainmask = ah->caps.rx_chainmask;
446 } else { 509 } else {
447 sc->tx_chainmask = 1; 510 common->tx_chainmask = 1;
448 sc->rx_chainmask = 1; 511 common->rx_chainmask = 1;
449 } 512 }
450 513
451 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n", 514 ath_print(common, ATH_DBG_CONFIG,
452 sc->tx_chainmask, sc->rx_chainmask); 515 "tx chmask: %d, rx chmask: %d\n",
516 common->tx_chainmask,
517 common->rx_chainmask);
453} 518}
454 519
455static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) 520static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -478,6 +543,9 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
478static void ath9k_tasklet(unsigned long data) 543static void ath9k_tasklet(unsigned long data)
479{ 544{
480 struct ath_softc *sc = (struct ath_softc *)data; 545 struct ath_softc *sc = (struct ath_softc *)data;
546 struct ath_hw *ah = sc->sc_ah;
547 struct ath_common *common = ath9k_hw_common(ah);
548
481 u32 status = sc->intrstatus; 549 u32 status = sc->intrstatus;
482 550
483 ath9k_ps_wakeup(sc); 551 ath9k_ps_wakeup(sc);
@@ -502,16 +570,17 @@ static void ath9k_tasklet(unsigned long data)
502 * TSF sync does not look correct; remain awake to sync with 570 * TSF sync does not look correct; remain awake to sync with
503 * the next Beacon. 571 * the next Beacon.
504 */ 572 */
505 DPRINTF(sc, ATH_DBG_PS, "TSFOOR - Sync with next Beacon\n"); 573 ath_print(common, ATH_DBG_PS,
574 "TSFOOR - Sync with next Beacon\n");
506 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC; 575 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
507 } 576 }
508 577
509 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 578 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
510 if (status & ATH9K_INT_GENTIMER) 579 if (status & ATH9K_INT_GENTIMER)
511 ath_gen_timer_isr(sc->sc_ah); 580 ath_gen_timer_isr(sc->sc_ah);
512 581
513 /* re-enable hardware interrupt */ 582 /* re-enable hardware interrupt */
514 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 583 ath9k_hw_set_interrupts(ah, sc->imask);
515 ath9k_ps_restore(sc); 584 ath9k_ps_restore(sc);
516} 585}
517 586
@@ -602,7 +671,7 @@ irqreturn_t ath_isr(int irq, void *dev)
602 if (status & ATH9K_INT_TIM_TIMER) { 671 if (status & ATH9K_INT_TIM_TIMER) {
603 /* Clear RxAbort bit so that we can 672 /* Clear RxAbort bit so that we can
604 * receive frames */ 673 * receive frames */
605 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); 674 ath9k_setpower(sc, ATH9K_PM_AWAKE);
606 ath9k_hw_setrxabort(sc->sc_ah, 0); 675 ath9k_hw_setrxabort(sc->sc_ah, 0);
607 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON; 676 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
608 } 677 }
@@ -664,10 +733,11 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
664 return chanmode; 733 return chanmode;
665} 734}
666 735
667static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key, 736static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
668 struct ath9k_keyval *hk, const u8 *addr, 737 struct ath9k_keyval *hk, const u8 *addr,
669 bool authenticator) 738 bool authenticator)
670{ 739{
740 struct ath_hw *ah = common->ah;
671 const u8 *key_rxmic; 741 const u8 *key_rxmic;
672 const u8 *key_txmic; 742 const u8 *key_txmic;
673 743
@@ -687,42 +757,42 @@ static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
687 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 757 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
688 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic)); 758 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
689 } 759 }
690 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr); 760 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
691 } 761 }
692 if (!sc->splitmic) { 762 if (!common->splitmic) {
693 /* TX and RX keys share the same key cache entry. */ 763 /* TX and RX keys share the same key cache entry. */
694 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 764 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
695 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic)); 765 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
696 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr); 766 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
697 } 767 }
698 768
699 /* Separate key cache entries for TX and RX */ 769 /* Separate key cache entries for TX and RX */
700 770
701 /* TX key goes at first index, RX key at +32. */ 771 /* TX key goes at first index, RX key at +32. */
702 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); 772 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
703 if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) { 773 if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
704 /* TX MIC entry failed. No need to proceed further */ 774 /* TX MIC entry failed. No need to proceed further */
705 DPRINTF(sc, ATH_DBG_FATAL, 775 ath_print(common, ATH_DBG_FATAL,
706 "Setting TX MIC Key Failed\n"); 776 "Setting TX MIC Key Failed\n");
707 return 0; 777 return 0;
708 } 778 }
709 779
710 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); 780 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
711 /* XXX delete tx key on failure? */ 781 /* XXX delete tx key on failure? */
712 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix + 32, hk, addr); 782 return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
713} 783}
714 784
715static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc) 785static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
716{ 786{
717 int i; 787 int i;
718 788
719 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) { 789 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
720 if (test_bit(i, sc->keymap) || 790 if (test_bit(i, common->keymap) ||
721 test_bit(i + 64, sc->keymap)) 791 test_bit(i + 64, common->keymap))
722 continue; /* At least one part of TKIP key allocated */ 792 continue; /* At least one part of TKIP key allocated */
723 if (sc->splitmic && 793 if (common->splitmic &&
724 (test_bit(i + 32, sc->keymap) || 794 (test_bit(i + 32, common->keymap) ||
725 test_bit(i + 64 + 32, sc->keymap))) 795 test_bit(i + 64 + 32, common->keymap)))
726 continue; /* At least one part of TKIP key allocated */ 796 continue; /* At least one part of TKIP key allocated */
727 797
728 /* Found a free slot for a TKIP key */ 798 /* Found a free slot for a TKIP key */
@@ -731,60 +801,60 @@ static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
731 return -1; 801 return -1;
732} 802}
733 803
734static int ath_reserve_key_cache_slot(struct ath_softc *sc) 804static int ath_reserve_key_cache_slot(struct ath_common *common)
735{ 805{
736 int i; 806 int i;
737 807
738 /* First, try to find slots that would not be available for TKIP. */ 808 /* First, try to find slots that would not be available for TKIP. */
739 if (sc->splitmic) { 809 if (common->splitmic) {
740 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) { 810 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
741 if (!test_bit(i, sc->keymap) && 811 if (!test_bit(i, common->keymap) &&
742 (test_bit(i + 32, sc->keymap) || 812 (test_bit(i + 32, common->keymap) ||
743 test_bit(i + 64, sc->keymap) || 813 test_bit(i + 64, common->keymap) ||
744 test_bit(i + 64 + 32, sc->keymap))) 814 test_bit(i + 64 + 32, common->keymap)))
745 return i; 815 return i;
746 if (!test_bit(i + 32, sc->keymap) && 816 if (!test_bit(i + 32, common->keymap) &&
747 (test_bit(i, sc->keymap) || 817 (test_bit(i, common->keymap) ||
748 test_bit(i + 64, sc->keymap) || 818 test_bit(i + 64, common->keymap) ||
749 test_bit(i + 64 + 32, sc->keymap))) 819 test_bit(i + 64 + 32, common->keymap)))
750 return i + 32; 820 return i + 32;
751 if (!test_bit(i + 64, sc->keymap) && 821 if (!test_bit(i + 64, common->keymap) &&
752 (test_bit(i , sc->keymap) || 822 (test_bit(i , common->keymap) ||
753 test_bit(i + 32, sc->keymap) || 823 test_bit(i + 32, common->keymap) ||
754 test_bit(i + 64 + 32, sc->keymap))) 824 test_bit(i + 64 + 32, common->keymap)))
755 return i + 64; 825 return i + 64;
756 if (!test_bit(i + 64 + 32, sc->keymap) && 826 if (!test_bit(i + 64 + 32, common->keymap) &&
757 (test_bit(i, sc->keymap) || 827 (test_bit(i, common->keymap) ||
758 test_bit(i + 32, sc->keymap) || 828 test_bit(i + 32, common->keymap) ||
759 test_bit(i + 64, sc->keymap))) 829 test_bit(i + 64, common->keymap)))
760 return i + 64 + 32; 830 return i + 64 + 32;
761 } 831 }
762 } else { 832 } else {
763 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) { 833 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
764 if (!test_bit(i, sc->keymap) && 834 if (!test_bit(i, common->keymap) &&
765 test_bit(i + 64, sc->keymap)) 835 test_bit(i + 64, common->keymap))
766 return i; 836 return i;
767 if (test_bit(i, sc->keymap) && 837 if (test_bit(i, common->keymap) &&
768 !test_bit(i + 64, sc->keymap)) 838 !test_bit(i + 64, common->keymap))
769 return i + 64; 839 return i + 64;
770 } 840 }
771 } 841 }
772 842
773 /* No partially used TKIP slots, pick any available slot */ 843 /* No partially used TKIP slots, pick any available slot */
774 for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) { 844 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
775 /* Do not allow slots that could be needed for TKIP group keys 845 /* Do not allow slots that could be needed for TKIP group keys
776 * to be used. This limitation could be removed if we know that 846 * to be used. This limitation could be removed if we know that
777 * TKIP will not be used. */ 847 * TKIP will not be used. */
778 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID) 848 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
779 continue; 849 continue;
780 if (sc->splitmic) { 850 if (common->splitmic) {
781 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID) 851 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
782 continue; 852 continue;
783 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID) 853 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
784 continue; 854 continue;
785 } 855 }
786 856
787 if (!test_bit(i, sc->keymap)) 857 if (!test_bit(i, common->keymap))
788 return i; /* Found a free slot for a key */ 858 return i; /* Found a free slot for a key */
789 } 859 }
790 860
@@ -792,11 +862,12 @@ static int ath_reserve_key_cache_slot(struct ath_softc *sc)
792 return -1; 862 return -1;
793} 863}
794 864
795static int ath_key_config(struct ath_softc *sc, 865static int ath_key_config(struct ath_common *common,
796 struct ieee80211_vif *vif, 866 struct ieee80211_vif *vif,
797 struct ieee80211_sta *sta, 867 struct ieee80211_sta *sta,
798 struct ieee80211_key_conf *key) 868 struct ieee80211_key_conf *key)
799{ 869{
870 struct ath_hw *ah = common->ah;
800 struct ath9k_keyval hk; 871 struct ath9k_keyval hk;
801 const u8 *mac = NULL; 872 const u8 *mac = NULL;
802 int ret = 0; 873 int ret = 0;
@@ -842,54 +913,57 @@ static int ath_key_config(struct ath_softc *sc,
842 mac = sta->addr; 913 mac = sta->addr;
843 914
844 if (key->alg == ALG_TKIP) 915 if (key->alg == ALG_TKIP)
845 idx = ath_reserve_key_cache_slot_tkip(sc); 916 idx = ath_reserve_key_cache_slot_tkip(common);
846 else 917 else
847 idx = ath_reserve_key_cache_slot(sc); 918 idx = ath_reserve_key_cache_slot(common);
848 if (idx < 0) 919 if (idx < 0)
849 return -ENOSPC; /* no free key cache entries */ 920 return -ENOSPC; /* no free key cache entries */
850 } 921 }
851 922
852 if (key->alg == ALG_TKIP) 923 if (key->alg == ALG_TKIP)
853 ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac, 924 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
854 vif->type == NL80211_IFTYPE_AP); 925 vif->type == NL80211_IFTYPE_AP);
855 else 926 else
856 ret = ath9k_hw_set_keycache_entry(sc->sc_ah, idx, &hk, mac); 927 ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
857 928
858 if (!ret) 929 if (!ret)
859 return -EIO; 930 return -EIO;
860 931
861 set_bit(idx, sc->keymap); 932 set_bit(idx, common->keymap);
862 if (key->alg == ALG_TKIP) { 933 if (key->alg == ALG_TKIP) {
863 set_bit(idx + 64, sc->keymap); 934 set_bit(idx + 64, common->keymap);
864 if (sc->splitmic) { 935 if (common->splitmic) {
865 set_bit(idx + 32, sc->keymap); 936 set_bit(idx + 32, common->keymap);
866 set_bit(idx + 64 + 32, sc->keymap); 937 set_bit(idx + 64 + 32, common->keymap);
867 } 938 }
868 } 939 }
869 940
870 return idx; 941 return idx;
871} 942}
872 943
873static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key) 944static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
874{ 945{
875 ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx); 946 struct ath_hw *ah = common->ah;
947
948 ath9k_hw_keyreset(ah, key->hw_key_idx);
876 if (key->hw_key_idx < IEEE80211_WEP_NKID) 949 if (key->hw_key_idx < IEEE80211_WEP_NKID)
877 return; 950 return;
878 951
879 clear_bit(key->hw_key_idx, sc->keymap); 952 clear_bit(key->hw_key_idx, common->keymap);
880 if (key->alg != ALG_TKIP) 953 if (key->alg != ALG_TKIP)
881 return; 954 return;
882 955
883 clear_bit(key->hw_key_idx + 64, sc->keymap); 956 clear_bit(key->hw_key_idx + 64, common->keymap);
884 if (sc->splitmic) { 957 if (common->splitmic) {
885 clear_bit(key->hw_key_idx + 32, sc->keymap); 958 clear_bit(key->hw_key_idx + 32, common->keymap);
886 clear_bit(key->hw_key_idx + 64 + 32, sc->keymap); 959 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
887 } 960 }
888} 961}
889 962
890static void setup_ht_cap(struct ath_softc *sc, 963static void setup_ht_cap(struct ath_softc *sc,
891 struct ieee80211_sta_ht_cap *ht_info) 964 struct ieee80211_sta_ht_cap *ht_info)
892{ 965{
966 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
893 u8 tx_streams, rx_streams; 967 u8 tx_streams, rx_streams;
894 968
895 ht_info->ht_supported = true; 969 ht_info->ht_supported = true;
@@ -903,12 +977,15 @@ static void setup_ht_cap(struct ath_softc *sc,
903 977
904 /* set up supported mcs set */ 978 /* set up supported mcs set */
905 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 979 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
906 tx_streams = !(sc->tx_chainmask & (sc->tx_chainmask - 1)) ? 1 : 2; 980 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
907 rx_streams = !(sc->rx_chainmask & (sc->rx_chainmask - 1)) ? 1 : 2; 981 1 : 2;
982 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
983 1 : 2;
908 984
909 if (tx_streams != rx_streams) { 985 if (tx_streams != rx_streams) {
910 DPRINTF(sc, ATH_DBG_CONFIG, "TX streams %d, RX streams: %d\n", 986 ath_print(common, ATH_DBG_CONFIG,
911 tx_streams, rx_streams); 987 "TX streams %d, RX streams: %d\n",
988 tx_streams, rx_streams);
912 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 989 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
913 ht_info->mcs.tx_params |= ((tx_streams - 1) << 990 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
914 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 991 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
@@ -925,14 +1002,17 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
925 struct ieee80211_vif *vif, 1002 struct ieee80211_vif *vif,
926 struct ieee80211_bss_conf *bss_conf) 1003 struct ieee80211_bss_conf *bss_conf)
927{ 1004{
1005 struct ath_hw *ah = sc->sc_ah;
1006 struct ath_common *common = ath9k_hw_common(ah);
928 1007
929 if (bss_conf->assoc) { 1008 if (bss_conf->assoc) {
930 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n", 1009 ath_print(common, ATH_DBG_CONFIG,
931 bss_conf->aid, sc->curbssid); 1010 "Bss Info ASSOC %d, bssid: %pM\n",
1011 bss_conf->aid, common->curbssid);
932 1012
933 /* New association, store aid */ 1013 /* New association, store aid */
934 sc->curaid = bss_conf->aid; 1014 common->curaid = bss_conf->aid;
935 ath9k_hw_write_associd(sc); 1015 ath9k_hw_write_associd(ah);
936 1016
937 /* 1017 /*
938 * Request a re-configuration of Beacon related timers 1018 * Request a re-configuration of Beacon related timers
@@ -947,12 +1027,12 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
947 /* Reset rssi stats */ 1027 /* Reset rssi stats */
948 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1028 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
949 1029
950 ath_start_ani(sc); 1030 ath_start_ani(common);
951 } else { 1031 } else {
952 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n"); 1032 ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
953 sc->curaid = 0; 1033 common->curaid = 0;
954 /* Stop ANI */ 1034 /* Stop ANI */
955 del_timer_sync(&sc->ani.timer); 1035 del_timer_sync(&common->ani.timer);
956 } 1036 }
957} 1037}
958 1038
@@ -1042,8 +1122,8 @@ static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1042 1122
1043 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev); 1123 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1044 if (ret) 1124 if (ret)
1045 DPRINTF(sc, ATH_DBG_FATAL, 1125 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1046 "Failed to register led:%s", led->name); 1126 "Failed to register led:%s", led->name);
1047 else 1127 else
1048 led->registered = 1; 1128 led->registered = 1;
1049 return ret; 1129 return ret;
@@ -1124,10 +1204,11 @@ fail:
1124 ath_deinit_leds(sc); 1204 ath_deinit_leds(sc);
1125} 1205}
1126 1206
1127void ath_radio_enable(struct ath_softc *sc) 1207void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
1128{ 1208{
1129 struct ath_hw *ah = sc->sc_ah; 1209 struct ath_hw *ah = sc->sc_ah;
1130 struct ieee80211_channel *channel = sc->hw->conf.channel; 1210 struct ath_common *common = ath9k_hw_common(ah);
1211 struct ieee80211_channel *channel = hw->conf.channel;
1131 int r; 1212 int r;
1132 1213
1133 ath9k_ps_wakeup(sc); 1214 ath9k_ps_wakeup(sc);
@@ -1139,17 +1220,17 @@ void ath_radio_enable(struct ath_softc *sc)
1139 spin_lock_bh(&sc->sc_resetlock); 1220 spin_lock_bh(&sc->sc_resetlock);
1140 r = ath9k_hw_reset(ah, ah->curchan, false); 1221 r = ath9k_hw_reset(ah, ah->curchan, false);
1141 if (r) { 1222 if (r) {
1142 DPRINTF(sc, ATH_DBG_FATAL, 1223 ath_print(common, ATH_DBG_FATAL,
1143 "Unable to reset channel %u (%uMhz) ", 1224 "Unable to reset channel %u (%uMhz) ",
1144 "reset status %d\n", 1225 "reset status %d\n",
1145 channel->center_freq, r); 1226 channel->center_freq, r);
1146 } 1227 }
1147 spin_unlock_bh(&sc->sc_resetlock); 1228 spin_unlock_bh(&sc->sc_resetlock);
1148 1229
1149 ath_update_txpow(sc); 1230 ath_update_txpow(sc);
1150 if (ath_startrecv(sc) != 0) { 1231 if (ath_startrecv(sc) != 0) {
1151 DPRINTF(sc, ATH_DBG_FATAL, 1232 ath_print(common, ATH_DBG_FATAL,
1152 "Unable to restart recv logic\n"); 1233 "Unable to restart recv logic\n");
1153 return; 1234 return;
1154 } 1235 }
1155 1236
@@ -1164,18 +1245,18 @@ void ath_radio_enable(struct ath_softc *sc)
1164 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 1245 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1165 ath9k_hw_set_gpio(ah, ah->led_pin, 0); 1246 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
1166 1247
1167 ieee80211_wake_queues(sc->hw); 1248 ieee80211_wake_queues(hw);
1168 ath9k_ps_restore(sc); 1249 ath9k_ps_restore(sc);
1169} 1250}
1170 1251
1171void ath_radio_disable(struct ath_softc *sc) 1252void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
1172{ 1253{
1173 struct ath_hw *ah = sc->sc_ah; 1254 struct ath_hw *ah = sc->sc_ah;
1174 struct ieee80211_channel *channel = sc->hw->conf.channel; 1255 struct ieee80211_channel *channel = hw->conf.channel;
1175 int r; 1256 int r;
1176 1257
1177 ath9k_ps_wakeup(sc); 1258 ath9k_ps_wakeup(sc);
1178 ieee80211_stop_queues(sc->hw); 1259 ieee80211_stop_queues(hw);
1179 1260
1180 /* Disable LED */ 1261 /* Disable LED */
1181 ath9k_hw_set_gpio(ah, ah->led_pin, 1); 1262 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
@@ -1189,22 +1270,22 @@ void ath_radio_disable(struct ath_softc *sc)
1189 ath_flushrecv(sc); /* flush recv queue */ 1270 ath_flushrecv(sc); /* flush recv queue */
1190 1271
1191 if (!ah->curchan) 1272 if (!ah->curchan)
1192 ah->curchan = ath_get_curchannel(sc, sc->hw); 1273 ah->curchan = ath_get_curchannel(sc, hw);
1193 1274
1194 spin_lock_bh(&sc->sc_resetlock); 1275 spin_lock_bh(&sc->sc_resetlock);
1195 r = ath9k_hw_reset(ah, ah->curchan, false); 1276 r = ath9k_hw_reset(ah, ah->curchan, false);
1196 if (r) { 1277 if (r) {
1197 DPRINTF(sc, ATH_DBG_FATAL, 1278 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1198 "Unable to reset channel %u (%uMhz) " 1279 "Unable to reset channel %u (%uMhz) "
1199 "reset status %d\n", 1280 "reset status %d\n",
1200 channel->center_freq, r); 1281 channel->center_freq, r);
1201 } 1282 }
1202 spin_unlock_bh(&sc->sc_resetlock); 1283 spin_unlock_bh(&sc->sc_resetlock);
1203 1284
1204 ath9k_hw_phy_disable(ah); 1285 ath9k_hw_phy_disable(ah);
1205 ath9k_hw_configpcipowersave(ah, 1, 1); 1286 ath9k_hw_configpcipowersave(ah, 1, 1);
1206 ath9k_ps_restore(sc); 1287 ath9k_ps_restore(sc);
1207 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 1288 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
1208} 1289}
1209 1290
1210/*******************/ 1291/*******************/
@@ -1236,23 +1317,26 @@ static void ath_start_rfkill_poll(struct ath_softc *sc)
1236 wiphy_rfkill_start_polling(sc->hw->wiphy); 1317 wiphy_rfkill_start_polling(sc->hw->wiphy);
1237} 1318}
1238 1319
1239void ath_cleanup(struct ath_softc *sc) 1320static void ath9k_uninit_hw(struct ath_softc *sc)
1240{ 1321{
1241 ath_detach(sc); 1322 struct ath_hw *ah = sc->sc_ah;
1242 free_irq(sc->irq, sc); 1323
1243 ath_bus_cleanup(sc); 1324 BUG_ON(!ah);
1244 kfree(sc->sec_wiphy); 1325
1245 ieee80211_free_hw(sc->hw); 1326 ath9k_exit_debug(ah);
1327 ath9k_hw_detach(ah);
1328 sc->sc_ah = NULL;
1246} 1329}
1247 1330
1248void ath_detach(struct ath_softc *sc) 1331static void ath_clean_core(struct ath_softc *sc)
1249{ 1332{
1250 struct ieee80211_hw *hw = sc->hw; 1333 struct ieee80211_hw *hw = sc->hw;
1334 struct ath_hw *ah = sc->sc_ah;
1251 int i = 0; 1335 int i = 0;
1252 1336
1253 ath9k_ps_wakeup(sc); 1337 ath9k_ps_wakeup(sc);
1254 1338
1255 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n"); 1339 dev_dbg(sc->dev, "Detach ATH hw\n");
1256 1340
1257 ath_deinit_leds(sc); 1341 ath_deinit_leds(sc);
1258 wiphy_rfkill_stop_polling(sc->hw->wiphy); 1342 wiphy_rfkill_stop_polling(sc->hw->wiphy);
@@ -1273,20 +1357,36 @@ void ath_detach(struct ath_softc *sc)
1273 tasklet_kill(&sc->bcon_tasklet); 1357 tasklet_kill(&sc->bcon_tasklet);
1274 1358
1275 if (!(sc->sc_flags & SC_OP_INVALID)) 1359 if (!(sc->sc_flags & SC_OP_INVALID))
1276 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 1360 ath9k_setpower(sc, ATH9K_PM_AWAKE);
1277 1361
1278 /* cleanup tx queues */ 1362 /* cleanup tx queues */
1279 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 1363 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1280 if (ATH_TXQ_SETUP(sc, i)) 1364 if (ATH_TXQ_SETUP(sc, i))
1281 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1365 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1282 1366
1283 if ((sc->btcoex_info.no_stomp_timer) && 1367 if ((sc->btcoex.no_stomp_timer) &&
1284 sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 1368 ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1285 ath_gen_timer_free(sc->sc_ah, sc->btcoex_info.no_stomp_timer); 1369 ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
1370}
1286 1371
1287 ath9k_hw_detach(sc->sc_ah); 1372void ath_detach(struct ath_softc *sc)
1288 sc->sc_ah = NULL; 1373{
1289 ath9k_exit_debug(sc); 1374 ath_clean_core(sc);
1375 ath9k_uninit_hw(sc);
1376}
1377
1378void ath_cleanup(struct ath_softc *sc)
1379{
1380 struct ath_hw *ah = sc->sc_ah;
1381 struct ath_common *common = ath9k_hw_common(ah);
1382
1383 ath_clean_core(sc);
1384 free_irq(sc->irq, sc);
1385 ath_bus_cleanup(common);
1386 kfree(sc->sec_wiphy);
1387 ieee80211_free_hw(sc->hw);
1388
1389 ath9k_uninit_hw(sc);
1290} 1390}
1291 1391
1292static int ath9k_reg_notifier(struct wiphy *wiphy, 1392static int ath9k_reg_notifier(struct wiphy *wiphy,
@@ -1295,29 +1395,245 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
1295 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 1395 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1296 struct ath_wiphy *aphy = hw->priv; 1396 struct ath_wiphy *aphy = hw->priv;
1297 struct ath_softc *sc = aphy->sc; 1397 struct ath_softc *sc = aphy->sc;
1298 struct ath_regulatory *reg = &sc->common.regulatory; 1398 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
1299 1399
1300 return ath_reg_notifier_apply(wiphy, request, reg); 1400 return ath_reg_notifier_apply(wiphy, request, reg);
1301} 1401}
1302 1402
1303/* 1403/*
1404 * Detects if there is any priority bt traffic
1405 */
1406static void ath_detect_bt_priority(struct ath_softc *sc)
1407{
1408 struct ath_btcoex *btcoex = &sc->btcoex;
1409 struct ath_hw *ah = sc->sc_ah;
1410
1411 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
1412 btcoex->bt_priority_cnt++;
1413
1414 if (time_after(jiffies, btcoex->bt_priority_time +
1415 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
1416 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
1417 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
1418 "BT priority traffic detected");
1419 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
1420 } else {
1421 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
1422 }
1423
1424 btcoex->bt_priority_cnt = 0;
1425 btcoex->bt_priority_time = jiffies;
1426 }
1427}
1428
1429/*
1430 * Configures appropriate weight based on stomp type.
1431 */
1432static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
1433 enum ath_stomp_type stomp_type)
1434{
1435 struct ath_hw *ah = sc->sc_ah;
1436
1437 switch (stomp_type) {
1438 case ATH_BTCOEX_STOMP_ALL:
1439 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1440 AR_STOMP_ALL_WLAN_WGHT);
1441 break;
1442 case ATH_BTCOEX_STOMP_LOW:
1443 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1444 AR_STOMP_LOW_WLAN_WGHT);
1445 break;
1446 case ATH_BTCOEX_STOMP_NONE:
1447 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1448 AR_STOMP_NONE_WLAN_WGHT);
1449 break;
1450 default:
1451 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1452 "Invalid Stomptype\n");
1453 break;
1454 }
1455
1456 ath9k_hw_btcoex_enable(ah);
1457}
1458
1459static void ath9k_gen_timer_start(struct ath_hw *ah,
1460 struct ath_gen_timer *timer,
1461 u32 timer_next,
1462 u32 timer_period)
1463{
1464 struct ath_common *common = ath9k_hw_common(ah);
1465 struct ath_softc *sc = (struct ath_softc *) common->priv;
1466
1467 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
1468
1469 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
1470 ath9k_hw_set_interrupts(ah, 0);
1471 sc->imask |= ATH9K_INT_GENTIMER;
1472 ath9k_hw_set_interrupts(ah, sc->imask);
1473 }
1474}
1475
1476static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
1477{
1478 struct ath_common *common = ath9k_hw_common(ah);
1479 struct ath_softc *sc = (struct ath_softc *) common->priv;
1480 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
1481
1482 ath9k_hw_gen_timer_stop(ah, timer);
1483
1484 /* if no timer is enabled, turn off interrupt mask */
1485 if (timer_table->timer_mask.val == 0) {
1486 ath9k_hw_set_interrupts(ah, 0);
1487 sc->imask &= ~ATH9K_INT_GENTIMER;
1488 ath9k_hw_set_interrupts(ah, sc->imask);
1489 }
1490}
1491
1492/*
1493 * This is the master bt coex timer which runs for every
1494 * 45ms, bt traffic will be given priority during 55% of this
1495 * period while wlan gets remaining 45%
1496 */
1497static void ath_btcoex_period_timer(unsigned long data)
1498{
1499 struct ath_softc *sc = (struct ath_softc *) data;
1500 struct ath_hw *ah = sc->sc_ah;
1501 struct ath_btcoex *btcoex = &sc->btcoex;
1502
1503 ath_detect_bt_priority(sc);
1504
1505 spin_lock_bh(&btcoex->btcoex_lock);
1506
1507 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
1508
1509 spin_unlock_bh(&btcoex->btcoex_lock);
1510
1511 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
1512 if (btcoex->hw_timer_enabled)
1513 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
1514
1515 ath9k_gen_timer_start(ah,
1516 btcoex->no_stomp_timer,
1517 (ath9k_hw_gettsf32(ah) +
1518 btcoex->btcoex_no_stomp),
1519 btcoex->btcoex_no_stomp * 10);
1520 btcoex->hw_timer_enabled = true;
1521 }
1522
1523 mod_timer(&btcoex->period_timer, jiffies +
1524 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
1525}
1526
1527/*
1528 * Generic tsf based hw timer which configures weight
1529 * registers to time slice between wlan and bt traffic
1530 */
1531static void ath_btcoex_no_stomp_timer(void *arg)
1532{
1533 struct ath_softc *sc = (struct ath_softc *)arg;
1534 struct ath_hw *ah = sc->sc_ah;
1535 struct ath_btcoex *btcoex = &sc->btcoex;
1536
1537 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1538 "no stomp timer running \n");
1539
1540 spin_lock_bh(&btcoex->btcoex_lock);
1541
1542 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
1543 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
1544 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
1545 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
1546
1547 spin_unlock_bh(&btcoex->btcoex_lock);
1548}
1549
1550static int ath_init_btcoex_timer(struct ath_softc *sc)
1551{
1552 struct ath_btcoex *btcoex = &sc->btcoex;
1553
1554 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
1555 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
1556 btcoex->btcoex_period / 100;
1557
1558 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
1559 (unsigned long) sc);
1560
1561 spin_lock_init(&btcoex->btcoex_lock);
1562
1563 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
1564 ath_btcoex_no_stomp_timer,
1565 ath_btcoex_no_stomp_timer,
1566 (void *) sc, AR_FIRST_NDP_TIMER);
1567
1568 if (!btcoex->no_stomp_timer)
1569 return -ENOMEM;
1570
1571 return 0;
1572}
1573
1574/*
1575 * Read and write, they both share the same lock. We do this to serialize
1576 * reads and writes on Atheros 802.11n PCI devices only. This is required
1577 * as the FIFO on these devices can only accept sanely 2 requests. After
1578 * that the device goes bananas. Serializing the reads/writes prevents this
1579 * from happening.
1580 */
1581
1582static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
1583{
1584 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1585 struct ath_common *common = ath9k_hw_common(ah);
1586 struct ath_softc *sc = (struct ath_softc *) common->priv;
1587
1588 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1589 unsigned long flags;
1590 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1591 iowrite32(val, sc->mem + reg_offset);
1592 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1593 } else
1594 iowrite32(val, sc->mem + reg_offset);
1595}
1596
1597static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
1598{
1599 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1600 struct ath_common *common = ath9k_hw_common(ah);
1601 struct ath_softc *sc = (struct ath_softc *) common->priv;
1602 u32 val;
1603
1604 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1605 unsigned long flags;
1606 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1607 val = ioread32(sc->mem + reg_offset);
1608 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1609 } else
1610 val = ioread32(sc->mem + reg_offset);
1611 return val;
1612}
1613
1614static const struct ath_ops ath9k_common_ops = {
1615 .read = ath9k_ioread32,
1616 .write = ath9k_iowrite32,
1617};
1618
1619/*
1304 * Initialize and fill ath_softc, ath_sofct is the 1620 * Initialize and fill ath_softc, ath_sofct is the
1305 * "Software Carrier" struct. Historically it has existed 1621 * "Software Carrier" struct. Historically it has existed
1306 * to allow the separation between hardware specific 1622 * to allow the separation between hardware specific
1307 * variables (now in ath_hw) and driver specific variables. 1623 * variables (now in ath_hw) and driver specific variables.
1308 */ 1624 */
1309static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid) 1625static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
1626 const struct ath_bus_ops *bus_ops)
1310{ 1627{
1311 struct ath_hw *ah = NULL; 1628 struct ath_hw *ah = NULL;
1629 struct ath_common *common;
1312 int r = 0, i; 1630 int r = 0, i;
1313 int csz = 0; 1631 int csz = 0;
1632 int qnum;
1314 1633
1315 /* XXX: hardware will not be ready until ath_open() being called */ 1634 /* XXX: hardware will not be ready until ath_open() being called */
1316 sc->sc_flags |= SC_OP_INVALID; 1635 sc->sc_flags |= SC_OP_INVALID;
1317 1636
1318 if (ath9k_init_debug(sc) < 0)
1319 printk(KERN_ERR "Unable to create debugfs files\n");
1320
1321 spin_lock_init(&sc->wiphy_lock); 1637 spin_lock_init(&sc->wiphy_lock);
1322 spin_lock_init(&sc->sc_resetlock); 1638 spin_lock_init(&sc->sc_resetlock);
1323 spin_lock_init(&sc->sc_serial_rw); 1639 spin_lock_init(&sc->sc_serial_rw);
@@ -1328,47 +1644,58 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1328 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, 1644 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1329 (unsigned long)sc); 1645 (unsigned long)sc);
1330 1646
1331 /*
1332 * Cache line size is used to size and align various
1333 * structures used to communicate with the hardware.
1334 */
1335 ath_read_cachesize(sc, &csz);
1336 /* XXX assert csz is non-zero */
1337 sc->common.cachelsz = csz << 2; /* convert to bytes */
1338
1339 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); 1647 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
1340 if (!ah) { 1648 if (!ah)
1341 r = -ENOMEM; 1649 return -ENOMEM;
1342 goto bad_no_ah;
1343 }
1344 1650
1345 ah->ah_sc = sc;
1346 ah->hw_version.devid = devid; 1651 ah->hw_version.devid = devid;
1347 ah->hw_version.subsysid = subsysid; 1652 ah->hw_version.subsysid = subsysid;
1348 sc->sc_ah = ah; 1653 sc->sc_ah = ah;
1349 1654
1655 common = ath9k_hw_common(ah);
1656 common->ops = &ath9k_common_ops;
1657 common->bus_ops = bus_ops;
1658 common->ah = ah;
1659 common->hw = sc->hw;
1660 common->priv = sc;
1661 common->debug_mask = ath9k_debug;
1662
1663 /*
1664 * Cache line size is used to size and align various
1665 * structures used to communicate with the hardware.
1666 */
1667 ath_read_cachesize(common, &csz);
1668 /* XXX assert csz is non-zero */
1669 common->cachelsz = csz << 2; /* convert to bytes */
1670
1350 r = ath9k_hw_init(ah); 1671 r = ath9k_hw_init(ah);
1351 if (r) { 1672 if (r) {
1352 DPRINTF(sc, ATH_DBG_FATAL, 1673 ath_print(common, ATH_DBG_FATAL,
1353 "Unable to initialize hardware; " 1674 "Unable to initialize hardware; "
1354 "initialization status: %d\n", r); 1675 "initialization status: %d\n", r);
1355 goto bad; 1676 goto bad_free_hw;
1677 }
1678
1679 if (ath9k_init_debug(ah) < 0) {
1680 ath_print(common, ATH_DBG_FATAL,
1681 "Unable to create debugfs files\n");
1682 goto bad_free_hw;
1356 } 1683 }
1357 1684
1358 /* Get the hardware key cache size. */ 1685 /* Get the hardware key cache size. */
1359 sc->keymax = ah->caps.keycache_size; 1686 common->keymax = ah->caps.keycache_size;
1360 if (sc->keymax > ATH_KEYMAX) { 1687 if (common->keymax > ATH_KEYMAX) {
1361 DPRINTF(sc, ATH_DBG_ANY, 1688 ath_print(common, ATH_DBG_ANY,
1362 "Warning, using only %u entries in %u key cache\n", 1689 "Warning, using only %u entries in %u key cache\n",
1363 ATH_KEYMAX, sc->keymax); 1690 ATH_KEYMAX, common->keymax);
1364 sc->keymax = ATH_KEYMAX; 1691 common->keymax = ATH_KEYMAX;
1365 } 1692 }
1366 1693
1367 /* 1694 /*
1368 * Reset the key cache since some parts do not 1695 * Reset the key cache since some parts do not
1369 * reset the contents on initial power up. 1696 * reset the contents on initial power up.
1370 */ 1697 */
1371 for (i = 0; i < sc->keymax; i++) 1698 for (i = 0; i < common->keymax; i++)
1372 ath9k_hw_keyreset(ah, (u16) i); 1699 ath9k_hw_keyreset(ah, (u16) i);
1373 1700
1374 /* default to MONITOR mode */ 1701 /* default to MONITOR mode */
@@ -1386,17 +1713,17 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1386 * priority. Note that the hal handles reseting 1713 * priority. Note that the hal handles reseting
1387 * these queues at the needed time. 1714 * these queues at the needed time.
1388 */ 1715 */
1389 sc->beacon.beaconq = ath_beaconq_setup(ah); 1716 sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
1390 if (sc->beacon.beaconq == -1) { 1717 if (sc->beacon.beaconq == -1) {
1391 DPRINTF(sc, ATH_DBG_FATAL, 1718 ath_print(common, ATH_DBG_FATAL,
1392 "Unable to setup a beacon xmit queue\n"); 1719 "Unable to setup a beacon xmit queue\n");
1393 r = -EIO; 1720 r = -EIO;
1394 goto bad2; 1721 goto bad2;
1395 } 1722 }
1396 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0); 1723 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1397 if (sc->beacon.cabq == NULL) { 1724 if (sc->beacon.cabq == NULL) {
1398 DPRINTF(sc, ATH_DBG_FATAL, 1725 ath_print(common, ATH_DBG_FATAL,
1399 "Unable to setup CAB xmit queue\n"); 1726 "Unable to setup CAB xmit queue\n");
1400 r = -EIO; 1727 r = -EIO;
1401 goto bad2; 1728 goto bad2;
1402 } 1729 }
@@ -1410,27 +1737,27 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1410 /* Setup data queues */ 1737 /* Setup data queues */
1411 /* NB: ensure BK queue is the lowest priority h/w queue */ 1738 /* NB: ensure BK queue is the lowest priority h/w queue */
1412 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) { 1739 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1413 DPRINTF(sc, ATH_DBG_FATAL, 1740 ath_print(common, ATH_DBG_FATAL,
1414 "Unable to setup xmit queue for BK traffic\n"); 1741 "Unable to setup xmit queue for BK traffic\n");
1415 r = -EIO; 1742 r = -EIO;
1416 goto bad2; 1743 goto bad2;
1417 } 1744 }
1418 1745
1419 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) { 1746 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1420 DPRINTF(sc, ATH_DBG_FATAL, 1747 ath_print(common, ATH_DBG_FATAL,
1421 "Unable to setup xmit queue for BE traffic\n"); 1748 "Unable to setup xmit queue for BE traffic\n");
1422 r = -EIO; 1749 r = -EIO;
1423 goto bad2; 1750 goto bad2;
1424 } 1751 }
1425 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) { 1752 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1426 DPRINTF(sc, ATH_DBG_FATAL, 1753 ath_print(common, ATH_DBG_FATAL,
1427 "Unable to setup xmit queue for VI traffic\n"); 1754 "Unable to setup xmit queue for VI traffic\n");
1428 r = -EIO; 1755 r = -EIO;
1429 goto bad2; 1756 goto bad2;
1430 } 1757 }
1431 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) { 1758 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1432 DPRINTF(sc, ATH_DBG_FATAL, 1759 ath_print(common, ATH_DBG_FATAL,
1433 "Unable to setup xmit queue for VO traffic\n"); 1760 "Unable to setup xmit queue for VO traffic\n");
1434 r = -EIO; 1761 r = -EIO;
1435 goto bad2; 1762 goto bad2;
1436 } 1763 }
@@ -1438,8 +1765,8 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1438 /* Initializes the noise floor to a reasonable default value. 1765 /* Initializes the noise floor to a reasonable default value.
1439 * Later on this will be updated during ANI processing. */ 1766 * Later on this will be updated during ANI processing. */
1440 1767
1441 sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR; 1768 common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1442 setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc); 1769 setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1443 1770
1444 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, 1771 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1445 ATH9K_CIPHER_TKIP, NULL)) { 1772 ATH9K_CIPHER_TKIP, NULL)) {
@@ -1465,7 +1792,7 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1465 ATH9K_CIPHER_MIC, NULL) 1792 ATH9K_CIPHER_MIC, NULL)
1466 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT, 1793 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1467 0, NULL)) 1794 0, NULL))
1468 sc->splitmic = 1; 1795 common->splitmic = 1;
1469 1796
1470 /* turn on mcast key search if possible */ 1797 /* turn on mcast key search if possible */
1471 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL)) 1798 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
@@ -1480,14 +1807,14 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1480 sc->sc_flags |= SC_OP_RXAGGR; 1807 sc->sc_flags |= SC_OP_RXAGGR;
1481 } 1808 }
1482 1809
1483 sc->tx_chainmask = ah->caps.tx_chainmask; 1810 common->tx_chainmask = ah->caps.tx_chainmask;
1484 sc->rx_chainmask = ah->caps.rx_chainmask; 1811 common->rx_chainmask = ah->caps.rx_chainmask;
1485 1812
1486 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 1813 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1487 sc->rx.defant = ath9k_hw_getdefantenna(ah); 1814 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1488 1815
1489 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 1816 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1490 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN); 1817 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
1491 1818
1492 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */ 1819 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1493 1820
@@ -1515,10 +1842,24 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1515 ARRAY_SIZE(ath9k_5ghz_chantable); 1842 ARRAY_SIZE(ath9k_5ghz_chantable);
1516 } 1843 }
1517 1844
1518 if (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) { 1845 switch (ah->btcoex_hw.scheme) {
1519 r = ath9k_hw_btcoex_init(ah); 1846 case ATH_BTCOEX_CFG_NONE:
1847 break;
1848 case ATH_BTCOEX_CFG_2WIRE:
1849 ath9k_hw_btcoex_init_2wire(ah);
1850 break;
1851 case ATH_BTCOEX_CFG_3WIRE:
1852 ath9k_hw_btcoex_init_3wire(ah);
1853 r = ath_init_btcoex_timer(sc);
1520 if (r) 1854 if (r)
1521 goto bad2; 1855 goto bad2;
1856 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
1857 ath9k_hw_init_btcoex_hw(ah, qnum);
1858 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
1859 break;
1860 default:
1861 WARN_ON(1);
1862 break;
1522 } 1863 }
1523 1864
1524 return 0; 1865 return 0;
@@ -1527,12 +1868,9 @@ bad2:
1527 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 1868 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1528 if (ATH_TXQ_SETUP(sc, i)) 1869 if (ATH_TXQ_SETUP(sc, i))
1529 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1870 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1530bad:
1531 ath9k_hw_detach(ah);
1532 sc->sc_ah = NULL;
1533bad_no_ah:
1534 ath9k_exit_debug(sc);
1535 1871
1872bad_free_hw:
1873 ath9k_uninit_hw(sc);
1536 return r; 1874 return r;
1537} 1875}
1538 1876
@@ -1555,7 +1893,7 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1555 BIT(NL80211_IFTYPE_ADHOC) | 1893 BIT(NL80211_IFTYPE_ADHOC) |
1556 BIT(NL80211_IFTYPE_MESH_POINT); 1894 BIT(NL80211_IFTYPE_MESH_POINT);
1557 1895
1558 hw->wiphy->ps_default = false; 1896 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1559 1897
1560 hw->queues = 4; 1898 hw->queues = 4;
1561 hw->max_rates = 4; 1899 hw->max_rates = 4;
@@ -1576,34 +1914,40 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1576} 1914}
1577 1915
1578/* Device driver core initialization */ 1916/* Device driver core initialization */
1579int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid) 1917int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
1918 const struct ath_bus_ops *bus_ops)
1580{ 1919{
1581 struct ieee80211_hw *hw = sc->hw; 1920 struct ieee80211_hw *hw = sc->hw;
1921 struct ath_common *common;
1922 struct ath_hw *ah;
1582 int error = 0, i; 1923 int error = 0, i;
1583 struct ath_regulatory *reg; 1924 struct ath_regulatory *reg;
1584 1925
1585 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n"); 1926 dev_dbg(sc->dev, "Attach ATH hw\n");
1586 1927
1587 error = ath_init_softc(devid, sc, subsysid); 1928 error = ath_init_softc(devid, sc, subsysid, bus_ops);
1588 if (error != 0) 1929 if (error != 0)
1589 return error; 1930 return error;
1590 1931
1932 ah = sc->sc_ah;
1933 common = ath9k_hw_common(ah);
1934
1591 /* get mac address from hardware and set in mac80211 */ 1935 /* get mac address from hardware and set in mac80211 */
1592 1936
1593 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr); 1937 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
1594 1938
1595 ath_set_hw_capab(sc, hw); 1939 ath_set_hw_capab(sc, hw);
1596 1940
1597 error = ath_regd_init(&sc->common.regulatory, sc->hw->wiphy, 1941 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
1598 ath9k_reg_notifier); 1942 ath9k_reg_notifier);
1599 if (error) 1943 if (error)
1600 return error; 1944 return error;
1601 1945
1602 reg = &sc->common.regulatory; 1946 reg = &common->regulatory;
1603 1947
1604 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 1948 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1605 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap); 1949 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1606 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) 1950 if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
1607 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); 1951 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1608 } 1952 }
1609 1953
@@ -1641,9 +1985,7 @@ error_attach:
1641 if (ATH_TXQ_SETUP(sc, i)) 1985 if (ATH_TXQ_SETUP(sc, i))
1642 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1986 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1643 1987
1644 ath9k_hw_detach(sc->sc_ah); 1988 ath9k_uninit_hw(sc);
1645 sc->sc_ah = NULL;
1646 ath9k_exit_debug(sc);
1647 1989
1648 return error; 1990 return error;
1649} 1991}
@@ -1651,6 +1993,7 @@ error_attach:
1651int ath_reset(struct ath_softc *sc, bool retry_tx) 1993int ath_reset(struct ath_softc *sc, bool retry_tx)
1652{ 1994{
1653 struct ath_hw *ah = sc->sc_ah; 1995 struct ath_hw *ah = sc->sc_ah;
1996 struct ath_common *common = ath9k_hw_common(ah);
1654 struct ieee80211_hw *hw = sc->hw; 1997 struct ieee80211_hw *hw = sc->hw;
1655 int r; 1998 int r;
1656 1999
@@ -1662,12 +2005,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1662 spin_lock_bh(&sc->sc_resetlock); 2005 spin_lock_bh(&sc->sc_resetlock);
1663 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 2006 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1664 if (r) 2007 if (r)
1665 DPRINTF(sc, ATH_DBG_FATAL, 2008 ath_print(common, ATH_DBG_FATAL,
1666 "Unable to reset hardware; reset status %d\n", r); 2009 "Unable to reset hardware; reset status %d\n", r);
1667 spin_unlock_bh(&sc->sc_resetlock); 2010 spin_unlock_bh(&sc->sc_resetlock);
1668 2011
1669 if (ath_startrecv(sc) != 0) 2012 if (ath_startrecv(sc) != 0)
1670 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n"); 2013 ath_print(common, ATH_DBG_FATAL,
2014 "Unable to start recv logic\n");
1671 2015
1672 /* 2016 /*
1673 * We may be doing a reset in response to a request 2017 * We may be doing a reset in response to a request
@@ -1710,19 +2054,20 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1710 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2054 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1711#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0) 2055#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1712#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096) 2056#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1713 2057 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1714 struct ath_desc *ds; 2058 struct ath_desc *ds;
1715 struct ath_buf *bf; 2059 struct ath_buf *bf;
1716 int i, bsize, error; 2060 int i, bsize, error;
1717 2061
1718 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", 2062 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1719 name, nbuf, ndesc); 2063 name, nbuf, ndesc);
1720 2064
1721 INIT_LIST_HEAD(head); 2065 INIT_LIST_HEAD(head);
1722 /* ath_desc must be a multiple of DWORDs */ 2066 /* ath_desc must be a multiple of DWORDs */
1723 if ((sizeof(struct ath_desc) % 4) != 0) { 2067 if ((sizeof(struct ath_desc) % 4) != 0) {
1724 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n"); 2068 ath_print(common, ATH_DBG_FATAL,
1725 ASSERT((sizeof(struct ath_desc) % 4) == 0); 2069 "ath_desc not DWORD aligned\n");
2070 BUG_ON((sizeof(struct ath_desc) % 4) != 0);
1726 error = -ENOMEM; 2071 error = -ENOMEM;
1727 goto fail; 2072 goto fail;
1728 } 2073 }
@@ -1755,9 +2100,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1755 goto fail; 2100 goto fail;
1756 } 2101 }
1757 ds = dd->dd_desc; 2102 ds = dd->dd_desc;
1758 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", 2103 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1759 name, ds, (u32) dd->dd_desc_len, 2104 name, ds, (u32) dd->dd_desc_len,
1760 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); 2105 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1761 2106
1762 /* allocate buffers */ 2107 /* allocate buffers */
1763 bsize = sizeof(struct ath_buf) * nbuf; 2108 bsize = sizeof(struct ath_buf) * nbuf;
@@ -1780,7 +2125,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1780 * descriptor fetch. 2125 * descriptor fetch.
1781 */ 2126 */
1782 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) { 2127 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1783 ASSERT((caddr_t) bf->bf_desc < 2128 BUG_ON((caddr_t) bf->bf_desc >=
1784 ((caddr_t) dd->dd_desc + 2129 ((caddr_t) dd->dd_desc +
1785 dd->dd_desc_len)); 2130 dd->dd_desc_len));
1786 2131
@@ -1884,31 +2229,50 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
1884 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; 2229 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1885 } 2230 }
1886 2231
1887 sc->tx_chan_width = ATH9K_HT_MACMODE_20; 2232 if (conf_is_ht(conf))
1888
1889 if (conf_is_ht(conf)) {
1890 if (conf_is_ht40(conf))
1891 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1892
1893 ichan->chanmode = ath_get_extchanmode(sc, chan, 2233 ichan->chanmode = ath_get_extchanmode(sc, chan,
1894 conf->channel_type); 2234 conf->channel_type);
1895 }
1896} 2235}
1897 2236
1898/**********************/ 2237/**********************/
1899/* mac80211 callbacks */ 2238/* mac80211 callbacks */
1900/**********************/ 2239/**********************/
1901 2240
2241/*
2242 * (Re)start btcoex timers
2243 */
2244static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
2245{
2246 struct ath_btcoex *btcoex = &sc->btcoex;
2247 struct ath_hw *ah = sc->sc_ah;
2248
2249 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
2250 "Starting btcoex timers");
2251
2252 /* make sure duty cycle timer is also stopped when resuming */
2253 if (btcoex->hw_timer_enabled)
2254 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
2255
2256 btcoex->bt_priority_cnt = 0;
2257 btcoex->bt_priority_time = jiffies;
2258 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
2259
2260 mod_timer(&btcoex->period_timer, jiffies);
2261}
2262
1902static int ath9k_start(struct ieee80211_hw *hw) 2263static int ath9k_start(struct ieee80211_hw *hw)
1903{ 2264{
1904 struct ath_wiphy *aphy = hw->priv; 2265 struct ath_wiphy *aphy = hw->priv;
1905 struct ath_softc *sc = aphy->sc; 2266 struct ath_softc *sc = aphy->sc;
2267 struct ath_hw *ah = sc->sc_ah;
2268 struct ath_common *common = ath9k_hw_common(ah);
1906 struct ieee80211_channel *curchan = hw->conf.channel; 2269 struct ieee80211_channel *curchan = hw->conf.channel;
1907 struct ath9k_channel *init_channel; 2270 struct ath9k_channel *init_channel;
1908 int r; 2271 int r;
1909 2272
1910 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with " 2273 ath_print(common, ATH_DBG_CONFIG,
1911 "initial channel: %d MHz\n", curchan->center_freq); 2274 "Starting driver with initial channel: %d MHz\n",
2275 curchan->center_freq);
1912 2276
1913 mutex_lock(&sc->mutex); 2277 mutex_lock(&sc->mutex);
1914 2278
@@ -1940,7 +2304,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1940 init_channel = ath_get_curchannel(sc, hw); 2304 init_channel = ath_get_curchannel(sc, hw);
1941 2305
1942 /* Reset SERDES registers */ 2306 /* Reset SERDES registers */
1943 ath9k_hw_configpcipowersave(sc->sc_ah, 0, 0); 2307 ath9k_hw_configpcipowersave(ah, 0, 0);
1944 2308
1945 /* 2309 /*
1946 * The basic interface to setting the hardware in a good 2310 * The basic interface to setting the hardware in a good
@@ -1950,12 +2314,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
1950 * and then setup of the interrupt mask. 2314 * and then setup of the interrupt mask.
1951 */ 2315 */
1952 spin_lock_bh(&sc->sc_resetlock); 2316 spin_lock_bh(&sc->sc_resetlock);
1953 r = ath9k_hw_reset(sc->sc_ah, init_channel, false); 2317 r = ath9k_hw_reset(ah, init_channel, false);
1954 if (r) { 2318 if (r) {
1955 DPRINTF(sc, ATH_DBG_FATAL, 2319 ath_print(common, ATH_DBG_FATAL,
1956 "Unable to reset hardware; reset status %d " 2320 "Unable to reset hardware; reset status %d "
1957 "(freq %u MHz)\n", r, 2321 "(freq %u MHz)\n", r,
1958 curchan->center_freq); 2322 curchan->center_freq);
1959 spin_unlock_bh(&sc->sc_resetlock); 2323 spin_unlock_bh(&sc->sc_resetlock);
1960 goto mutex_unlock; 2324 goto mutex_unlock;
1961 } 2325 }
@@ -1975,7 +2339,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1975 * here except setup the interrupt mask. 2339 * here except setup the interrupt mask.
1976 */ 2340 */
1977 if (ath_startrecv(sc) != 0) { 2341 if (ath_startrecv(sc) != 0) {
1978 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n"); 2342 ath_print(common, ATH_DBG_FATAL,
2343 "Unable to start recv logic\n");
1979 r = -EIO; 2344 r = -EIO;
1980 goto mutex_unlock; 2345 goto mutex_unlock;
1981 } 2346 }
@@ -1985,10 +2350,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
1985 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN 2350 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1986 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 2351 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1987 2352
1988 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 2353 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1989 sc->imask |= ATH9K_INT_GTT; 2354 sc->imask |= ATH9K_INT_GTT;
1990 2355
1991 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 2356 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1992 sc->imask |= ATH9K_INT_CST; 2357 sc->imask |= ATH9K_INT_CST;
1993 2358
1994 ath_cache_conf_rate(sc, &hw->conf); 2359 ath_cache_conf_rate(sc, &hw->conf);
@@ -1997,21 +2362,22 @@ static int ath9k_start(struct ieee80211_hw *hw)
1997 2362
1998 /* Disable BMISS interrupt when we're not associated */ 2363 /* Disable BMISS interrupt when we're not associated */
1999 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 2364 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2000 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 2365 ath9k_hw_set_interrupts(ah, sc->imask);
2001 2366
2002 ieee80211_wake_queues(hw); 2367 ieee80211_wake_queues(hw);
2003 2368
2004 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 2369 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
2005 2370
2006 if ((sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) && 2371 if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
2007 !(sc->sc_flags & SC_OP_BTCOEX_ENABLED)) { 2372 !ah->btcoex_hw.enabled) {
2008 ath_btcoex_set_weight(&sc->btcoex_info, AR_BT_COEX_WGHT, 2373 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
2009 AR_STOMP_LOW_WLAN_WGHT); 2374 AR_STOMP_LOW_WLAN_WGHT);
2010 ath9k_hw_btcoex_enable(sc->sc_ah); 2375 ath9k_hw_btcoex_enable(ah);
2011 2376
2012 ath_pcie_aspm_disable(sc); 2377 if (common->bus_ops->bt_coex_prep)
2013 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 2378 common->bus_ops->bt_coex_prep(common);
2014 ath_btcoex_timer_resume(sc, &sc->btcoex_info); 2379 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
2380 ath9k_btcoex_timer_resume(sc);
2015 } 2381 }
2016 2382
2017mutex_unlock: 2383mutex_unlock:
@@ -2026,12 +2392,14 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2026 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2392 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2027 struct ath_wiphy *aphy = hw->priv; 2393 struct ath_wiphy *aphy = hw->priv;
2028 struct ath_softc *sc = aphy->sc; 2394 struct ath_softc *sc = aphy->sc;
2395 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2029 struct ath_tx_control txctl; 2396 struct ath_tx_control txctl;
2030 int hdrlen, padsize; 2397 int hdrlen, padsize;
2031 2398
2032 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { 2399 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
2033 printk(KERN_DEBUG "ath9k: %s: TX in unexpected wiphy state " 2400 ath_print(common, ATH_DBG_XMIT,
2034 "%d\n", wiphy_name(hw->wiphy), aphy->state); 2401 "ath9k: %s: TX in unexpected wiphy state "
2402 "%d\n", wiphy_name(hw->wiphy), aphy->state);
2035 goto exit; 2403 goto exit;
2036 } 2404 }
2037 2405
@@ -2044,8 +2412,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2044 if (ieee80211_is_data(hdr->frame_control) && 2412 if (ieee80211_is_data(hdr->frame_control) &&
2045 !ieee80211_is_nullfunc(hdr->frame_control) && 2413 !ieee80211_is_nullfunc(hdr->frame_control) &&
2046 !ieee80211_has_pm(hdr->frame_control)) { 2414 !ieee80211_has_pm(hdr->frame_control)) {
2047 DPRINTF(sc, ATH_DBG_PS, "Add PM=1 for a TX frame " 2415 ath_print(common, ATH_DBG_PS, "Add PM=1 for a TX frame "
2048 "while in PS mode\n"); 2416 "while in PS mode\n");
2049 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 2417 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
2050 } 2418 }
2051 } 2419 }
@@ -2060,11 +2428,12 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2060 ath9k_ps_wakeup(sc); 2428 ath9k_ps_wakeup(sc);
2061 ath9k_hw_setrxabort(sc->sc_ah, 0); 2429 ath9k_hw_setrxabort(sc->sc_ah, 0);
2062 if (ieee80211_is_pspoll(hdr->frame_control)) { 2430 if (ieee80211_is_pspoll(hdr->frame_control)) {
2063 DPRINTF(sc, ATH_DBG_PS, "Sending PS-Poll to pick a " 2431 ath_print(common, ATH_DBG_PS,
2064 "buffered frame\n"); 2432 "Sending PS-Poll to pick a buffered frame\n");
2065 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA; 2433 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
2066 } else { 2434 } else {
2067 DPRINTF(sc, ATH_DBG_PS, "Wake up to complete TX\n"); 2435 ath_print(common, ATH_DBG_PS,
2436 "Wake up to complete TX\n");
2068 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK; 2437 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
2069 } 2438 }
2070 /* 2439 /*
@@ -2106,10 +2475,10 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2106 if (!txctl.txq) 2475 if (!txctl.txq)
2107 goto exit; 2476 goto exit;
2108 2477
2109 DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb); 2478 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2110 2479
2111 if (ath_tx_start(hw, skb, &txctl) != 0) { 2480 if (ath_tx_start(hw, skb, &txctl) != 0) {
2112 DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n"); 2481 ath_print(common, ATH_DBG_XMIT, "TX failed\n");
2113 goto exit; 2482 goto exit;
2114 } 2483 }
2115 2484
@@ -2119,10 +2488,28 @@ exit:
2119 return 0; 2488 return 0;
2120} 2489}
2121 2490
2491/*
2492 * Pause btcoex timer and bt duty cycle timer
2493 */
2494static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
2495{
2496 struct ath_btcoex *btcoex = &sc->btcoex;
2497 struct ath_hw *ah = sc->sc_ah;
2498
2499 del_timer_sync(&btcoex->period_timer);
2500
2501 if (btcoex->hw_timer_enabled)
2502 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
2503
2504 btcoex->hw_timer_enabled = false;
2505}
2506
2122static void ath9k_stop(struct ieee80211_hw *hw) 2507static void ath9k_stop(struct ieee80211_hw *hw)
2123{ 2508{
2124 struct ath_wiphy *aphy = hw->priv; 2509 struct ath_wiphy *aphy = hw->priv;
2125 struct ath_softc *sc = aphy->sc; 2510 struct ath_softc *sc = aphy->sc;
2511 struct ath_hw *ah = sc->sc_ah;
2512 struct ath_common *common = ath9k_hw_common(ah);
2126 2513
2127 mutex_lock(&sc->mutex); 2514 mutex_lock(&sc->mutex);
2128 2515
@@ -2137,7 +2524,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2137 } 2524 }
2138 2525
2139 if (sc->sc_flags & SC_OP_INVALID) { 2526 if (sc->sc_flags & SC_OP_INVALID) {
2140 DPRINTF(sc, ATH_DBG_ANY, "Device not present\n"); 2527 ath_print(common, ATH_DBG_ANY, "Device not present\n");
2141 mutex_unlock(&sc->mutex); 2528 mutex_unlock(&sc->mutex);
2142 return; 2529 return;
2143 } 2530 }
@@ -2147,33 +2534,33 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2147 return; /* another wiphy still in use */ 2534 return; /* another wiphy still in use */
2148 } 2535 }
2149 2536
2150 if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) { 2537 if (ah->btcoex_hw.enabled) {
2151 ath9k_hw_btcoex_disable(sc->sc_ah); 2538 ath9k_hw_btcoex_disable(ah);
2152 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 2539 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
2153 ath_btcoex_timer_pause(sc, &sc->btcoex_info); 2540 ath9k_btcoex_timer_pause(sc);
2154 } 2541 }
2155 2542
2156 /* make sure h/w will not generate any interrupt 2543 /* make sure h/w will not generate any interrupt
2157 * before setting the invalid flag. */ 2544 * before setting the invalid flag. */
2158 ath9k_hw_set_interrupts(sc->sc_ah, 0); 2545 ath9k_hw_set_interrupts(ah, 0);
2159 2546
2160 if (!(sc->sc_flags & SC_OP_INVALID)) { 2547 if (!(sc->sc_flags & SC_OP_INVALID)) {
2161 ath_drain_all_txq(sc, false); 2548 ath_drain_all_txq(sc, false);
2162 ath_stoprecv(sc); 2549 ath_stoprecv(sc);
2163 ath9k_hw_phy_disable(sc->sc_ah); 2550 ath9k_hw_phy_disable(ah);
2164 } else 2551 } else
2165 sc->rx.rxlink = NULL; 2552 sc->rx.rxlink = NULL;
2166 2553
2167 /* disable HAL and put h/w to sleep */ 2554 /* disable HAL and put h/w to sleep */
2168 ath9k_hw_disable(sc->sc_ah); 2555 ath9k_hw_disable(ah);
2169 ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1); 2556 ath9k_hw_configpcipowersave(ah, 1, 1);
2170 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 2557 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
2171 2558
2172 sc->sc_flags |= SC_OP_INVALID; 2559 sc->sc_flags |= SC_OP_INVALID;
2173 2560
2174 mutex_unlock(&sc->mutex); 2561 mutex_unlock(&sc->mutex);
2175 2562
2176 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n"); 2563 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
2177} 2564}
2178 2565
2179static int ath9k_add_interface(struct ieee80211_hw *hw, 2566static int ath9k_add_interface(struct ieee80211_hw *hw,
@@ -2181,6 +2568,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2181{ 2568{
2182 struct ath_wiphy *aphy = hw->priv; 2569 struct ath_wiphy *aphy = hw->priv;
2183 struct ath_softc *sc = aphy->sc; 2570 struct ath_softc *sc = aphy->sc;
2571 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2184 struct ath_vif *avp = (void *)conf->vif->drv_priv; 2572 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2185 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 2573 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2186 int ret = 0; 2574 int ret = 0;
@@ -2207,13 +2595,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2207 ic_opmode = conf->type; 2595 ic_opmode = conf->type;
2208 break; 2596 break;
2209 default: 2597 default:
2210 DPRINTF(sc, ATH_DBG_FATAL, 2598 ath_print(common, ATH_DBG_FATAL,
2211 "Interface type %d not yet supported\n", conf->type); 2599 "Interface type %d not yet supported\n", conf->type);
2212 ret = -EOPNOTSUPP; 2600 ret = -EOPNOTSUPP;
2213 goto out; 2601 goto out;
2214 } 2602 }
2215 2603
2216 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode); 2604 ath_print(common, ATH_DBG_CONFIG,
2605 "Attach a VIF of type: %d\n", ic_opmode);
2217 2606
2218 /* Set the VIF opmode */ 2607 /* Set the VIF opmode */
2219 avp->av_opmode = ic_opmode; 2608 avp->av_opmode = ic_opmode;
@@ -2251,7 +2640,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2251 if (conf->type == NL80211_IFTYPE_AP || 2640 if (conf->type == NL80211_IFTYPE_AP ||
2252 conf->type == NL80211_IFTYPE_ADHOC || 2641 conf->type == NL80211_IFTYPE_ADHOC ||
2253 conf->type == NL80211_IFTYPE_MONITOR) 2642 conf->type == NL80211_IFTYPE_MONITOR)
2254 ath_start_ani(sc); 2643 ath_start_ani(common);
2255 2644
2256out: 2645out:
2257 mutex_unlock(&sc->mutex); 2646 mutex_unlock(&sc->mutex);
@@ -2263,15 +2652,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2263{ 2652{
2264 struct ath_wiphy *aphy = hw->priv; 2653 struct ath_wiphy *aphy = hw->priv;
2265 struct ath_softc *sc = aphy->sc; 2654 struct ath_softc *sc = aphy->sc;
2655 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2266 struct ath_vif *avp = (void *)conf->vif->drv_priv; 2656 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2267 int i; 2657 int i;
2268 2658
2269 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n"); 2659 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
2270 2660
2271 mutex_lock(&sc->mutex); 2661 mutex_lock(&sc->mutex);
2272 2662
2273 /* Stop ANI */ 2663 /* Stop ANI */
2274 del_timer_sync(&sc->ani.timer); 2664 del_timer_sync(&common->ani.timer);
2275 2665
2276 /* Reclaim beacon resources */ 2666 /* Reclaim beacon resources */
2277 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 2667 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
@@ -2301,27 +2691,43 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2301{ 2691{
2302 struct ath_wiphy *aphy = hw->priv; 2692 struct ath_wiphy *aphy = hw->priv;
2303 struct ath_softc *sc = aphy->sc; 2693 struct ath_softc *sc = aphy->sc;
2694 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2304 struct ieee80211_conf *conf = &hw->conf; 2695 struct ieee80211_conf *conf = &hw->conf;
2305 struct ath_hw *ah = sc->sc_ah; 2696 struct ath_hw *ah = sc->sc_ah;
2306 bool all_wiphys_idle = false, disable_radio = false; 2697 bool disable_radio;
2307 2698
2308 mutex_lock(&sc->mutex); 2699 mutex_lock(&sc->mutex);
2309 2700
2310 /* Leave this as the first check */ 2701 /*
2702 * Leave this as the first check because we need to turn on the
2703 * radio if it was disabled before prior to processing the rest
2704 * of the changes. Likewise we must only disable the radio towards
2705 * the end.
2706 */
2311 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 2707 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
2708 bool enable_radio;
2709 bool all_wiphys_idle;
2710 bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
2312 2711
2313 spin_lock_bh(&sc->wiphy_lock); 2712 spin_lock_bh(&sc->wiphy_lock);
2314 all_wiphys_idle = ath9k_all_wiphys_idle(sc); 2713 all_wiphys_idle = ath9k_all_wiphys_idle(sc);
2714 ath9k_set_wiphy_idle(aphy, idle);
2715
2716 if (!idle && all_wiphys_idle)
2717 enable_radio = true;
2718
2719 /*
2720 * After we unlock here its possible another wiphy
2721 * can be re-renabled so to account for that we will
2722 * only disable the radio toward the end of this routine
2723 * if by then all wiphys are still idle.
2724 */
2315 spin_unlock_bh(&sc->wiphy_lock); 2725 spin_unlock_bh(&sc->wiphy_lock);
2316 2726
2317 if (conf->flags & IEEE80211_CONF_IDLE){ 2727 if (enable_radio) {
2318 if (all_wiphys_idle) 2728 ath_radio_enable(sc, hw);
2319 disable_radio = true; 2729 ath_print(common, ATH_DBG_CONFIG,
2320 } 2730 "not-idle: enabling radio\n");
2321 else if (all_wiphys_idle) {
2322 ath_radio_enable(sc);
2323 DPRINTF(sc, ATH_DBG_CONFIG,
2324 "not-idle: enabling radio\n");
2325 } 2731 }
2326 } 2732 }
2327 2733
@@ -2339,7 +2745,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2339 sc->ps_enabled = true; 2745 sc->ps_enabled = true;
2340 } else { 2746 } else {
2341 sc->ps_enabled = false; 2747 sc->ps_enabled = false;
2342 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 2748 ath9k_setpower(sc, ATH9K_PM_AWAKE);
2343 if (!(ah->caps.hw_caps & 2749 if (!(ah->caps.hw_caps &
2344 ATH9K_HW_CAP_AUTOSLEEP)) { 2750 ATH9K_HW_CAP_AUTOSLEEP)) {
2345 ath9k_hw_setrxabort(sc->sc_ah, 0); 2751 ath9k_hw_setrxabort(sc->sc_ah, 0);
@@ -2374,8 +2780,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2374 goto skip_chan_change; 2780 goto skip_chan_change;
2375 } 2781 }
2376 2782
2377 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 2783 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2378 curchan->center_freq); 2784 curchan->center_freq);
2379 2785
2380 /* XXX: remove me eventualy */ 2786 /* XXX: remove me eventualy */
2381 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]); 2787 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
@@ -2383,7 +2789,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2383 ath_update_chainmask(sc, conf_is_ht(conf)); 2789 ath_update_chainmask(sc, conf_is_ht(conf));
2384 2790
2385 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { 2791 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
2386 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n"); 2792 ath_print(common, ATH_DBG_FATAL,
2793 "Unable to set channel\n");
2387 mutex_unlock(&sc->mutex); 2794 mutex_unlock(&sc->mutex);
2388 return -EINVAL; 2795 return -EINVAL;
2389 } 2796 }
@@ -2393,9 +2800,13 @@ skip_chan_change:
2393 if (changed & IEEE80211_CONF_CHANGE_POWER) 2800 if (changed & IEEE80211_CONF_CHANGE_POWER)
2394 sc->config.txpowlimit = 2 * conf->power_level; 2801 sc->config.txpowlimit = 2 * conf->power_level;
2395 2802
2803 spin_lock_bh(&sc->wiphy_lock);
2804 disable_radio = ath9k_all_wiphys_idle(sc);
2805 spin_unlock_bh(&sc->wiphy_lock);
2806
2396 if (disable_radio) { 2807 if (disable_radio) {
2397 DPRINTF(sc, ATH_DBG_CONFIG, "idle: disabling radio\n"); 2808 ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
2398 ath_radio_disable(sc); 2809 ath_radio_disable(sc, hw);
2399 } 2810 }
2400 2811
2401 mutex_unlock(&sc->mutex); 2812 mutex_unlock(&sc->mutex);
@@ -2431,7 +2842,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
2431 ath9k_hw_setrxfilter(sc->sc_ah, rfilt); 2842 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2432 ath9k_ps_restore(sc); 2843 ath9k_ps_restore(sc);
2433 2844
2434 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", rfilt); 2845 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
2846 "Set HW RX filter: 0x%x\n", rfilt);
2435} 2847}
2436 2848
2437static void ath9k_sta_notify(struct ieee80211_hw *hw, 2849static void ath9k_sta_notify(struct ieee80211_hw *hw,
@@ -2459,6 +2871,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2459{ 2871{
2460 struct ath_wiphy *aphy = hw->priv; 2872 struct ath_wiphy *aphy = hw->priv;
2461 struct ath_softc *sc = aphy->sc; 2873 struct ath_softc *sc = aphy->sc;
2874 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2462 struct ath9k_tx_queue_info qi; 2875 struct ath9k_tx_queue_info qi;
2463 int ret = 0, qnum; 2876 int ret = 0, qnum;
2464 2877
@@ -2475,15 +2888,15 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2475 qi.tqi_burstTime = params->txop; 2888 qi.tqi_burstTime = params->txop;
2476 qnum = ath_get_hal_qnum(queue, sc); 2889 qnum = ath_get_hal_qnum(queue, sc);
2477 2890
2478 DPRINTF(sc, ATH_DBG_CONFIG, 2891 ath_print(common, ATH_DBG_CONFIG,
2479 "Configure tx [queue/halq] [%d/%d], " 2892 "Configure tx [queue/halq] [%d/%d], "
2480 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", 2893 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2481 queue, qnum, params->aifs, params->cw_min, 2894 queue, qnum, params->aifs, params->cw_min,
2482 params->cw_max, params->txop); 2895 params->cw_max, params->txop);
2483 2896
2484 ret = ath_txq_update(sc, qnum, &qi); 2897 ret = ath_txq_update(sc, qnum, &qi);
2485 if (ret) 2898 if (ret)
2486 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n"); 2899 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
2487 2900
2488 mutex_unlock(&sc->mutex); 2901 mutex_unlock(&sc->mutex);
2489 2902
@@ -2498,6 +2911,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
2498{ 2911{
2499 struct ath_wiphy *aphy = hw->priv; 2912 struct ath_wiphy *aphy = hw->priv;
2500 struct ath_softc *sc = aphy->sc; 2913 struct ath_softc *sc = aphy->sc;
2914 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2501 int ret = 0; 2915 int ret = 0;
2502 2916
2503 if (modparam_nohwcrypt) 2917 if (modparam_nohwcrypt)
@@ -2505,11 +2919,11 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
2505 2919
2506 mutex_lock(&sc->mutex); 2920 mutex_lock(&sc->mutex);
2507 ath9k_ps_wakeup(sc); 2921 ath9k_ps_wakeup(sc);
2508 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n"); 2922 ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
2509 2923
2510 switch (cmd) { 2924 switch (cmd) {
2511 case SET_KEY: 2925 case SET_KEY:
2512 ret = ath_key_config(sc, vif, sta, key); 2926 ret = ath_key_config(common, vif, sta, key);
2513 if (ret >= 0) { 2927 if (ret >= 0) {
2514 key->hw_key_idx = ret; 2928 key->hw_key_idx = ret;
2515 /* push IV and Michael MIC generation to stack */ 2929 /* push IV and Michael MIC generation to stack */
@@ -2522,7 +2936,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
2522 } 2936 }
2523 break; 2937 break;
2524 case DISABLE_KEY: 2938 case DISABLE_KEY:
2525 ath_key_delete(sc, key); 2939 ath_key_delete(common, key);
2526 break; 2940 break;
2527 default: 2941 default:
2528 ret = -EINVAL; 2942 ret = -EINVAL;
@@ -2542,94 +2956,67 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2542 struct ath_wiphy *aphy = hw->priv; 2956 struct ath_wiphy *aphy = hw->priv;
2543 struct ath_softc *sc = aphy->sc; 2957 struct ath_softc *sc = aphy->sc;
2544 struct ath_hw *ah = sc->sc_ah; 2958 struct ath_hw *ah = sc->sc_ah;
2959 struct ath_common *common = ath9k_hw_common(ah);
2545 struct ath_vif *avp = (void *)vif->drv_priv; 2960 struct ath_vif *avp = (void *)vif->drv_priv;
2546 u32 rfilt = 0; 2961 int error;
2547 int error, i;
2548 2962
2549 mutex_lock(&sc->mutex); 2963 mutex_lock(&sc->mutex);
2550 2964
2551 /* 2965 if (changed & BSS_CHANGED_BSSID) {
2552 * TODO: Need to decide which hw opmode to use for 2966 /* Set BSSID */
2553 * multi-interface cases 2967 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
2554 * XXX: This belongs into add_interface! 2968 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
2555 */ 2969 common->curaid = 0;
2556 if (vif->type == NL80211_IFTYPE_AP && 2970 ath9k_hw_write_associd(ah);
2557 ah->opmode != NL80211_IFTYPE_AP) {
2558 ah->opmode = NL80211_IFTYPE_STATION;
2559 ath9k_hw_setopmode(ah);
2560 memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
2561 sc->curaid = 0;
2562 ath9k_hw_write_associd(sc);
2563 /* Request full reset to get hw opmode changed properly */
2564 sc->sc_flags |= SC_OP_FULL_RESET;
2565 }
2566
2567 if ((changed & BSS_CHANGED_BSSID) &&
2568 !is_zero_ether_addr(bss_conf->bssid)) {
2569 switch (vif->type) {
2570 case NL80211_IFTYPE_STATION:
2571 case NL80211_IFTYPE_ADHOC:
2572 case NL80211_IFTYPE_MESH_POINT:
2573 /* Set BSSID */
2574 memcpy(sc->curbssid, bss_conf->bssid, ETH_ALEN);
2575 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
2576 sc->curaid = 0;
2577 ath9k_hw_write_associd(sc);
2578
2579 /* Set aggregation protection mode parameters */
2580 sc->config.ath_aggr_prot = 0;
2581
2582 DPRINTF(sc, ATH_DBG_CONFIG,
2583 "RX filter 0x%x bssid %pM aid 0x%x\n",
2584 rfilt, sc->curbssid, sc->curaid);
2585
2586 /* need to reconfigure the beacon */
2587 sc->sc_flags &= ~SC_OP_BEACONS ;
2588 2971
2589 break; 2972 /* Set aggregation protection mode parameters */
2590 default: 2973 sc->config.ath_aggr_prot = 0;
2591 break; 2974
2592 } 2975 /* Only legacy IBSS for now */
2976 if (vif->type == NL80211_IFTYPE_ADHOC)
2977 ath_update_chainmask(sc, 0);
2978
2979 ath_print(common, ATH_DBG_CONFIG,
2980 "BSSID: %pM aid: 0x%x\n",
2981 common->curbssid, common->curaid);
2982
2983 /* need to reconfigure the beacon */
2984 sc->sc_flags &= ~SC_OP_BEACONS ;
2593 } 2985 }
2594 2986
2595 if ((vif->type == NL80211_IFTYPE_ADHOC) || 2987 /* Enable transmission of beacons (AP, IBSS, MESH) */
2596 (vif->type == NL80211_IFTYPE_AP) || 2988 if ((changed & BSS_CHANGED_BEACON) ||
2597 (vif->type == NL80211_IFTYPE_MESH_POINT)) { 2989 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) {
2598 if ((changed & BSS_CHANGED_BEACON) || 2990 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2599 (changed & BSS_CHANGED_BEACON_ENABLED && 2991 error = ath_beacon_alloc(aphy, vif);
2600 bss_conf->enable_beacon)) { 2992 if (!error)
2601 /* 2993 ath_beacon_config(sc, vif);
2602 * Allocate and setup the beacon frame. 2994 }
2603 *
2604 * Stop any previous beacon DMA. This may be
2605 * necessary, for example, when an ibss merge
2606 * causes reconfiguration; we may be called
2607 * with beacon transmission active.
2608 */
2609 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2610 2995
2996 /* Disable transmission of beacons */
2997 if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
2998 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2999
3000 if (changed & BSS_CHANGED_BEACON_INT) {
3001 sc->beacon_interval = bss_conf->beacon_int;
3002 /*
3003 * In case of AP mode, the HW TSF has to be reset
3004 * when the beacon interval changes.
3005 */
3006 if (vif->type == NL80211_IFTYPE_AP) {
3007 sc->sc_flags |= SC_OP_TSF_RESET;
3008 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2611 error = ath_beacon_alloc(aphy, vif); 3009 error = ath_beacon_alloc(aphy, vif);
2612 if (!error) 3010 if (!error)
2613 ath_beacon_config(sc, vif); 3011 ath_beacon_config(sc, vif);
3012 } else {
3013 ath_beacon_config(sc, vif);
2614 } 3014 }
2615 } 3015 }
2616 3016
2617 /* Check for WLAN_CAPABILITY_PRIVACY ? */
2618 if ((avp->av_opmode != NL80211_IFTYPE_STATION)) {
2619 for (i = 0; i < IEEE80211_WEP_NKID; i++)
2620 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
2621 ath9k_hw_keysetmac(sc->sc_ah,
2622 (u16)i,
2623 sc->curbssid);
2624 }
2625
2626 /* Only legacy IBSS for now */
2627 if (vif->type == NL80211_IFTYPE_ADHOC)
2628 ath_update_chainmask(sc, 0);
2629
2630 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 3017 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2631 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 3018 ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2632 bss_conf->use_short_preamble); 3019 bss_conf->use_short_preamble);
2633 if (bss_conf->use_short_preamble) 3020 if (bss_conf->use_short_preamble)
2634 sc->sc_flags |= SC_OP_PREAMBLE_SHORT; 3021 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
2635 else 3022 else
@@ -2637,8 +3024,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2637 } 3024 }
2638 3025
2639 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 3026 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2640 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", 3027 ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2641 bss_conf->use_cts_prot); 3028 bss_conf->use_cts_prot);
2642 if (bss_conf->use_cts_prot && 3029 if (bss_conf->use_cts_prot &&
2643 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 3030 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2644 sc->sc_flags |= SC_OP_PROTECT_ENABLE; 3031 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
@@ -2647,23 +3034,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2647 } 3034 }
2648 3035
2649 if (changed & BSS_CHANGED_ASSOC) { 3036 if (changed & BSS_CHANGED_ASSOC) {
2650 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 3037 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2651 bss_conf->assoc); 3038 bss_conf->assoc);
2652 ath9k_bss_assoc_info(sc, vif, bss_conf); 3039 ath9k_bss_assoc_info(sc, vif, bss_conf);
2653 } 3040 }
2654 3041
2655 /*
2656 * The HW TSF has to be reset when the beacon interval changes.
2657 * We set the flag here, and ath_beacon_config_ap() would take this
2658 * into account when it gets called through the subsequent
2659 * config_interface() call - with IFCC_BEACON in the changed field.
2660 */
2661
2662 if (changed & BSS_CHANGED_BEACON_INT) {
2663 sc->sc_flags |= SC_OP_TSF_RESET;
2664 sc->beacon_interval = bss_conf->beacon_int;
2665 }
2666
2667 mutex_unlock(&sc->mutex); 3042 mutex_unlock(&sc->mutex);
2668} 3043}
2669 3044
@@ -2696,11 +3071,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2696 struct ath_softc *sc = aphy->sc; 3071 struct ath_softc *sc = aphy->sc;
2697 3072
2698 mutex_lock(&sc->mutex); 3073 mutex_lock(&sc->mutex);
3074
3075 ath9k_ps_wakeup(sc);
2699 ath9k_hw_reset_tsf(sc->sc_ah); 3076 ath9k_hw_reset_tsf(sc->sc_ah);
3077 ath9k_ps_restore(sc);
3078
2700 mutex_unlock(&sc->mutex); 3079 mutex_unlock(&sc->mutex);
2701} 3080}
2702 3081
2703static int ath9k_ampdu_action(struct ieee80211_hw *hw, 3082static int ath9k_ampdu_action(struct ieee80211_hw *hw,
3083 struct ieee80211_vif *vif,
2704 enum ieee80211_ampdu_mlme_action action, 3084 enum ieee80211_ampdu_mlme_action action,
2705 struct ieee80211_sta *sta, 3085 struct ieee80211_sta *sta,
2706 u16 tid, u16 *ssn) 3086 u16 tid, u16 *ssn)
@@ -2718,17 +3098,18 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2718 break; 3098 break;
2719 case IEEE80211_AMPDU_TX_START: 3099 case IEEE80211_AMPDU_TX_START:
2720 ath_tx_aggr_start(sc, sta, tid, ssn); 3100 ath_tx_aggr_start(sc, sta, tid, ssn);
2721 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid); 3101 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2722 break; 3102 break;
2723 case IEEE80211_AMPDU_TX_STOP: 3103 case IEEE80211_AMPDU_TX_STOP:
2724 ath_tx_aggr_stop(sc, sta, tid); 3104 ath_tx_aggr_stop(sc, sta, tid);
2725 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid); 3105 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2726 break; 3106 break;
2727 case IEEE80211_AMPDU_TX_OPERATIONAL: 3107 case IEEE80211_AMPDU_TX_OPERATIONAL:
2728 ath_tx_aggr_resume(sc, sta, tid); 3108 ath_tx_aggr_resume(sc, sta, tid);
2729 break; 3109 break;
2730 default: 3110 default:
2731 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n"); 3111 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
3112 "Unknown AMPDU action\n");
2732 } 3113 }
2733 3114
2734 return ret; 3115 return ret;
@@ -2796,64 +3177,6 @@ struct ieee80211_ops ath9k_ops = {
2796 .rfkill_poll = ath9k_rfkill_poll_state, 3177 .rfkill_poll = ath9k_rfkill_poll_state,
2797}; 3178};
2798 3179
2799static struct {
2800 u32 version;
2801 const char * name;
2802} ath_mac_bb_names[] = {
2803 { AR_SREV_VERSION_5416_PCI, "5416" },
2804 { AR_SREV_VERSION_5416_PCIE, "5418" },
2805 { AR_SREV_VERSION_9100, "9100" },
2806 { AR_SREV_VERSION_9160, "9160" },
2807 { AR_SREV_VERSION_9280, "9280" },
2808 { AR_SREV_VERSION_9285, "9285" },
2809 { AR_SREV_VERSION_9287, "9287" }
2810};
2811
2812static struct {
2813 u16 version;
2814 const char * name;
2815} ath_rf_names[] = {
2816 { 0, "5133" },
2817 { AR_RAD5133_SREV_MAJOR, "5133" },
2818 { AR_RAD5122_SREV_MAJOR, "5122" },
2819 { AR_RAD2133_SREV_MAJOR, "2133" },
2820 { AR_RAD2122_SREV_MAJOR, "2122" }
2821};
2822
2823/*
2824 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2825 */
2826const char *
2827ath_mac_bb_name(u32 mac_bb_version)
2828{
2829 int i;
2830
2831 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
2832 if (ath_mac_bb_names[i].version == mac_bb_version) {
2833 return ath_mac_bb_names[i].name;
2834 }
2835 }
2836
2837 return "????";
2838}
2839
2840/*
2841 * Return the RF name. "????" is returned if the RF is unknown.
2842 */
2843const char *
2844ath_rf_name(u16 rf_version)
2845{
2846 int i;
2847
2848 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
2849 if (ath_rf_names[i].version == rf_version) {
2850 return ath_rf_names[i].name;
2851 }
2852 }
2853
2854 return "????";
2855}
2856
2857static int __init ath9k_init(void) 3180static int __init ath9k_init(void)
2858{ 3181{
2859 int error; 3182 int error;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 903dd8ad9d43..5321f735e5a0 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -31,8 +31,9 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = {
31}; 31};
32 32
33/* return bus cachesize in 4B word units */ 33/* return bus cachesize in 4B word units */
34static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz) 34static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
35{ 35{
36 struct ath_softc *sc = (struct ath_softc *) common->priv;
36 u8 u8tmp; 37 u8 u8tmp;
37 38
38 pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp); 39 pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp);
@@ -48,8 +49,9 @@ static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz)
48 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */ 49 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
49} 50}
50 51
51static void ath_pci_cleanup(struct ath_softc *sc) 52static void ath_pci_cleanup(struct ath_common *common)
52{ 53{
54 struct ath_softc *sc = (struct ath_softc *) common->priv;
53 struct pci_dev *pdev = to_pci_dev(sc->dev); 55 struct pci_dev *pdev = to_pci_dev(sc->dev);
54 56
55 pci_iounmap(pdev, sc->mem); 57 pci_iounmap(pdev, sc->mem);
@@ -57,9 +59,11 @@ static void ath_pci_cleanup(struct ath_softc *sc)
57 pci_release_region(pdev, 0); 59 pci_release_region(pdev, 0);
58} 60}
59 61
60static bool ath_pci_eeprom_read(struct ath_hw *ah, u32 off, u16 *data) 62static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
61{ 63{
62 (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); 64 struct ath_hw *ah = (struct ath_hw *) common->ah;
65
66 common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
63 67
64 if (!ath9k_hw_wait(ah, 68 if (!ath9k_hw_wait(ah,
65 AR_EEPROM_STATUS_DATA, 69 AR_EEPROM_STATUS_DATA,
@@ -69,16 +73,34 @@ static bool ath_pci_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
69 return false; 73 return false;
70 } 74 }
71 75
72 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA), 76 *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
73 AR_EEPROM_STATUS_DATA_VAL); 77 AR_EEPROM_STATUS_DATA_VAL);
74 78
75 return true; 79 return true;
76} 80}
77 81
78static struct ath_bus_ops ath_pci_bus_ops = { 82/*
83 * Bluetooth coexistance requires disabling ASPM.
84 */
85static void ath_pci_bt_coex_prep(struct ath_common *common)
86{
87 struct ath_softc *sc = (struct ath_softc *) common->priv;
88 struct pci_dev *pdev = to_pci_dev(sc->dev);
89 u8 aspm;
90
91 if (!pdev->is_pcie)
92 return;
93
94 pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
95 aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
96 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
97}
98
99const static struct ath_bus_ops ath_pci_bus_ops = {
79 .read_cachesize = ath_pci_read_cachesize, 100 .read_cachesize = ath_pci_read_cachesize,
80 .cleanup = ath_pci_cleanup, 101 .cleanup = ath_pci_cleanup,
81 .eeprom_read = ath_pci_eeprom_read, 102 .eeprom_read = ath_pci_eeprom_read,
103 .bt_coex_prep = ath_pci_bt_coex_prep,
82}; 104};
83 105
84static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 106static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -92,6 +114,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
92 u32 val; 114 u32 val;
93 int ret = 0; 115 int ret = 0;
94 struct ath_hw *ah; 116 struct ath_hw *ah;
117 char hw_name[64];
95 118
96 if (pci_enable_device(pdev)) 119 if (pci_enable_device(pdev))
97 return -EIO; 120 return -EIO;
@@ -177,10 +200,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
177 sc->hw = hw; 200 sc->hw = hw;
178 sc->dev = &pdev->dev; 201 sc->dev = &pdev->dev;
179 sc->mem = mem; 202 sc->mem = mem;
180 sc->bus_ops = &ath_pci_bus_ops;
181 203
182 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid); 204 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
183 ret = ath_init_device(id->device, sc, subsysid); 205 ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
184 if (ret) { 206 if (ret) {
185 dev_err(&pdev->dev, "failed to initialize device\n"); 207 dev_err(&pdev->dev, "failed to initialize device\n");
186 goto bad3; 208 goto bad3;
@@ -197,14 +219,11 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
197 sc->irq = pdev->irq; 219 sc->irq = pdev->irq;
198 220
199 ah = sc->sc_ah; 221 ah = sc->sc_ah;
222 ath9k_hw_name(ah, hw_name, sizeof(hw_name));
200 printk(KERN_INFO 223 printk(KERN_INFO
201 "%s: Atheros AR%s MAC/BB Rev:%x " 224 "%s: %s mem=0x%lx, irq=%d\n",
202 "AR%s RF Rev:%x: mem=0x%lx, irq=%d\n",
203 wiphy_name(hw->wiphy), 225 wiphy_name(hw->wiphy),
204 ath_mac_bb_name(ah->hw_version.macVersion), 226 hw_name,
205 ah->hw_version.macRev,
206 ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
207 ah->hw_version.phyRev,
208 (unsigned long)mem, pdev->irq); 227 (unsigned long)mem, pdev->irq);
209 228
210 return 0; 229 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
index 63bf9a307c6a..c3b59390fe38 100644
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ b/drivers/net/wireless/ath/ath9k/phy.c
@@ -14,90 +14,70 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17/**
18 * DOC: Programming Atheros 802.11n analog front end radios
19 *
20 * AR5416 MAC based PCI devices and AR518 MAC based PCI-Express
21 * devices have either an external AR2133 analog front end radio for single
22 * band 2.4 GHz communication or an AR5133 analog front end radio for dual
23 * band 2.4 GHz / 5 GHz communication.
24 *
25 * All devices after the AR5416 and AR5418 family starting with the AR9280
26 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded
27 * into a single-chip and require less programming.
28 *
29 * The following single-chips exist with a respective embedded radio:
30 *
31 * AR9280 - 11n dual-band 2x2 MIMO for PCIe
32 * AR9281 - 11n single-band 1x2 MIMO for PCIe
33 * AR9285 - 11n single-band 1x1 for PCIe
34 * AR9287 - 11n single-band 2x2 MIMO for PCIe
35 *
36 * AR9220 - 11n dual-band 2x2 MIMO for PCI
37 * AR9223 - 11n single-band 2x2 MIMO for PCI
38 *
39 * AR9287 - 11n single-band 1x1 MIMO for USB
40 */
18 41
19void 42#include "hw.h"
20ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex, u32 freqIndex,
21 int regWrites)
22{
23 REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
24}
25 43
26bool 44/**
27ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) 45 * ath9k_hw_write_regs - ??
46 *
47 * @ah: atheros hardware structure
48 * @freqIndex:
49 * @regWrites:
50 *
51 * Used for both the chipsets with an external AR2133/AR5133 radios and
52 * single-chip devices.
53 */
54void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
28{ 55{
29 u32 channelSel = 0; 56 REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
30 u32 bModeSynth = 0;
31 u32 aModeRefSel = 0;
32 u32 reg32 = 0;
33 u16 freq;
34 struct chan_centers centers;
35
36 ath9k_hw_get_channel_centers(ah, chan, &centers);
37 freq = centers.synth_center;
38
39 if (freq < 4800) {
40 u32 txctl;
41
42 if (((freq - 2192) % 5) == 0) {
43 channelSel = ((freq - 672) * 2 - 3040) / 10;
44 bModeSynth = 0;
45 } else if (((freq - 2224) % 5) == 0) {
46 channelSel = ((freq - 704) * 2 - 3040) / 10;
47 bModeSynth = 1;
48 } else {
49 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
50 "Invalid channel %u MHz\n", freq);
51 return false;
52 }
53
54 channelSel = (channelSel << 2) & 0xff;
55 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
56
57 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
58 if (freq == 2484) {
59
60 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
61 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
62 } else {
63 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
64 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
65 }
66
67 } else if ((freq % 20) == 0 && freq >= 5120) {
68 channelSel =
69 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
70 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
71 } else if ((freq % 10) == 0) {
72 channelSel =
73 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
74 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
75 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
76 else
77 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
78 } else if ((freq % 5) == 0) {
79 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
80 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
81 } else {
82 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
83 "Invalid channel %u MHz\n", freq);
84 return false;
85 }
86
87 reg32 =
88 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
89 (1 << 5) | 0x1;
90
91 REG_WRITE(ah, AR_PHY(0x37), reg32);
92
93 ah->curchan = chan;
94 ah->curchan_rad_index = -1;
95
96 return true;
97} 57}
98 58
99void ath9k_hw_ar9280_set_channel(struct ath_hw *ah, 59/**
100 struct ath9k_channel *chan) 60 * ath9k_hw_ar9280_set_channel - set channel on single-chip device
61 * @ah: atheros hardware structure
62 * @chan:
63 *
64 * This is the function to change channel on single-chip devices, that is
65 * all devices after ar9280.
66 *
67 * This function takes the channel value in MHz and sets
68 * hardware channel value. Assumes writes have been enabled to analog bus.
69 *
70 * Actual Expression,
71 *
72 * For 2GHz channel,
73 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
74 * (freq_ref = 40MHz)
75 *
76 * For 5GHz channel,
77 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
78 * (freq_ref = 40MHz/(24>>amodeRefSel))
79 */
80int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
101{ 81{
102 u16 bMode, fracMode, aModeRefSel = 0; 82 u16 bMode, fracMode, aModeRefSel = 0;
103 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0; 83 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
@@ -110,22 +90,34 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
110 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL); 90 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
111 reg32 &= 0xc0000000; 91 reg32 &= 0xc0000000;
112 92
113 if (freq < 4800) { 93 if (freq < 4800) { /* 2 GHz, fractional mode */
114 u32 txctl; 94 u32 txctl;
95 int regWrites = 0;
115 96
116 bMode = 1; 97 bMode = 1;
117 fracMode = 1; 98 fracMode = 1;
118 aModeRefSel = 0; 99 aModeRefSel = 0;
119 channelSel = (freq * 0x10000) / 15; 100 channelSel = (freq * 0x10000) / 15;
120 101
121 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); 102 if (AR_SREV_9287_11_OR_LATER(ah)) {
122 if (freq == 2484) { 103 if (freq == 2484) {
123 104 /* Enable channel spreading for channel 14 */
124 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, 105 REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
125 txctl | AR_PHY_CCK_TX_CTRL_JAPAN); 106 1, regWrites);
107 } else {
108 REG_WRITE_ARRAY(&ah->iniCckfirNormal,
109 1, regWrites);
110 }
126 } else { 111 } else {
127 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, 112 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
128 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); 113 if (freq == 2484) {
114 /* Enable channel spreading for channel 14 */
115 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
116 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
117 } else {
118 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
119 txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
120 }
129 } 121 }
130 } else { 122 } else {
131 bMode = 0; 123 bMode = 0;
@@ -143,10 +135,15 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
143 case 1: 135 case 1:
144 default: 136 default:
145 aModeRefSel = 0; 137 aModeRefSel = 0;
138 /*
139 * Enable 2G (fractional) mode for channels
140 * which are 5MHz spaced.
141 */
146 fracMode = 1; 142 fracMode = 1;
147 refDivA = 1; 143 refDivA = 1;
148 channelSel = (freq * 0x8000) / 15; 144 channelSel = (freq * 0x8000) / 15;
149 145
146 /* RefDivA setting */
150 REG_RMW_FIELD(ah, AR_AN_SYNTH9, 147 REG_RMW_FIELD(ah, AR_AN_SYNTH9,
151 AR_AN_SYNTH9_REFDIVA, refDivA); 148 AR_AN_SYNTH9_REFDIVA, refDivA);
152 149
@@ -168,12 +165,284 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
168 165
169 ah->curchan = chan; 166 ah->curchan = chan;
170 ah->curchan_rad_index = -1; 167 ah->curchan_rad_index = -1;
168
169 return 0;
170}
171
172/**
173 * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
174 * @ah: atheros hardware structure
175 * @chan:
176 *
177 * For single-chip solutions. Converts to baseband spur frequency given the
178 * input channel frequency and compute register settings below.
179 */
180void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
181{
182 int bb_spur = AR_NO_SPUR;
183 int freq;
184 int bin, cur_bin;
185 int bb_spur_off, spur_subchannel_sd;
186 int spur_freq_sd;
187 int spur_delta_phase;
188 int denominator;
189 int upper, lower, cur_vit_mask;
190 int tmp, newVal;
191 int i;
192 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
193 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
194 };
195 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
196 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
197 };
198 int inc[4] = { 0, 100, 0, 0 };
199 struct chan_centers centers;
200
201 int8_t mask_m[123];
202 int8_t mask_p[123];
203 int8_t mask_amt;
204 int tmp_mask;
205 int cur_bb_spur;
206 bool is2GHz = IS_CHAN_2GHZ(chan);
207
208 memset(&mask_m, 0, sizeof(int8_t) * 123);
209 memset(&mask_p, 0, sizeof(int8_t) * 123);
210
211 ath9k_hw_get_channel_centers(ah, chan, &centers);
212 freq = centers.synth_center;
213
214 ah->config.spurmode = SPUR_ENABLE_EEPROM;
215 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
216 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
217
218 if (is2GHz)
219 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
220 else
221 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
222
223 if (AR_NO_SPUR == cur_bb_spur)
224 break;
225 cur_bb_spur = cur_bb_spur - freq;
226
227 if (IS_CHAN_HT40(chan)) {
228 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
229 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
230 bb_spur = cur_bb_spur;
231 break;
232 }
233 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
234 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
235 bb_spur = cur_bb_spur;
236 break;
237 }
238 }
239
240 if (AR_NO_SPUR == bb_spur) {
241 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
242 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
243 return;
244 } else {
245 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
246 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
247 }
248
249 bin = bb_spur * 320;
250
251 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
252
253 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
254 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
255 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
256 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
257 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
258
259 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
260 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
261 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
262 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
263 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
264 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
265
266 if (IS_CHAN_HT40(chan)) {
267 if (bb_spur < 0) {
268 spur_subchannel_sd = 1;
269 bb_spur_off = bb_spur + 10;
270 } else {
271 spur_subchannel_sd = 0;
272 bb_spur_off = bb_spur - 10;
273 }
274 } else {
275 spur_subchannel_sd = 0;
276 bb_spur_off = bb_spur;
277 }
278
279 if (IS_CHAN_HT40(chan))
280 spur_delta_phase =
281 ((bb_spur * 262144) /
282 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
283 else
284 spur_delta_phase =
285 ((bb_spur * 524288) /
286 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
287
288 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
289 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
290
291 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
292 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
293 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
294 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
295
296 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
297 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
298
299 cur_bin = -6000;
300 upper = bin + 100;
301 lower = bin - 100;
302
303 for (i = 0; i < 4; i++) {
304 int pilot_mask = 0;
305 int chan_mask = 0;
306 int bp = 0;
307 for (bp = 0; bp < 30; bp++) {
308 if ((cur_bin > lower) && (cur_bin < upper)) {
309 pilot_mask = pilot_mask | 0x1 << bp;
310 chan_mask = chan_mask | 0x1 << bp;
311 }
312 cur_bin += 100;
313 }
314 cur_bin += inc[i];
315 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
316 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
317 }
318
319 cur_vit_mask = 6100;
320 upper = bin + 120;
321 lower = bin - 120;
322
323 for (i = 0; i < 123; i++) {
324 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
325
326 /* workaround for gcc bug #37014 */
327 volatile int tmp_v = abs(cur_vit_mask - bin);
328
329 if (tmp_v < 75)
330 mask_amt = 1;
331 else
332 mask_amt = 0;
333 if (cur_vit_mask < 0)
334 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
335 else
336 mask_p[cur_vit_mask / 100] = mask_amt;
337 }
338 cur_vit_mask -= 100;
339 }
340
341 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
342 | (mask_m[48] << 26) | (mask_m[49] << 24)
343 | (mask_m[50] << 22) | (mask_m[51] << 20)
344 | (mask_m[52] << 18) | (mask_m[53] << 16)
345 | (mask_m[54] << 14) | (mask_m[55] << 12)
346 | (mask_m[56] << 10) | (mask_m[57] << 8)
347 | (mask_m[58] << 6) | (mask_m[59] << 4)
348 | (mask_m[60] << 2) | (mask_m[61] << 0);
349 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
350 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
351
352 tmp_mask = (mask_m[31] << 28)
353 | (mask_m[32] << 26) | (mask_m[33] << 24)
354 | (mask_m[34] << 22) | (mask_m[35] << 20)
355 | (mask_m[36] << 18) | (mask_m[37] << 16)
356 | (mask_m[48] << 14) | (mask_m[39] << 12)
357 | (mask_m[40] << 10) | (mask_m[41] << 8)
358 | (mask_m[42] << 6) | (mask_m[43] << 4)
359 | (mask_m[44] << 2) | (mask_m[45] << 0);
360 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
361 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
362
363 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
364 | (mask_m[18] << 26) | (mask_m[18] << 24)
365 | (mask_m[20] << 22) | (mask_m[20] << 20)
366 | (mask_m[22] << 18) | (mask_m[22] << 16)
367 | (mask_m[24] << 14) | (mask_m[24] << 12)
368 | (mask_m[25] << 10) | (mask_m[26] << 8)
369 | (mask_m[27] << 6) | (mask_m[28] << 4)
370 | (mask_m[29] << 2) | (mask_m[30] << 0);
371 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
372 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
373
374 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
375 | (mask_m[2] << 26) | (mask_m[3] << 24)
376 | (mask_m[4] << 22) | (mask_m[5] << 20)
377 | (mask_m[6] << 18) | (mask_m[7] << 16)
378 | (mask_m[8] << 14) | (mask_m[9] << 12)
379 | (mask_m[10] << 10) | (mask_m[11] << 8)
380 | (mask_m[12] << 6) | (mask_m[13] << 4)
381 | (mask_m[14] << 2) | (mask_m[15] << 0);
382 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
383 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
384
385 tmp_mask = (mask_p[15] << 28)
386 | (mask_p[14] << 26) | (mask_p[13] << 24)
387 | (mask_p[12] << 22) | (mask_p[11] << 20)
388 | (mask_p[10] << 18) | (mask_p[9] << 16)
389 | (mask_p[8] << 14) | (mask_p[7] << 12)
390 | (mask_p[6] << 10) | (mask_p[5] << 8)
391 | (mask_p[4] << 6) | (mask_p[3] << 4)
392 | (mask_p[2] << 2) | (mask_p[1] << 0);
393 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
394 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
395
396 tmp_mask = (mask_p[30] << 28)
397 | (mask_p[29] << 26) | (mask_p[28] << 24)
398 | (mask_p[27] << 22) | (mask_p[26] << 20)
399 | (mask_p[25] << 18) | (mask_p[24] << 16)
400 | (mask_p[23] << 14) | (mask_p[22] << 12)
401 | (mask_p[21] << 10) | (mask_p[20] << 8)
402 | (mask_p[19] << 6) | (mask_p[18] << 4)
403 | (mask_p[17] << 2) | (mask_p[16] << 0);
404 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
405 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
406
407 tmp_mask = (mask_p[45] << 28)
408 | (mask_p[44] << 26) | (mask_p[43] << 24)
409 | (mask_p[42] << 22) | (mask_p[41] << 20)
410 | (mask_p[40] << 18) | (mask_p[39] << 16)
411 | (mask_p[38] << 14) | (mask_p[37] << 12)
412 | (mask_p[36] << 10) | (mask_p[35] << 8)
413 | (mask_p[34] << 6) | (mask_p[33] << 4)
414 | (mask_p[32] << 2) | (mask_p[31] << 0);
415 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
416 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
417
418 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
419 | (mask_p[59] << 26) | (mask_p[58] << 24)
420 | (mask_p[57] << 22) | (mask_p[56] << 20)
421 | (mask_p[55] << 18) | (mask_p[54] << 16)
422 | (mask_p[53] << 14) | (mask_p[52] << 12)
423 | (mask_p[51] << 10) | (mask_p[50] << 8)
424 | (mask_p[49] << 6) | (mask_p[48] << 4)
425 | (mask_p[47] << 2) | (mask_p[46] << 0);
426 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
427 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
171} 428}
172 429
173static void 430/* All code below is for non single-chip solutions */
174ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32, 431
175 u32 numBits, u32 firstBit, 432/**
176 u32 column) 433 * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
434 * @rfbuf:
435 * @reg32:
436 * @numBits:
437 * @firstBit:
438 * @column:
439 *
440 * Performs analog "swizzling" of parameters into their location.
441 * Used on external AR2133/AR5133 radios.
442 */
443static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
444 u32 numBits, u32 firstBit,
445 u32 column)
177{ 446{
178 u32 tmp32, mask, arrayEntry, lastBit; 447 u32 tmp32, mask, arrayEntry, lastBit;
179 int32_t bitPosition, bitsLeft; 448 int32_t bitPosition, bitsLeft;
@@ -197,26 +466,466 @@ ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
197 } 466 }
198} 467}
199 468
200bool 469/*
201ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan, 470 * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
202 u16 modesIndex) 471 * rf_pwd_icsyndiv.
472 *
473 * Theoretical Rules:
474 * if 2 GHz band
475 * if forceBiasAuto
476 * if synth_freq < 2412
477 * bias = 0
478 * else if 2412 <= synth_freq <= 2422
479 * bias = 1
480 * else // synth_freq > 2422
481 * bias = 2
482 * else if forceBias > 0
483 * bias = forceBias & 7
484 * else
485 * no change, use value from ini file
486 * else
487 * no change, invalid band
488 *
489 * 1st Mod:
490 * 2422 also uses value of 2
491 * <approved>
492 *
493 * 2nd Mod:
494 * Less than 2412 uses value of 0, 2412 and above uses value of 2
495 */
496static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
497{
498 struct ath_common *common = ath9k_hw_common(ah);
499 u32 tmp_reg;
500 int reg_writes = 0;
501 u32 new_bias = 0;
502
503 if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
504 return;
505 }
506
507 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
508
509 if (synth_freq < 2412)
510 new_bias = 0;
511 else if (synth_freq < 2422)
512 new_bias = 1;
513 else
514 new_bias = 2;
515
516 /* pre-reverse this field */
517 tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
518
519 ath_print(common, ATH_DBG_CONFIG,
520 "Force rf_pwd_icsyndiv to %1d on %4d\n",
521 new_bias, synth_freq);
522
523 /* swizzle rf_pwd_icsyndiv */
524 ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
525
526 /* write Bank 6 with new params */
527 REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
528}
529
530/**
531 * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
532 * @ah: atheros hardware stucture
533 * @chan:
534 *
535 * For the external AR2133/AR5133 radios, takes the MHz channel value and set
536 * the channel value. Assumes writes enabled to analog bus and bank6 register
537 * cache in ah->analogBank6Data.
538 */
539int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
540{
541 struct ath_common *common = ath9k_hw_common(ah);
542 u32 channelSel = 0;
543 u32 bModeSynth = 0;
544 u32 aModeRefSel = 0;
545 u32 reg32 = 0;
546 u16 freq;
547 struct chan_centers centers;
548
549 ath9k_hw_get_channel_centers(ah, chan, &centers);
550 freq = centers.synth_center;
551
552 if (freq < 4800) {
553 u32 txctl;
554
555 if (((freq - 2192) % 5) == 0) {
556 channelSel = ((freq - 672) * 2 - 3040) / 10;
557 bModeSynth = 0;
558 } else if (((freq - 2224) % 5) == 0) {
559 channelSel = ((freq - 704) * 2 - 3040) / 10;
560 bModeSynth = 1;
561 } else {
562 ath_print(common, ATH_DBG_FATAL,
563 "Invalid channel %u MHz\n", freq);
564 return -EINVAL;
565 }
566
567 channelSel = (channelSel << 2) & 0xff;
568 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
569
570 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
571 if (freq == 2484) {
572
573 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
574 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
575 } else {
576 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
577 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
578 }
579
580 } else if ((freq % 20) == 0 && freq >= 5120) {
581 channelSel =
582 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
583 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
584 } else if ((freq % 10) == 0) {
585 channelSel =
586 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
587 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
588 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
589 else
590 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
591 } else if ((freq % 5) == 0) {
592 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
593 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
594 } else {
595 ath_print(common, ATH_DBG_FATAL,
596 "Invalid channel %u MHz\n", freq);
597 return -EINVAL;
598 }
599
600 ath9k_hw_force_bias(ah, freq);
601
602 reg32 =
603 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
604 (1 << 5) | 0x1;
605
606 REG_WRITE(ah, AR_PHY(0x37), reg32);
607
608 ah->curchan = chan;
609 ah->curchan_rad_index = -1;
610
611 return 0;
612}
613
614/**
615 * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
616 * @ah: atheros hardware structure
617 * @chan:
618 *
619 * For non single-chip solutions. Converts to baseband spur frequency given the
620 * input channel frequency and compute register settings below.
621 */
622void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
623{
624 int bb_spur = AR_NO_SPUR;
625 int bin, cur_bin;
626 int spur_freq_sd;
627 int spur_delta_phase;
628 int denominator;
629 int upper, lower, cur_vit_mask;
630 int tmp, new;
631 int i;
632 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
633 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
634 };
635 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
636 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
637 };
638 int inc[4] = { 0, 100, 0, 0 };
639
640 int8_t mask_m[123];
641 int8_t mask_p[123];
642 int8_t mask_amt;
643 int tmp_mask;
644 int cur_bb_spur;
645 bool is2GHz = IS_CHAN_2GHZ(chan);
646
647 memset(&mask_m, 0, sizeof(int8_t) * 123);
648 memset(&mask_p, 0, sizeof(int8_t) * 123);
649
650 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
651 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
652 if (AR_NO_SPUR == cur_bb_spur)
653 break;
654 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
655 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
656 bb_spur = cur_bb_spur;
657 break;
658 }
659 }
660
661 if (AR_NO_SPUR == bb_spur)
662 return;
663
664 bin = bb_spur * 32;
665
666 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
667 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
668 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
669 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
670 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
671
672 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
673
674 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
675 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
676 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
677 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
678 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
679 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
680
681 spur_delta_phase = ((bb_spur * 524288) / 100) &
682 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
683
684 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
685 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
686
687 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
688 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
689 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
690 REG_WRITE(ah, AR_PHY_TIMING11, new);
691
692 cur_bin = -6000;
693 upper = bin + 100;
694 lower = bin - 100;
695
696 for (i = 0; i < 4; i++) {
697 int pilot_mask = 0;
698 int chan_mask = 0;
699 int bp = 0;
700 for (bp = 0; bp < 30; bp++) {
701 if ((cur_bin > lower) && (cur_bin < upper)) {
702 pilot_mask = pilot_mask | 0x1 << bp;
703 chan_mask = chan_mask | 0x1 << bp;
704 }
705 cur_bin += 100;
706 }
707 cur_bin += inc[i];
708 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
709 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
710 }
711
712 cur_vit_mask = 6100;
713 upper = bin + 120;
714 lower = bin - 120;
715
716 for (i = 0; i < 123; i++) {
717 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
718
719 /* workaround for gcc bug #37014 */
720 volatile int tmp_v = abs(cur_vit_mask - bin);
721
722 if (tmp_v < 75)
723 mask_amt = 1;
724 else
725 mask_amt = 0;
726 if (cur_vit_mask < 0)
727 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
728 else
729 mask_p[cur_vit_mask / 100] = mask_amt;
730 }
731 cur_vit_mask -= 100;
732 }
733
734 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
735 | (mask_m[48] << 26) | (mask_m[49] << 24)
736 | (mask_m[50] << 22) | (mask_m[51] << 20)
737 | (mask_m[52] << 18) | (mask_m[53] << 16)
738 | (mask_m[54] << 14) | (mask_m[55] << 12)
739 | (mask_m[56] << 10) | (mask_m[57] << 8)
740 | (mask_m[58] << 6) | (mask_m[59] << 4)
741 | (mask_m[60] << 2) | (mask_m[61] << 0);
742 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
743 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
744
745 tmp_mask = (mask_m[31] << 28)
746 | (mask_m[32] << 26) | (mask_m[33] << 24)
747 | (mask_m[34] << 22) | (mask_m[35] << 20)
748 | (mask_m[36] << 18) | (mask_m[37] << 16)
749 | (mask_m[48] << 14) | (mask_m[39] << 12)
750 | (mask_m[40] << 10) | (mask_m[41] << 8)
751 | (mask_m[42] << 6) | (mask_m[43] << 4)
752 | (mask_m[44] << 2) | (mask_m[45] << 0);
753 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
754 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
755
756 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
757 | (mask_m[18] << 26) | (mask_m[18] << 24)
758 | (mask_m[20] << 22) | (mask_m[20] << 20)
759 | (mask_m[22] << 18) | (mask_m[22] << 16)
760 | (mask_m[24] << 14) | (mask_m[24] << 12)
761 | (mask_m[25] << 10) | (mask_m[26] << 8)
762 | (mask_m[27] << 6) | (mask_m[28] << 4)
763 | (mask_m[29] << 2) | (mask_m[30] << 0);
764 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
765 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
766
767 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
768 | (mask_m[2] << 26) | (mask_m[3] << 24)
769 | (mask_m[4] << 22) | (mask_m[5] << 20)
770 | (mask_m[6] << 18) | (mask_m[7] << 16)
771 | (mask_m[8] << 14) | (mask_m[9] << 12)
772 | (mask_m[10] << 10) | (mask_m[11] << 8)
773 | (mask_m[12] << 6) | (mask_m[13] << 4)
774 | (mask_m[14] << 2) | (mask_m[15] << 0);
775 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
776 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
777
778 tmp_mask = (mask_p[15] << 28)
779 | (mask_p[14] << 26) | (mask_p[13] << 24)
780 | (mask_p[12] << 22) | (mask_p[11] << 20)
781 | (mask_p[10] << 18) | (mask_p[9] << 16)
782 | (mask_p[8] << 14) | (mask_p[7] << 12)
783 | (mask_p[6] << 10) | (mask_p[5] << 8)
784 | (mask_p[4] << 6) | (mask_p[3] << 4)
785 | (mask_p[2] << 2) | (mask_p[1] << 0);
786 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
787 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
788
789 tmp_mask = (mask_p[30] << 28)
790 | (mask_p[29] << 26) | (mask_p[28] << 24)
791 | (mask_p[27] << 22) | (mask_p[26] << 20)
792 | (mask_p[25] << 18) | (mask_p[24] << 16)
793 | (mask_p[23] << 14) | (mask_p[22] << 12)
794 | (mask_p[21] << 10) | (mask_p[20] << 8)
795 | (mask_p[19] << 6) | (mask_p[18] << 4)
796 | (mask_p[17] << 2) | (mask_p[16] << 0);
797 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
798 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
799
800 tmp_mask = (mask_p[45] << 28)
801 | (mask_p[44] << 26) | (mask_p[43] << 24)
802 | (mask_p[42] << 22) | (mask_p[41] << 20)
803 | (mask_p[40] << 18) | (mask_p[39] << 16)
804 | (mask_p[38] << 14) | (mask_p[37] << 12)
805 | (mask_p[36] << 10) | (mask_p[35] << 8)
806 | (mask_p[34] << 6) | (mask_p[33] << 4)
807 | (mask_p[32] << 2) | (mask_p[31] << 0);
808 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
809 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
810
811 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
812 | (mask_p[59] << 26) | (mask_p[58] << 24)
813 | (mask_p[57] << 22) | (mask_p[56] << 20)
814 | (mask_p[55] << 18) | (mask_p[54] << 16)
815 | (mask_p[53] << 14) | (mask_p[52] << 12)
816 | (mask_p[51] << 10) | (mask_p[50] << 8)
817 | (mask_p[49] << 6) | (mask_p[48] << 4)
818 | (mask_p[47] << 2) | (mask_p[46] << 0);
819 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
820 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
821}
822
823/**
824 * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
825 * @ah: atheros hardware structure
826 *
827 * Only required for older devices with external AR2133/AR5133 radios.
828 */
829int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
830{
831#define ATH_ALLOC_BANK(bank, size) do { \
832 bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
833 if (!bank) { \
834 ath_print(common, ATH_DBG_FATAL, \
835 "Cannot allocate RF banks\n"); \
836 return -ENOMEM; \
837 } \
838 } while (0);
839
840 struct ath_common *common = ath9k_hw_common(ah);
841
842 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
843
844 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
845 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
846 ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
847 ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
848 ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
849 ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
850 ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
851 ATH_ALLOC_BANK(ah->addac5416_21,
852 ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
853 ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
854
855 return 0;
856#undef ATH_ALLOC_BANK
857}
858
859
860/**
861 * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
862 * @ah: atheros hardware struture
863 * For the external AR2133/AR5133 radios banks.
864 */
865void
866ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
867{
868#define ATH_FREE_BANK(bank) do { \
869 kfree(bank); \
870 bank = NULL; \
871 } while (0);
872
873 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
874
875 ATH_FREE_BANK(ah->analogBank0Data);
876 ATH_FREE_BANK(ah->analogBank1Data);
877 ATH_FREE_BANK(ah->analogBank2Data);
878 ATH_FREE_BANK(ah->analogBank3Data);
879 ATH_FREE_BANK(ah->analogBank6Data);
880 ATH_FREE_BANK(ah->analogBank6TPCData);
881 ATH_FREE_BANK(ah->analogBank7Data);
882 ATH_FREE_BANK(ah->addac5416_21);
883 ATH_FREE_BANK(ah->bank6Temp);
884
885#undef ATH_FREE_BANK
886}
887
888/* *
889 * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
890 * @ah: atheros hardware structure
891 * @chan:
892 * @modesIndex:
893 *
894 * Used for the external AR2133/AR5133 radios.
895 *
896 * Reads the EEPROM header info from the device structure and programs
897 * all rf registers. This routine requires access to the analog
898 * rf device. This is not required for single-chip devices.
899 */
900bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
901 u16 modesIndex)
203{ 902{
204 u32 eepMinorRev; 903 u32 eepMinorRev;
205 u32 ob5GHz = 0, db5GHz = 0; 904 u32 ob5GHz = 0, db5GHz = 0;
206 u32 ob2GHz = 0, db2GHz = 0; 905 u32 ob2GHz = 0, db2GHz = 0;
207 int regWrites = 0; 906 int regWrites = 0;
208 907
908 /*
909 * Software does not need to program bank data
910 * for single chip devices, that is AR9280 or anything
911 * after that.
912 */
209 if (AR_SREV_9280_10_OR_LATER(ah)) 913 if (AR_SREV_9280_10_OR_LATER(ah))
210 return true; 914 return true;
211 915
916 /* Setup rf parameters */
212 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); 917 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
213 918
919 /* Setup Bank 0 Write */
214 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1); 920 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
215 921
922 /* Setup Bank 1 Write */
216 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1); 923 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
217 924
925 /* Setup Bank 2 Write */
218 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1); 926 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
219 927
928 /* Setup Bank 6 Write */
220 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3, 929 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
221 modesIndex); 930 modesIndex);
222 { 931 {
@@ -227,6 +936,7 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
227 } 936 }
228 } 937 }
229 938
939 /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
230 if (eepMinorRev >= 2) { 940 if (eepMinorRev >= 2) {
231 if (IS_CHAN_2GHZ(chan)) { 941 if (IS_CHAN_2GHZ(chan)) {
232 ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2); 942 ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
@@ -245,8 +955,10 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
245 } 955 }
246 } 956 }
247 957
958 /* Setup Bank 7 Setup */
248 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1); 959 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
249 960
961 /* Write Analog registers */
250 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data, 962 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
251 regWrites); 963 regWrites);
252 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data, 964 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
@@ -262,137 +974,3 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
262 974
263 return true; 975 return true;
264} 976}
265
266void
267ath9k_hw_rf_free(struct ath_hw *ah)
268{
269#define ATH_FREE_BANK(bank) do { \
270 kfree(bank); \
271 bank = NULL; \
272 } while (0);
273
274 ATH_FREE_BANK(ah->analogBank0Data);
275 ATH_FREE_BANK(ah->analogBank1Data);
276 ATH_FREE_BANK(ah->analogBank2Data);
277 ATH_FREE_BANK(ah->analogBank3Data);
278 ATH_FREE_BANK(ah->analogBank6Data);
279 ATH_FREE_BANK(ah->analogBank6TPCData);
280 ATH_FREE_BANK(ah->analogBank7Data);
281 ATH_FREE_BANK(ah->addac5416_21);
282 ATH_FREE_BANK(ah->bank6Temp);
283#undef ATH_FREE_BANK
284}
285
286bool ath9k_hw_init_rf(struct ath_hw *ah, int *status)
287{
288 if (!AR_SREV_9280_10_OR_LATER(ah)) {
289 ah->analogBank0Data =
290 kzalloc((sizeof(u32) *
291 ah->iniBank0.ia_rows), GFP_KERNEL);
292 ah->analogBank1Data =
293 kzalloc((sizeof(u32) *
294 ah->iniBank1.ia_rows), GFP_KERNEL);
295 ah->analogBank2Data =
296 kzalloc((sizeof(u32) *
297 ah->iniBank2.ia_rows), GFP_KERNEL);
298 ah->analogBank3Data =
299 kzalloc((sizeof(u32) *
300 ah->iniBank3.ia_rows), GFP_KERNEL);
301 ah->analogBank6Data =
302 kzalloc((sizeof(u32) *
303 ah->iniBank6.ia_rows), GFP_KERNEL);
304 ah->analogBank6TPCData =
305 kzalloc((sizeof(u32) *
306 ah->iniBank6TPC.ia_rows), GFP_KERNEL);
307 ah->analogBank7Data =
308 kzalloc((sizeof(u32) *
309 ah->iniBank7.ia_rows), GFP_KERNEL);
310
311 if (ah->analogBank0Data == NULL
312 || ah->analogBank1Data == NULL
313 || ah->analogBank2Data == NULL
314 || ah->analogBank3Data == NULL
315 || ah->analogBank6Data == NULL
316 || ah->analogBank6TPCData == NULL
317 || ah->analogBank7Data == NULL) {
318 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
319 "Cannot allocate RF banks\n");
320 *status = -ENOMEM;
321 return false;
322 }
323
324 ah->addac5416_21 =
325 kzalloc((sizeof(u32) *
326 ah->iniAddac.ia_rows *
327 ah->iniAddac.ia_columns), GFP_KERNEL);
328 if (ah->addac5416_21 == NULL) {
329 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
330 "Cannot allocate addac5416_21\n");
331 *status = -ENOMEM;
332 return false;
333 }
334
335 ah->bank6Temp =
336 kzalloc((sizeof(u32) *
337 ah->iniBank6.ia_rows), GFP_KERNEL);
338 if (ah->bank6Temp == NULL) {
339 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
340 "Cannot allocate bank6Temp\n");
341 *status = -ENOMEM;
342 return false;
343 }
344 }
345
346 return true;
347}
348
349void
350ath9k_hw_decrease_chain_power(struct ath_hw *ah, struct ath9k_channel *chan)
351{
352 int i, regWrites = 0;
353 u32 bank6SelMask;
354 u32 *bank6Temp = ah->bank6Temp;
355
356 switch (ah->config.diversity_control) {
357 case ATH9K_ANT_FIXED_A:
358 bank6SelMask =
359 (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
360 REDUCE_CHAIN_0 : REDUCE_CHAIN_1;
361 break;
362 case ATH9K_ANT_FIXED_B:
363 bank6SelMask =
364 (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
365 REDUCE_CHAIN_1 : REDUCE_CHAIN_0;
366 break;
367 case ATH9K_ANT_VARIABLE:
368 return;
369 break;
370 default:
371 return;
372 break;
373 }
374
375 for (i = 0; i < ah->iniBank6.ia_rows; i++)
376 bank6Temp[i] = ah->analogBank6Data[i];
377
378 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
379
380 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
381 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
382 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
383 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
384 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
385 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
386 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
387 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
388 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
389
390 REG_WRITE_RF_ARRAY(&ah->iniBank6, bank6Temp, regWrites);
391
392 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
393#ifdef ALTER_SWITCH
394 REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
395 (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
396 | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
397#endif
398}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index dfda6f444648..31de27dc0c4a 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,20 +17,23 @@
17#ifndef PHY_H 17#ifndef PHY_H
18#define PHY_H 18#define PHY_H
19 19
20void ath9k_hw_ar9280_set_channel(struct ath_hw *ah, 20/* Common between single chip and non single-chip solutions */
21 struct ath9k_channel 21void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites);
22 *chan); 22
23bool ath9k_hw_set_channel(struct ath_hw *ah, 23/* Single chip radio settings */
24 struct ath9k_channel *chan); 24int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex, 25void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
26 u32 freqIndex, int regWrites); 26
27/* Routines below are for non single-chip solutions */
28int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
29void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
30
31int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
32void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
33
27bool ath9k_hw_set_rf_regs(struct ath_hw *ah, 34bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
28 struct ath9k_channel *chan, 35 struct ath9k_channel *chan,
29 u16 modesIndex); 36 u16 modesIndex);
30void ath9k_hw_decrease_chain_power(struct ath_hw *ah,
31 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hw *ah,
33 int *status);
34 37
35#define AR_PHY_BASE 0x9800 38#define AR_PHY_BASE 0x9800
36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) 39#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
@@ -45,6 +48,7 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
45#define AR_PHY_FC_DYN2040_EN 0x00000004 48#define AR_PHY_FC_DYN2040_EN 0x00000004
46#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008 49#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
47#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010 50#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
51/* For 25 MHz channel spacing -- not used but supported by hw */
48#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020 52#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
49#define AR_PHY_FC_HT_EN 0x00000040 53#define AR_PHY_FC_HT_EN 0x00000040
50#define AR_PHY_FC_SHORT_GI_40 0x00000080 54#define AR_PHY_FC_SHORT_GI_40 0x00000080
@@ -185,8 +189,20 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
185#define AR_PHY_PLL_CTL_44_2133 0xeb 189#define AR_PHY_PLL_CTL_44_2133 0xeb
186#define AR_PHY_PLL_CTL_40_2133 0xea 190#define AR_PHY_PLL_CTL_40_2133 0xea
187 191
188#define AR_PHY_SPECTRAL_SCAN 0x9912 192#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
189#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1 193#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
194#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
195#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
196#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
197#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
198#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
199#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
200#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
201#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
202#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
203#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
204#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
205#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
190 206
191#define AR_PHY_RX_DELAY 0x9914 207#define AR_PHY_RX_DELAY 0x9914
192#define AR_PHY_SEARCH_START_DELAY 0x9918 208#define AR_PHY_SEARCH_START_DELAY 0x9918
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 1895d63aad0a..1d96777b4cd2 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -425,7 +425,7 @@ static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv)
425static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv, 425static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv,
426 u8 index, int valid_tx_rate) 426 u8 index, int valid_tx_rate)
427{ 427{
428 ASSERT(index <= ath_rc_priv->rate_table_size); 428 BUG_ON(index > ath_rc_priv->rate_table_size);
429 ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0; 429 ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0;
430} 430}
431 431
@@ -859,12 +859,12 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
859static bool ath_rc_update_per(struct ath_softc *sc, 859static bool ath_rc_update_per(struct ath_softc *sc,
860 const struct ath_rate_table *rate_table, 860 const struct ath_rate_table *rate_table,
861 struct ath_rate_priv *ath_rc_priv, 861 struct ath_rate_priv *ath_rc_priv,
862 struct ath_tx_info_priv *tx_info_priv, 862 struct ieee80211_tx_info *tx_info,
863 int tx_rate, int xretries, int retries, 863 int tx_rate, int xretries, int retries,
864 u32 now_msec) 864 u32 now_msec)
865{ 865{
866 bool state_change = false; 866 bool state_change = false;
867 int count; 867 int count, n_bad_frames;
868 u8 last_per; 868 u8 last_per;
869 static u32 nretry_to_per_lookup[10] = { 869 static u32 nretry_to_per_lookup[10] = {
870 100 * 0 / 1, 870 100 * 0 / 1,
@@ -880,6 +880,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
880 }; 880 };
881 881
882 last_per = ath_rc_priv->per[tx_rate]; 882 last_per = ath_rc_priv->per[tx_rate];
883 n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;
883 884
884 if (xretries) { 885 if (xretries) {
885 if (xretries == 1) { 886 if (xretries == 1) {
@@ -907,7 +908,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
907 if (retries >= count) 908 if (retries >= count)
908 retries = count - 1; 909 retries = count - 1;
909 910
910 if (tx_info_priv->n_bad_frames) { 911 if (n_bad_frames) {
911 /* new_PER = 7/8*old_PER + 1/8*(currentPER) 912 /* new_PER = 7/8*old_PER + 1/8*(currentPER)
912 * Assuming that n_frames is not 0. The current PER 913 * Assuming that n_frames is not 0. The current PER
913 * from the retries is 100 * retries / (retries+1), 914 * from the retries is 100 * retries / (retries+1),
@@ -920,14 +921,14 @@ static bool ath_rc_update_per(struct ath_softc *sc,
920 * the above PER. The expression below is a 921 * the above PER. The expression below is a
921 * simplified version of the sum of these two terms. 922 * simplified version of the sum of these two terms.
922 */ 923 */
923 if (tx_info_priv->n_frames > 0) { 924 if (tx_info->status.ampdu_len > 0) {
924 int n_frames, n_bad_frames; 925 int n_frames, n_bad_tries;
925 u8 cur_per, new_per; 926 u8 cur_per, new_per;
926 927
927 n_bad_frames = retries * tx_info_priv->n_frames + 928 n_bad_tries = retries * tx_info->status.ampdu_len +
928 tx_info_priv->n_bad_frames; 929 n_bad_frames;
929 n_frames = tx_info_priv->n_frames * (retries + 1); 930 n_frames = tx_info->status.ampdu_len * (retries + 1);
930 cur_per = (100 * n_bad_frames / n_frames) >> 3; 931 cur_per = (100 * n_bad_tries / n_frames) >> 3;
931 new_per = (u8)(last_per - (last_per >> 3) + cur_per); 932 new_per = (u8)(last_per - (last_per >> 3) + cur_per);
932 ath_rc_priv->per[tx_rate] = new_per; 933 ath_rc_priv->per[tx_rate] = new_per;
933 } 934 }
@@ -943,8 +944,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
943 * this was a probe. Otherwise, ignore the probe. 944 * this was a probe. Otherwise, ignore the probe.
944 */ 945 */
945 if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) { 946 if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
946 if (retries > 0 || 2 * tx_info_priv->n_bad_frames > 947 if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
947 tx_info_priv->n_frames) {
948 /* 948 /*
949 * Since we probed with just a single attempt, 949 * Since we probed with just a single attempt,
950 * any retries means the probe failed. Also, 950 * any retries means the probe failed. Also,
@@ -1003,7 +1003,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
1003 1003
1004static void ath_rc_update_ht(struct ath_softc *sc, 1004static void ath_rc_update_ht(struct ath_softc *sc,
1005 struct ath_rate_priv *ath_rc_priv, 1005 struct ath_rate_priv *ath_rc_priv,
1006 struct ath_tx_info_priv *tx_info_priv, 1006 struct ieee80211_tx_info *tx_info,
1007 int tx_rate, int xretries, int retries) 1007 int tx_rate, int xretries, int retries)
1008{ 1008{
1009 u32 now_msec = jiffies_to_msecs(jiffies); 1009 u32 now_msec = jiffies_to_msecs(jiffies);
@@ -1020,7 +1020,7 @@ static void ath_rc_update_ht(struct ath_softc *sc,
1020 1020
1021 /* Update PER first */ 1021 /* Update PER first */
1022 state_change = ath_rc_update_per(sc, rate_table, ath_rc_priv, 1022 state_change = ath_rc_update_per(sc, rate_table, ath_rc_priv,
1023 tx_info_priv, tx_rate, xretries, 1023 tx_info, tx_rate, xretries,
1024 retries, now_msec); 1024 retries, now_msec);
1025 1025
1026 /* 1026 /*
@@ -1098,7 +1098,6 @@ static void ath_rc_tx_status(struct ath_softc *sc,
1098 struct ieee80211_tx_info *tx_info, 1098 struct ieee80211_tx_info *tx_info,
1099 int final_ts_idx, int xretries, int long_retry) 1099 int final_ts_idx, int xretries, int long_retry)
1100{ 1100{
1101 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1102 const struct ath_rate_table *rate_table; 1101 const struct ath_rate_table *rate_table;
1103 struct ieee80211_tx_rate *rates = tx_info->status.rates; 1102 struct ieee80211_tx_rate *rates = tx_info->status.rates;
1104 u8 flags; 1103 u8 flags;
@@ -1124,9 +1123,8 @@ static void ath_rc_tx_status(struct ath_softc *sc,
1124 return; 1123 return;
1125 1124
1126 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 1125 rix = ath_rc_get_rateindex(rate_table, &rates[i]);
1127 ath_rc_update_ht(sc, ath_rc_priv, 1126 ath_rc_update_ht(sc, ath_rc_priv, tx_info,
1128 tx_info_priv, rix, 1127 rix, xretries ? 1 : 2,
1129 xretries ? 1 : 2,
1130 rates[i].count); 1128 rates[i].count);
1131 } 1129 }
1132 } 1130 }
@@ -1149,8 +1147,7 @@ static void ath_rc_tx_status(struct ath_softc *sc,
1149 return; 1147 return;
1150 1148
1151 rix = ath_rc_get_rateindex(rate_table, &rates[i]); 1149 rix = ath_rc_get_rateindex(rate_table, &rates[i]);
1152 ath_rc_update_ht(sc, ath_rc_priv, tx_info_priv, rix, 1150 ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
1153 xretries, long_retry);
1154} 1151}
1155 1152
1156static const 1153static const
@@ -1160,6 +1157,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1160 bool is_cw_40) 1157 bool is_cw_40)
1161{ 1158{
1162 int mode = 0; 1159 int mode = 0;
1160 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1163 1161
1164 switch(band) { 1162 switch(band) {
1165 case IEEE80211_BAND_2GHZ: 1163 case IEEE80211_BAND_2GHZ:
@@ -1177,13 +1175,14 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1177 mode = ATH9K_MODE_11NA_HT40PLUS; 1175 mode = ATH9K_MODE_11NA_HT40PLUS;
1178 break; 1176 break;
1179 default: 1177 default:
1180 DPRINTF(sc, ATH_DBG_CONFIG, "Invalid band\n"); 1178 ath_print(common, ATH_DBG_CONFIG, "Invalid band\n");
1181 return NULL; 1179 return NULL;
1182 } 1180 }
1183 1181
1184 BUG_ON(mode >= ATH9K_MODE_MAX); 1182 BUG_ON(mode >= ATH9K_MODE_MAX);
1185 1183
1186 DPRINTF(sc, ATH_DBG_CONFIG, "Choosing rate table for mode: %d\n", mode); 1184 ath_print(common, ATH_DBG_CONFIG,
1185 "Choosing rate table for mode: %d\n", mode);
1187 return sc->hw_rate_table[mode]; 1186 return sc->hw_rate_table[mode];
1188} 1187}
1189 1188
@@ -1194,11 +1193,13 @@ static void ath_rc_init(struct ath_softc *sc,
1194 const struct ath_rate_table *rate_table) 1193 const struct ath_rate_table *rate_table)
1195{ 1194{
1196 struct ath_rateset *rateset = &ath_rc_priv->neg_rates; 1195 struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
1196 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1197 u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates; 1197 u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates;
1198 u8 i, j, k, hi = 0, hthi = 0; 1198 u8 i, j, k, hi = 0, hthi = 0;
1199 1199
1200 if (!rate_table) { 1200 if (!rate_table) {
1201 DPRINTF(sc, ATH_DBG_FATAL, "Rate table not initialized\n"); 1201 ath_print(common, ATH_DBG_FATAL,
1202 "Rate table not initialized\n");
1202 return; 1203 return;
1203 } 1204 }
1204 1205
@@ -1239,7 +1240,7 @@ static void ath_rc_init(struct ath_softc *sc,
1239 1240
1240 ath_rc_priv->rate_table_size = hi + 1; 1241 ath_rc_priv->rate_table_size = hi + 1;
1241 ath_rc_priv->rate_max_phy = 0; 1242 ath_rc_priv->rate_max_phy = 0;
1242 ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE); 1243 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1243 1244
1244 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { 1245 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
1245 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) { 1246 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1253,16 +1254,17 @@ static void ath_rc_init(struct ath_softc *sc,
1253 1254
1254 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1]; 1255 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
1255 } 1256 }
1256 ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE); 1257 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1257 ASSERT(k <= RATE_TABLE_SIZE); 1258 BUG_ON(k > RATE_TABLE_SIZE);
1258 1259
1259 ath_rc_priv->max_valid_rate = k; 1260 ath_rc_priv->max_valid_rate = k;
1260 ath_rc_sort_validrates(rate_table, ath_rc_priv); 1261 ath_rc_sort_validrates(rate_table, ath_rc_priv);
1261 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; 1262 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
1262 sc->cur_rate_table = rate_table; 1263 sc->cur_rate_table = rate_table;
1263 1264
1264 DPRINTF(sc, ATH_DBG_CONFIG, "RC Initialized with capabilities: 0x%x\n", 1265 ath_print(common, ATH_DBG_CONFIG,
1265 ath_rc_priv->ht_cap); 1266 "RC Initialized with capabilities: 0x%x\n",
1267 ath_rc_priv->ht_cap);
1266} 1268}
1267 1269
1268static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, 1270static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1296,23 +1298,30 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1296{ 1298{
1297 struct ath_softc *sc = priv; 1299 struct ath_softc *sc = priv;
1298 struct ath_rate_priv *ath_rc_priv = priv_sta; 1300 struct ath_rate_priv *ath_rc_priv = priv_sta;
1299 struct ath_tx_info_priv *tx_info_priv = NULL;
1300 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1301 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1301 struct ieee80211_hdr *hdr; 1302 struct ieee80211_hdr *hdr;
1302 int final_ts_idx, tx_status = 0, is_underrun = 0; 1303 int final_ts_idx = 0, tx_status = 0, is_underrun = 0;
1304 int long_retry = 0;
1303 __le16 fc; 1305 __le16 fc;
1306 int i;
1304 1307
1305 hdr = (struct ieee80211_hdr *)skb->data; 1308 hdr = (struct ieee80211_hdr *)skb->data;
1306 fc = hdr->frame_control; 1309 fc = hdr->frame_control;
1307 tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 1310 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
1308 final_ts_idx = tx_info_priv->tx.ts_rateindex; 1311 struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
1312 if (!rate->count)
1313 break;
1314
1315 final_ts_idx = i;
1316 long_retry = rate->count - 1;
1317 }
1309 1318
1310 if (!priv_sta || !ieee80211_is_data(fc) || 1319 if (!priv_sta || !ieee80211_is_data(fc) ||
1311 !tx_info_priv->update_rc) 1320 !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC))
1312 goto exit; 1321 return;
1313 1322
1314 if (tx_info_priv->tx.ts_status & ATH9K_TXERR_FILT) 1323 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
1315 goto exit; 1324 return;
1316 1325
1317 /* 1326 /*
1318 * If underrun error is seen assume it as an excessive retry only 1327 * If underrun error is seen assume it as an excessive retry only
@@ -1320,20 +1329,17 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1320 * Adjust the long retry as if the frame was tried hw->max_rate_tries 1329 * Adjust the long retry as if the frame was tried hw->max_rate_tries
1321 * times. This affects how ratectrl updates PER for the failed rate. 1330 * times. This affects how ratectrl updates PER for the failed rate.
1322 */ 1331 */
1323 if (tx_info_priv->tx.ts_flags & 1332 if ((tx_info->pad[0] & ATH_TX_INFO_UNDERRUN) &&
1324 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) && 1333 (sc->sc_ah->tx_trig_level >= ath_rc_priv->tx_triglevel_max)) {
1325 ((sc->sc_ah->tx_trig_level) >= ath_rc_priv->tx_triglevel_max)) {
1326 tx_status = 1; 1334 tx_status = 1;
1327 is_underrun = 1; 1335 is_underrun = 1;
1328 } 1336 }
1329 1337
1330 if ((tx_info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) || 1338 if (tx_info->pad[0] & ATH_TX_INFO_XRETRY)
1331 (tx_info_priv->tx.ts_status & ATH9K_TXERR_FIFO))
1332 tx_status = 1; 1339 tx_status = 1;
1333 1340
1334 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status, 1341 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
1335 (is_underrun) ? sc->hw->max_rate_tries : 1342 (is_underrun) ? sc->hw->max_rate_tries : long_retry);
1336 tx_info_priv->tx.ts_longretry);
1337 1343
1338 /* Check if aggregation has to be enabled for this tid */ 1344 /* Check if aggregation has to be enabled for this tid */
1339 if (conf_is_ht(&sc->hw->conf) && 1345 if (conf_is_ht(&sc->hw->conf) &&
@@ -1347,13 +1353,11 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1347 an = (struct ath_node *)sta->drv_priv; 1353 an = (struct ath_node *)sta->drv_priv;
1348 1354
1349 if(ath_tx_aggr_check(sc, an, tid)) 1355 if(ath_tx_aggr_check(sc, an, tid))
1350 ieee80211_start_tx_ba_session(sc->hw, hdr->addr1, tid); 1356 ieee80211_start_tx_ba_session(sta, tid);
1351 } 1357 }
1352 } 1358 }
1353 1359
1354 ath_debug_stat_rc(sc, skb); 1360 ath_debug_stat_rc(sc, skb);
1355exit:
1356 kfree(tx_info_priv);
1357} 1361}
1358 1362
1359static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband, 1363static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
@@ -1438,9 +1442,9 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1438 oper_cw40, oper_sgi40); 1442 oper_cw40, oper_sgi40);
1439 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1443 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1440 1444
1441 DPRINTF(sc, ATH_DBG_CONFIG, 1445 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
1442 "Operating HT Bandwidth changed to: %d\n", 1446 "Operating HT Bandwidth changed to: %d\n",
1443 sc->hw->conf.channel_type); 1447 sc->hw->conf.channel_type);
1444 } 1448 }
1445 } 1449 }
1446} 1450}
@@ -1463,8 +1467,8 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
1463 1467
1464 rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp); 1468 rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
1465 if (!rate_priv) { 1469 if (!rate_priv) {
1466 DPRINTF(sc, ATH_DBG_FATAL, 1470 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1467 "Unable to allocate private rc structure\n"); 1471 "Unable to allocate private rc structure\n");
1468 return NULL; 1472 return NULL;
1469 } 1473 }
1470 1474
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index fa21a628ddd0..51f85ecbe88d 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -19,6 +19,8 @@
19#ifndef RC_H 19#ifndef RC_H
20#define RC_H 20#define RC_H
21 21
22#include "hw.h"
23
22struct ath_softc; 24struct ath_softc;
23 25
24#define ATH_RATE_MAX 30 26#define ATH_RATE_MAX 30
@@ -165,24 +167,18 @@ struct ath_rate_priv {
165 struct ath_rate_softc *asc; 167 struct ath_rate_softc *asc;
166}; 168};
167 169
170#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
171#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
172#define ATH_TX_INFO_UPDATE_RC (1 << 2)
173#define ATH_TX_INFO_XRETRY (1 << 3)
174#define ATH_TX_INFO_UNDERRUN (1 << 4)
175
168enum ath9k_internal_frame_type { 176enum ath9k_internal_frame_type {
169 ATH9K_NOT_INTERNAL, 177 ATH9K_NOT_INTERNAL,
170 ATH9K_INT_PAUSE, 178 ATH9K_INT_PAUSE,
171 ATH9K_INT_UNPAUSE 179 ATH9K_INT_UNPAUSE
172}; 180};
173 181
174struct ath_tx_info_priv {
175 struct ath_wiphy *aphy;
176 struct ath_tx_status tx;
177 int n_frames;
178 int n_bad_frames;
179 bool update_rc;
180 enum ath9k_internal_frame_type frame_type;
181};
182
183#define ATH_TX_INFO_PRIV(tx_info) \
184 ((struct ath_tx_info_priv *)((tx_info)->rate_driver_data[0]))
185
186void ath_rate_attach(struct ath_softc *sc); 182void ath_rate_attach(struct ath_softc *sc);
187u8 ath_rate_findrateix(struct ath_softc *sc, u8 dot11_rate); 183u8 ath_rate_findrateix(struct ath_softc *sc, u8 dot11_rate);
188int ath_rate_control_register(void); 184int ath_rate_control_register(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ec0abf823995..477365e5ae69 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -48,6 +48,7 @@ static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
48static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 48static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
49{ 49{
50 struct ath_hw *ah = sc->sc_ah; 50 struct ath_hw *ah = sc->sc_ah;
51 struct ath_common *common = ath9k_hw_common(ah);
51 struct ath_desc *ds; 52 struct ath_desc *ds;
52 struct sk_buff *skb; 53 struct sk_buff *skb;
53 54
@@ -59,14 +60,16 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
59 60
60 /* virtual addr of the beginning of the buffer. */ 61 /* virtual addr of the beginning of the buffer. */
61 skb = bf->bf_mpdu; 62 skb = bf->bf_mpdu;
62 ASSERT(skb != NULL); 63 BUG_ON(skb == NULL);
63 ds->ds_vdata = skb->data; 64 ds->ds_vdata = skb->data;
64 65
65 /* setup rx descriptors. The rx.bufsize here tells the harware 66 /*
67 * setup rx descriptors. The rx_bufsize here tells the hardware
66 * how much data it can DMA to us and that we are prepared 68 * how much data it can DMA to us and that we are prepared
67 * to process */ 69 * to process
70 */
68 ath9k_hw_setuprxdesc(ah, ds, 71 ath9k_hw_setuprxdesc(ah, ds,
69 sc->rx.bufsize, 72 common->rx_bufsize,
70 0); 73 0);
71 74
72 if (sc->rx.rxlink == NULL) 75 if (sc->rx.rxlink == NULL)
@@ -86,192 +89,11 @@ static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
86 sc->rx.rxotherant = 0; 89 sc->rx.rxotherant = 0;
87} 90}
88 91
89/*
90 * Extend 15-bit time stamp from rx descriptor to
91 * a full 64-bit TSF using the current h/w TSF.
92*/
93static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
94{
95 u64 tsf;
96
97 tsf = ath9k_hw_gettsf64(sc->sc_ah);
98 if ((tsf & 0x7fff) < rstamp)
99 tsf -= 0x8000;
100 return (tsf & ~0x7fff) | rstamp;
101}
102
103/*
104 * For Decrypt or Demic errors, we only mark packet status here and always push
105 * up the frame up to let mac80211 handle the actual error case, be it no
106 * decryption key or real decryption error. This let us keep statistics there.
107 */
108static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
109 struct ieee80211_rx_status *rx_status, bool *decrypt_error,
110 struct ath_softc *sc)
111{
112 struct ieee80211_hdr *hdr;
113 u8 ratecode;
114 __le16 fc;
115 struct ieee80211_hw *hw;
116 struct ieee80211_sta *sta;
117 struct ath_node *an;
118 int last_rssi = ATH_RSSI_DUMMY_MARKER;
119
120
121 hdr = (struct ieee80211_hdr *)skb->data;
122 fc = hdr->frame_control;
123 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
124 hw = ath_get_virt_hw(sc, hdr);
125
126 if (ds->ds_rxstat.rs_more) {
127 /*
128 * Frame spans multiple descriptors; this cannot happen yet
129 * as we don't support jumbograms. If not in monitor mode,
130 * discard the frame. Enable this if you want to see
131 * error frames in Monitor mode.
132 */
133 if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
134 goto rx_next;
135 } else if (ds->ds_rxstat.rs_status != 0) {
136 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
137 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
138 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
139 goto rx_next;
140
141 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
142 *decrypt_error = true;
143 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
144 if (ieee80211_is_ctl(fc))
145 /*
146 * Sometimes, we get invalid
147 * MIC failures on valid control frames.
148 * Remove these mic errors.
149 */
150 ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
151 else
152 rx_status->flag |= RX_FLAG_MMIC_ERROR;
153 }
154 /*
155 * Reject error frames with the exception of
156 * decryption and MIC failures. For monitor mode,
157 * we also ignore the CRC error.
158 */
159 if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
160 if (ds->ds_rxstat.rs_status &
161 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
162 ATH9K_RXERR_CRC))
163 goto rx_next;
164 } else {
165 if (ds->ds_rxstat.rs_status &
166 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
167 goto rx_next;
168 }
169 }
170 }
171
172 ratecode = ds->ds_rxstat.rs_rate;
173
174 if (ratecode & 0x80) {
175 /* HT rate */
176 rx_status->flag |= RX_FLAG_HT;
177 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
178 rx_status->flag |= RX_FLAG_40MHZ;
179 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
180 rx_status->flag |= RX_FLAG_SHORT_GI;
181 rx_status->rate_idx = ratecode & 0x7f;
182 } else {
183 int i = 0, cur_band, n_rates;
184
185 cur_band = hw->conf.channel->band;
186 n_rates = sc->sbands[cur_band].n_bitrates;
187
188 for (i = 0; i < n_rates; i++) {
189 if (sc->sbands[cur_band].bitrates[i].hw_value ==
190 ratecode) {
191 rx_status->rate_idx = i;
192 break;
193 }
194
195 if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
196 ratecode) {
197 rx_status->rate_idx = i;
198 rx_status->flag |= RX_FLAG_SHORTPRE;
199 break;
200 }
201 }
202 }
203
204 rcu_read_lock();
205 sta = ieee80211_find_sta(sc->hw, hdr->addr2);
206 if (sta) {
207 an = (struct ath_node *) sta->drv_priv;
208 if (ds->ds_rxstat.rs_rssi != ATH9K_RSSI_BAD &&
209 !ds->ds_rxstat.rs_moreaggr)
210 ATH_RSSI_LPF(an->last_rssi, ds->ds_rxstat.rs_rssi);
211 last_rssi = an->last_rssi;
212 }
213 rcu_read_unlock();
214
215 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
216 ds->ds_rxstat.rs_rssi = ATH_EP_RND(last_rssi,
217 ATH_RSSI_EP_MULTIPLIER);
218 if (ds->ds_rxstat.rs_rssi < 0)
219 ds->ds_rxstat.rs_rssi = 0;
220 else if (ds->ds_rxstat.rs_rssi > 127)
221 ds->ds_rxstat.rs_rssi = 127;
222
223 /* Update Beacon RSSI, this is used by ANI. */
224 if (ieee80211_is_beacon(fc))
225 sc->sc_ah->stats.avgbrssi = ds->ds_rxstat.rs_rssi;
226
227 rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
228 rx_status->band = hw->conf.channel->band;
229 rx_status->freq = hw->conf.channel->center_freq;
230 rx_status->noise = sc->ani.noise_floor;
231 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + ds->ds_rxstat.rs_rssi;
232 rx_status->antenna = ds->ds_rxstat.rs_antenna;
233
234 /*
235 * Theory for reporting quality:
236 *
237 * At a hardware RSSI of 45 you will be able to use MCS 7 reliably.
238 * At a hardware RSSI of 45 you will be able to use MCS 15 reliably.
239 * At a hardware RSSI of 35 you should be able use 54 Mbps reliably.
240 *
241 * MCS 7 is the highets MCS index usable by a 1-stream device.
242 * MCS 15 is the highest MCS index usable by a 2-stream device.
243 *
244 * All ath9k devices are either 1-stream or 2-stream.
245 *
246 * How many bars you see is derived from the qual reporting.
247 *
248 * A more elaborate scheme can be used here but it requires tables
249 * of SNR/throughput for each possible mode used. For the MCS table
250 * you can refer to the wireless wiki:
251 *
252 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
253 *
254 */
255 if (conf_is_ht(&hw->conf))
256 rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;
257 else
258 rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 35;
259
260 /* rssi can be more than 45 though, anything above that
261 * should be considered at 100% */
262 if (rx_status->qual > 100)
263 rx_status->qual = 100;
264
265 rx_status->flag |= RX_FLAG_TSFT;
266
267 return 1;
268rx_next:
269 return 0;
270}
271
272static void ath_opmode_init(struct ath_softc *sc) 92static void ath_opmode_init(struct ath_softc *sc)
273{ 93{
274 struct ath_hw *ah = sc->sc_ah; 94 struct ath_hw *ah = sc->sc_ah;
95 struct ath_common *common = ath9k_hw_common(ah);
96
275 u32 rfilt, mfilt[2]; 97 u32 rfilt, mfilt[2];
276 98
277 /* configure rx filter */ 99 /* configure rx filter */
@@ -280,13 +102,13 @@ static void ath_opmode_init(struct ath_softc *sc)
280 102
281 /* configure bssid mask */ 103 /* configure bssid mask */
282 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 104 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
283 ath9k_hw_setbssidmask(sc); 105 ath_hw_setbssidmask(common);
284 106
285 /* configure operational mode */ 107 /* configure operational mode */
286 ath9k_hw_setopmode(ah); 108 ath9k_hw_setopmode(ah);
287 109
288 /* Handle any link-level address change. */ 110 /* Handle any link-level address change. */
289 ath9k_hw_setmac(ah, sc->sc_ah->macaddr); 111 ath9k_hw_setmac(ah, common->macaddr);
290 112
291 /* calculate and install multicast filter */ 113 /* calculate and install multicast filter */
292 mfilt[0] = mfilt[1] = ~0; 114 mfilt[0] = mfilt[1] = ~0;
@@ -295,6 +117,7 @@ static void ath_opmode_init(struct ath_softc *sc)
295 117
296int ath_rx_init(struct ath_softc *sc, int nbufs) 118int ath_rx_init(struct ath_softc *sc, int nbufs)
297{ 119{
120 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
298 struct sk_buff *skb; 121 struct sk_buff *skb;
299 struct ath_buf *bf; 122 struct ath_buf *bf;
300 int error = 0; 123 int error = 0;
@@ -303,24 +126,24 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
303 sc->sc_flags &= ~SC_OP_RXFLUSH; 126 sc->sc_flags &= ~SC_OP_RXFLUSH;
304 spin_lock_init(&sc->rx.rxbuflock); 127 spin_lock_init(&sc->rx.rxbuflock);
305 128
306 sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 129 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
307 min(sc->common.cachelsz, (u16)64)); 130 min(common->cachelsz, (u16)64));
308 131
309 DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 132 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
310 sc->common.cachelsz, sc->rx.bufsize); 133 common->cachelsz, common->rx_bufsize);
311 134
312 /* Initialize rx descriptors */ 135 /* Initialize rx descriptors */
313 136
314 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 137 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
315 "rx", nbufs, 1); 138 "rx", nbufs, 1);
316 if (error != 0) { 139 if (error != 0) {
317 DPRINTF(sc, ATH_DBG_FATAL, 140 ath_print(common, ATH_DBG_FATAL,
318 "failed to allocate rx descriptors: %d\n", error); 141 "failed to allocate rx descriptors: %d\n", error);
319 goto err; 142 goto err;
320 } 143 }
321 144
322 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 145 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
323 skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_KERNEL); 146 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
324 if (skb == NULL) { 147 if (skb == NULL) {
325 error = -ENOMEM; 148 error = -ENOMEM;
326 goto err; 149 goto err;
@@ -328,14 +151,14 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
328 151
329 bf->bf_mpdu = skb; 152 bf->bf_mpdu = skb;
330 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 153 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
331 sc->rx.bufsize, 154 common->rx_bufsize,
332 DMA_FROM_DEVICE); 155 DMA_FROM_DEVICE);
333 if (unlikely(dma_mapping_error(sc->dev, 156 if (unlikely(dma_mapping_error(sc->dev,
334 bf->bf_buf_addr))) { 157 bf->bf_buf_addr))) {
335 dev_kfree_skb_any(skb); 158 dev_kfree_skb_any(skb);
336 bf->bf_mpdu = NULL; 159 bf->bf_mpdu = NULL;
337 DPRINTF(sc, ATH_DBG_FATAL, 160 ath_print(common, ATH_DBG_FATAL,
338 "dma_mapping_error() on RX init\n"); 161 "dma_mapping_error() on RX init\n");
339 error = -ENOMEM; 162 error = -ENOMEM;
340 goto err; 163 goto err;
341 } 164 }
@@ -352,6 +175,8 @@ err:
352 175
353void ath_rx_cleanup(struct ath_softc *sc) 176void ath_rx_cleanup(struct ath_softc *sc)
354{ 177{
178 struct ath_hw *ah = sc->sc_ah;
179 struct ath_common *common = ath9k_hw_common(ah);
355 struct sk_buff *skb; 180 struct sk_buff *skb;
356 struct ath_buf *bf; 181 struct ath_buf *bf;
357 182
@@ -359,7 +184,7 @@ void ath_rx_cleanup(struct ath_softc *sc)
359 skb = bf->bf_mpdu; 184 skb = bf->bf_mpdu;
360 if (skb) { 185 if (skb) {
361 dma_unmap_single(sc->dev, bf->bf_buf_addr, 186 dma_unmap_single(sc->dev, bf->bf_buf_addr,
362 sc->rx.bufsize, DMA_FROM_DEVICE); 187 common->rx_bufsize, DMA_FROM_DEVICE);
363 dev_kfree_skb(skb); 188 dev_kfree_skb(skb);
364 } 189 }
365 } 190 }
@@ -420,7 +245,10 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
420 else 245 else
421 rfilt |= ATH9K_RX_FILTER_BEACON; 246 rfilt |= ATH9K_RX_FILTER_BEACON;
422 247
423 if (sc->rx.rxfilter & FIF_PSPOLL) 248 if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
249 AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
250 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
251 (sc->rx.rxfilter & FIF_PSPOLL))
424 rfilt |= ATH9K_RX_FILTER_PSPOLL; 252 rfilt |= ATH9K_RX_FILTER_PSPOLL;
425 253
426 if (conf_is_ht(&sc->hw->conf)) 254 if (conf_is_ht(&sc->hw->conf))
@@ -527,20 +355,22 @@ static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
527static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 355static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
528{ 356{
529 struct ieee80211_mgmt *mgmt; 357 struct ieee80211_mgmt *mgmt;
358 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
530 359
531 if (skb->len < 24 + 8 + 2 + 2) 360 if (skb->len < 24 + 8 + 2 + 2)
532 return; 361 return;
533 362
534 mgmt = (struct ieee80211_mgmt *)skb->data; 363 mgmt = (struct ieee80211_mgmt *)skb->data;
535 if (memcmp(sc->curbssid, mgmt->bssid, ETH_ALEN) != 0) 364 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
536 return; /* not from our current AP */ 365 return; /* not from our current AP */
537 366
538 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON; 367 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
539 368
540 if (sc->sc_flags & SC_OP_BEACON_SYNC) { 369 if (sc->sc_flags & SC_OP_BEACON_SYNC) {
541 sc->sc_flags &= ~SC_OP_BEACON_SYNC; 370 sc->sc_flags &= ~SC_OP_BEACON_SYNC;
542 DPRINTF(sc, ATH_DBG_PS, "Reconfigure Beacon timers based on " 371 ath_print(common, ATH_DBG_PS,
543 "timestamp from the AP\n"); 372 "Reconfigure Beacon timers based on "
373 "timestamp from the AP\n");
544 ath_beacon_config(sc, NULL); 374 ath_beacon_config(sc, NULL);
545 } 375 }
546 376
@@ -552,8 +382,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
552 * a backup trigger for returning into NETWORK SLEEP state, 382 * a backup trigger for returning into NETWORK SLEEP state,
553 * so we are waiting for it as well. 383 * so we are waiting for it as well.
554 */ 384 */
555 DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating " 385 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
556 "buffered broadcast/multicast frame(s)\n"); 386 "buffered broadcast/multicast frame(s)\n");
557 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; 387 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
558 return; 388 return;
559 } 389 }
@@ -565,13 +395,15 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
565 * been delivered. 395 * been delivered.
566 */ 396 */
567 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 397 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
568 DPRINTF(sc, ATH_DBG_PS, "PS wait for CAB frames timed out\n"); 398 ath_print(common, ATH_DBG_PS,
399 "PS wait for CAB frames timed out\n");
569 } 400 }
570} 401}
571 402
572static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) 403static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
573{ 404{
574 struct ieee80211_hdr *hdr; 405 struct ieee80211_hdr *hdr;
406 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
575 407
576 hdr = (struct ieee80211_hdr *)skb->data; 408 hdr = (struct ieee80211_hdr *)skb->data;
577 409
@@ -589,14 +421,15 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
589 * point. 421 * point.
590 */ 422 */
591 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 423 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
592 DPRINTF(sc, ATH_DBG_PS, "All PS CAB frames received, back to " 424 ath_print(common, ATH_DBG_PS,
593 "sleep\n"); 425 "All PS CAB frames received, back to sleep\n");
594 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) && 426 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
595 !is_multicast_ether_addr(hdr->addr1) && 427 !is_multicast_ether_addr(hdr->addr1) &&
596 !ieee80211_has_morefrags(hdr->frame_control)) { 428 !ieee80211_has_morefrags(hdr->frame_control)) {
597 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA; 429 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
598 DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having " 430 ath_print(common, ATH_DBG_PS,
599 "received PS-Poll data (0x%x)\n", 431 "Going back to sleep after having received "
432 "PS-Poll data (0x%x)\n",
600 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 433 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
601 SC_OP_WAIT_FOR_CAB | 434 SC_OP_WAIT_FOR_CAB |
602 SC_OP_WAIT_FOR_PSPOLL_DATA | 435 SC_OP_WAIT_FOR_PSPOLL_DATA |
@@ -604,8 +437,9 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
604 } 437 }
605} 438}
606 439
607static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb, 440static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
608 struct ieee80211_rx_status *rx_status) 441 struct ath_softc *sc, struct sk_buff *skb,
442 struct ieee80211_rx_status *rxs)
609{ 443{
610 struct ieee80211_hdr *hdr; 444 struct ieee80211_hdr *hdr;
611 445
@@ -625,19 +459,14 @@ static void ath_rx_send_to_mac80211(struct ath_softc *sc, struct sk_buff *skb,
625 if (aphy == NULL) 459 if (aphy == NULL)
626 continue; 460 continue;
627 nskb = skb_copy(skb, GFP_ATOMIC); 461 nskb = skb_copy(skb, GFP_ATOMIC);
628 if (nskb) { 462 if (!nskb)
629 memcpy(IEEE80211_SKB_RXCB(nskb), rx_status, 463 continue;
630 sizeof(*rx_status)); 464 ieee80211_rx(aphy->hw, nskb);
631 ieee80211_rx(aphy->hw, nskb);
632 }
633 } 465 }
634 memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status));
635 ieee80211_rx(sc->hw, skb); 466 ieee80211_rx(sc->hw, skb);
636 } else { 467 } else
637 /* Deliver unicast frames based on receiver address */ 468 /* Deliver unicast frames based on receiver address */
638 memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); 469 ieee80211_rx(hw, skb);
639 ieee80211_rx(ath_get_virt_hw(sc, hdr), skb);
640 }
641} 470}
642 471
643int ath_rx_tasklet(struct ath_softc *sc, int flush) 472int ath_rx_tasklet(struct ath_softc *sc, int flush)
@@ -648,14 +477,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
648 477
649 struct ath_buf *bf; 478 struct ath_buf *bf;
650 struct ath_desc *ds; 479 struct ath_desc *ds;
480 struct ath_rx_status *rx_stats;
651 struct sk_buff *skb = NULL, *requeue_skb; 481 struct sk_buff *skb = NULL, *requeue_skb;
652 struct ieee80211_rx_status rx_status; 482 struct ieee80211_rx_status *rxs;
653 struct ath_hw *ah = sc->sc_ah; 483 struct ath_hw *ah = sc->sc_ah;
484 struct ath_common *common = ath9k_hw_common(ah);
485 /*
486 * The hw can techncically differ from common->hw when using ath9k
487 * virtual wiphy so to account for that we iterate over the active
488 * wiphys and find the appropriate wiphy and therefore hw.
489 */
490 struct ieee80211_hw *hw = NULL;
654 struct ieee80211_hdr *hdr; 491 struct ieee80211_hdr *hdr;
655 int hdrlen, padsize, retval; 492 int retval;
656 bool decrypt_error = false; 493 bool decrypt_error = false;
657 u8 keyix;
658 __le16 fc;
659 494
660 spin_lock_bh(&sc->rx.rxbuflock); 495 spin_lock_bh(&sc->rx.rxbuflock);
661 496
@@ -727,9 +562,15 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
727 * 2. requeueing the same buffer to h/w 562 * 2. requeueing the same buffer to h/w
728 */ 563 */
729 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr, 564 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
730 sc->rx.bufsize, 565 common->rx_bufsize,
731 DMA_FROM_DEVICE); 566 DMA_FROM_DEVICE);
732 567
568 hdr = (struct ieee80211_hdr *) skb->data;
569 rxs = IEEE80211_SKB_RXCB(skb);
570
571 hw = ath_get_virt_hw(sc, hdr);
572 rx_stats = &ds->ds_rxstat;
573
733 /* 574 /*
734 * If we're asked to flush receive queue, directly 575 * If we're asked to flush receive queue, directly
735 * chain it back at the queue without processing it. 576 * chain it back at the queue without processing it.
@@ -737,19 +578,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
737 if (flush) 578 if (flush)
738 goto requeue; 579 goto requeue;
739 580
740 if (!ds->ds_rxstat.rs_datalen) 581 retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
741 goto requeue; 582 rxs, &decrypt_error);
742 583 if (retval)
743 /* The status portion of the descriptor could get corrupted. */
744 if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
745 goto requeue;
746
747 if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
748 goto requeue; 584 goto requeue;
749 585
750 /* Ensure we always have an skb to requeue once we are done 586 /* Ensure we always have an skb to requeue once we are done
751 * processing the current buffer's skb */ 587 * processing the current buffer's skb */
752 requeue_skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_ATOMIC); 588 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
753 589
754 /* If there is no memory we ignore the current RX'd frame, 590 /* If there is no memory we ignore the current RX'd frame,
755 * tell hardware it can give us a new frame using the old 591 * tell hardware it can give us a new frame using the old
@@ -760,60 +596,26 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
760 596
761 /* Unmap the frame */ 597 /* Unmap the frame */
762 dma_unmap_single(sc->dev, bf->bf_buf_addr, 598 dma_unmap_single(sc->dev, bf->bf_buf_addr,
763 sc->rx.bufsize, 599 common->rx_bufsize,
764 DMA_FROM_DEVICE); 600 DMA_FROM_DEVICE);
765 601
766 skb_put(skb, ds->ds_rxstat.rs_datalen); 602 skb_put(skb, rx_stats->rs_datalen);
767
768 /* see if any padding is done by the hw and remove it */
769 hdr = (struct ieee80211_hdr *)skb->data;
770 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
771 fc = hdr->frame_control;
772
773 /* The MAC header is padded to have 32-bit boundary if the
774 * packet payload is non-zero. The general calculation for
775 * padsize would take into account odd header lengths:
776 * padsize = (4 - hdrlen % 4) % 4; However, since only
777 * even-length headers are used, padding can only be 0 or 2
778 * bytes and we can optimize this a bit. In addition, we must
779 * not try to remove padding from short control frames that do
780 * not have payload. */
781 padsize = hdrlen & 3;
782 if (padsize && hdrlen >= 24) {
783 memmove(skb->data + padsize, skb->data, hdrlen);
784 skb_pull(skb, padsize);
785 }
786
787 keyix = ds->ds_rxstat.rs_keyix;
788 603
789 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) { 604 ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
790 rx_status.flag |= RX_FLAG_DECRYPTED; 605 rxs, decrypt_error);
791 } else if (ieee80211_has_protected(fc)
792 && !decrypt_error && skb->len >= hdrlen + 4) {
793 keyix = skb->data[hdrlen + 3] >> 6;
794
795 if (test_bit(keyix, sc->keymap))
796 rx_status.flag |= RX_FLAG_DECRYPTED;
797 }
798 if (ah->sw_mgmt_crypto &&
799 (rx_status.flag & RX_FLAG_DECRYPTED) &&
800 ieee80211_is_mgmt(fc)) {
801 /* Use software decrypt for management frames. */
802 rx_status.flag &= ~RX_FLAG_DECRYPTED;
803 }
804 606
805 /* We will now give hardware our shiny new allocated skb */ 607 /* We will now give hardware our shiny new allocated skb */
806 bf->bf_mpdu = requeue_skb; 608 bf->bf_mpdu = requeue_skb;
807 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 609 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
808 sc->rx.bufsize, 610 common->rx_bufsize,
809 DMA_FROM_DEVICE); 611 DMA_FROM_DEVICE);
810 if (unlikely(dma_mapping_error(sc->dev, 612 if (unlikely(dma_mapping_error(sc->dev,
811 bf->bf_buf_addr))) { 613 bf->bf_buf_addr))) {
812 dev_kfree_skb_any(requeue_skb); 614 dev_kfree_skb_any(requeue_skb);
813 bf->bf_mpdu = NULL; 615 bf->bf_mpdu = NULL;
814 DPRINTF(sc, ATH_DBG_FATAL, 616 ath_print(common, ATH_DBG_FATAL,
815 "dma_mapping_error() on RX\n"); 617 "dma_mapping_error() on RX\n");
816 ath_rx_send_to_mac80211(sc, skb, &rx_status); 618 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
817 break; 619 break;
818 } 620 }
819 bf->bf_dmacontext = bf->bf_buf_addr; 621 bf->bf_dmacontext = bf->bf_buf_addr;
@@ -824,7 +626,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
824 */ 626 */
825 if (sc->rx.defant != ds->ds_rxstat.rs_antenna) { 627 if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
826 if (++sc->rx.rxotherant >= 3) 628 if (++sc->rx.rxotherant >= 3)
827 ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna); 629 ath_setdefantenna(sc, rx_stats->rs_antenna);
828 } else { 630 } else {
829 sc->rx.rxotherant = 0; 631 sc->rx.rxotherant = 0;
830 } 632 }
@@ -834,7 +636,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
834 SC_OP_WAIT_FOR_PSPOLL_DATA))) 636 SC_OP_WAIT_FOR_PSPOLL_DATA)))
835 ath_rx_ps(sc, skb); 637 ath_rx_ps(sc, skb);
836 638
837 ath_rx_send_to_mac80211(sc, skb, &rx_status); 639 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
838 640
839requeue: 641requeue:
840 list_move_tail(&bf->list, &sc->rx.rxbuf); 642 list_move_tail(&bf->list, &sc->rx.rxbuf);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index d83b77f821e9..49ec25f020f0 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -17,6 +17,8 @@
17#ifndef REG_H 17#ifndef REG_H
18#define REG_H 18#define REG_H
19 19
20#include "../reg.h"
21
20#define AR_CR 0x0008 22#define AR_CR 0x0008
21#define AR_CR_RXE 0x00000004 23#define AR_CR_RXE 0x00000004
22#define AR_CR_RXD 0x00000020 24#define AR_CR_RXD 0x00000020
@@ -969,10 +971,10 @@ enum {
969#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4 971#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_S 4
970#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080 972#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080
971#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7 973#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7
974#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00000400
975#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 10
972#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000 976#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB 0x00001000
973#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12 977#define AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB_S 12
974#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB 0x00001000
975#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB_S 1
976#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000 978#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000
977#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15 979#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15
978#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 980#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
@@ -1421,9 +1423,6 @@ enum {
1421#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000 1423#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000
1422#define AR_SLEEP2_BEACON_TIMEOUT_S 21 1424#define AR_SLEEP2_BEACON_TIMEOUT_S 21
1423 1425
1424#define AR_BSSMSKL 0x80e0
1425#define AR_BSSMSKU 0x80e4
1426
1427#define AR_TPC 0x80e8 1426#define AR_TPC 0x80e8
1428#define AR_TPC_ACK 0x0000003f 1427#define AR_TPC_ACK 0x0000003f
1429#define AR_TPC_ACK_S 0x00 1428#define AR_TPC_ACK_S 0x00
@@ -1705,4 +1704,7 @@ enum {
1705#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24) 1704#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
1706#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28) 1705#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
1707 1706
1707#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
1708#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
1709
1708#endif 1710#endif
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 19b88f8177fd..cd26caaf44e7 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -40,6 +40,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
40{ 40{
41 struct ath_wiphy *aphy = hw->priv; 41 struct ath_wiphy *aphy = hw->priv;
42 struct ath_softc *sc = aphy->sc; 42 struct ath_softc *sc = aphy->sc;
43 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
43 struct ath9k_vif_iter_data iter_data; 44 struct ath9k_vif_iter_data iter_data;
44 int i, j; 45 int i, j;
45 u8 mask[ETH_ALEN]; 46 u8 mask[ETH_ALEN];
@@ -51,7 +52,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
51 */ 52 */
52 iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC); 53 iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC);
53 if (iter_data.addr) { 54 if (iter_data.addr) {
54 memcpy(iter_data.addr, sc->sc_ah->macaddr, ETH_ALEN); 55 memcpy(iter_data.addr, common->macaddr, ETH_ALEN);
55 iter_data.count = 1; 56 iter_data.count = 1;
56 } else 57 } else
57 iter_data.count = 0; 58 iter_data.count = 0;
@@ -86,20 +87,21 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
86 kfree(iter_data.addr); 87 kfree(iter_data.addr);
87 88
88 /* Invert the mask and configure hardware */ 89 /* Invert the mask and configure hardware */
89 sc->bssidmask[0] = ~mask[0]; 90 common->bssidmask[0] = ~mask[0];
90 sc->bssidmask[1] = ~mask[1]; 91 common->bssidmask[1] = ~mask[1];
91 sc->bssidmask[2] = ~mask[2]; 92 common->bssidmask[2] = ~mask[2];
92 sc->bssidmask[3] = ~mask[3]; 93 common->bssidmask[3] = ~mask[3];
93 sc->bssidmask[4] = ~mask[4]; 94 common->bssidmask[4] = ~mask[4];
94 sc->bssidmask[5] = ~mask[5]; 95 common->bssidmask[5] = ~mask[5];
95 96
96 ath9k_hw_setbssidmask(sc); 97 ath_hw_setbssidmask(common);
97} 98}
98 99
99int ath9k_wiphy_add(struct ath_softc *sc) 100int ath9k_wiphy_add(struct ath_softc *sc)
100{ 101{
101 int i, error; 102 int i, error;
102 struct ath_wiphy *aphy; 103 struct ath_wiphy *aphy;
104 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
103 struct ieee80211_hw *hw; 105 struct ieee80211_hw *hw;
104 u8 addr[ETH_ALEN]; 106 u8 addr[ETH_ALEN];
105 107
@@ -138,7 +140,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
138 sc->sec_wiphy[i] = aphy; 140 sc->sec_wiphy[i] = aphy;
139 spin_unlock_bh(&sc->wiphy_lock); 141 spin_unlock_bh(&sc->wiphy_lock);
140 142
141 memcpy(addr, sc->sc_ah->macaddr, ETH_ALEN); 143 memcpy(addr, common->macaddr, ETH_ALEN);
142 addr[0] |= 0x02; /* Locally managed address */ 144 addr[0] |= 0x02; /* Locally managed address */
143 /* 145 /*
144 * XOR virtual wiphy index into the least significant bits to generate 146 * XOR virtual wiphy index into the least significant bits to generate
@@ -296,6 +298,7 @@ static void ath9k_wiphy_unpause_channel(struct ath_softc *sc)
296void ath9k_wiphy_chan_work(struct work_struct *work) 298void ath9k_wiphy_chan_work(struct work_struct *work)
297{ 299{
298 struct ath_softc *sc = container_of(work, struct ath_softc, chan_work); 300 struct ath_softc *sc = container_of(work, struct ath_softc, chan_work);
301 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
299 struct ath_wiphy *aphy = sc->next_wiphy; 302 struct ath_wiphy *aphy = sc->next_wiphy;
300 303
301 if (aphy == NULL) 304 if (aphy == NULL)
@@ -311,6 +314,10 @@ void ath9k_wiphy_chan_work(struct work_struct *work)
311 /* XXX: remove me eventually */ 314 /* XXX: remove me eventually */
312 ath9k_update_ichannel(sc, aphy->hw, 315 ath9k_update_ichannel(sc, aphy->hw,
313 &sc->sc_ah->channels[sc->chan_idx]); 316 &sc->sc_ah->channels[sc->chan_idx]);
317
318 /* sync hw configuration for hw code */
319 common->hw = aphy->hw;
320
314 ath_update_chainmask(sc, sc->chan_is_ht); 321 ath_update_chainmask(sc, sc->chan_is_ht);
315 if (ath_set_channel(sc, aphy->hw, 322 if (ath_set_channel(sc, aphy->hw,
316 &sc->sc_ah->channels[sc->chan_idx]) < 0) { 323 &sc->sc_ah->channels[sc->chan_idx]) < 0) {
@@ -331,13 +338,11 @@ void ath9k_wiphy_chan_work(struct work_struct *work)
331void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 338void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
332{ 339{
333 struct ath_wiphy *aphy = hw->priv; 340 struct ath_wiphy *aphy = hw->priv;
334 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
335 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 341 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
336 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
337 342
338 if (tx_info_priv && tx_info_priv->frame_type == ATH9K_INT_PAUSE && 343 if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) &&
339 aphy->state == ATH_WIPHY_PAUSING) { 344 aphy->state == ATH_WIPHY_PAUSING) {
340 if (!(info->flags & IEEE80211_TX_STAT_ACK)) { 345 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
341 printk(KERN_DEBUG "ath9k: %s: no ACK for pause " 346 printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
342 "frame\n", wiphy_name(hw->wiphy)); 347 "frame\n", wiphy_name(hw->wiphy));
343 /* 348 /*
@@ -356,9 +361,6 @@ void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
356 } 361 }
357 } 362 }
358 363
359 kfree(tx_info_priv);
360 tx_info->rate_driver_data[0] = NULL;
361
362 dev_kfree_skb(skb); 364 dev_kfree_skb(skb);
363} 365}
364 366
@@ -519,8 +521,9 @@ int ath9k_wiphy_select(struct ath_wiphy *aphy)
519 * frame being completed) 521 * frame being completed)
520 */ 522 */
521 spin_unlock_bh(&sc->wiphy_lock); 523 spin_unlock_bh(&sc->wiphy_lock);
522 ath_radio_disable(sc); 524 ath_radio_disable(sc, aphy->hw);
523 ath_radio_enable(sc); 525 ath_radio_enable(sc, aphy->hw);
526 /* Only the primary wiphy hw is used for queuing work */
524 ieee80211_queue_work(aphy->sc->hw, 527 ieee80211_queue_work(aphy->sc->hw,
525 &aphy->sc->chan_work); 528 &aphy->sc->chan_work);
526 return -EBUSY; /* previous select still in progress */ 529 return -EBUSY; /* previous select still in progress */
@@ -666,15 +669,78 @@ void ath9k_wiphy_set_scheduler(struct ath_softc *sc, unsigned int msec_int)
666bool ath9k_all_wiphys_idle(struct ath_softc *sc) 669bool ath9k_all_wiphys_idle(struct ath_softc *sc)
667{ 670{
668 unsigned int i; 671 unsigned int i;
669 if (sc->pri_wiphy->state != ATH_WIPHY_INACTIVE) { 672 if (!sc->pri_wiphy->idle)
670 return false; 673 return false;
671 }
672 for (i = 0; i < sc->num_sec_wiphy; i++) { 674 for (i = 0; i < sc->num_sec_wiphy; i++) {
673 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 675 struct ath_wiphy *aphy = sc->sec_wiphy[i];
674 if (!aphy) 676 if (!aphy)
675 continue; 677 continue;
676 if (aphy->state != ATH_WIPHY_INACTIVE) 678 if (!aphy->idle)
677 return false; 679 return false;
678 } 680 }
679 return true; 681 return true;
680} 682}
683
684/* caller must hold wiphy_lock */
685void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
686{
687 struct ath_softc *sc = aphy->sc;
688
689 aphy->idle = idle;
690 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
691 "Marking %s as %s\n",
692 wiphy_name(aphy->hw->wiphy),
693 idle ? "idle" : "not-idle");
694}
695/* Only bother starting a queue on an active virtual wiphy */
696void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
697{
698 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
699 unsigned int i;
700
701 spin_lock_bh(&sc->wiphy_lock);
702
703 /* Start the primary wiphy */
704 if (sc->pri_wiphy->state == ATH_WIPHY_ACTIVE) {
705 ieee80211_wake_queue(hw, skb_queue);
706 goto unlock;
707 }
708
709 /* Now start the secondary wiphy queues */
710 for (i = 0; i < sc->num_sec_wiphy; i++) {
711 struct ath_wiphy *aphy = sc->sec_wiphy[i];
712 if (!aphy)
713 continue;
714 if (aphy->state != ATH_WIPHY_ACTIVE)
715 continue;
716
717 hw = aphy->hw;
718 ieee80211_wake_queue(hw, skb_queue);
719 break;
720 }
721
722unlock:
723 spin_unlock_bh(&sc->wiphy_lock);
724}
725
726/* Go ahead and propagate information to all virtual wiphys, it won't hurt */
727void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue)
728{
729 struct ieee80211_hw *hw = sc->pri_wiphy->hw;
730 unsigned int i;
731
732 spin_lock_bh(&sc->wiphy_lock);
733
734 /* Stop the primary wiphy */
735 ieee80211_stop_queue(hw, skb_queue);
736
737 /* Now stop the secondary wiphy queues */
738 for (i = 0; i < sc->num_sec_wiphy; i++) {
739 struct ath_wiphy *aphy = sc->sec_wiphy[i];
740 if (!aphy)
741 continue;
742 hw = aphy->hw;
743 ieee80211_stop_queue(hw, skb_queue);
744 }
745 spin_unlock_bh(&sc->wiphy_lock);
746}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 42551a48c8ac..745d91995d78 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -107,7 +107,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
107{ 107{
108 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 108 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
109 109
110 ASSERT(tid->paused > 0); 110 BUG_ON(tid->paused <= 0);
111 spin_lock_bh(&txq->axq_lock); 111 spin_lock_bh(&txq->axq_lock);
112 112
113 tid->paused--; 113 tid->paused--;
@@ -131,7 +131,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
131 struct list_head bf_head; 131 struct list_head bf_head;
132 INIT_LIST_HEAD(&bf_head); 132 INIT_LIST_HEAD(&bf_head);
133 133
134 ASSERT(tid->paused > 0); 134 BUG_ON(tid->paused <= 0);
135 spin_lock_bh(&txq->axq_lock); 135 spin_lock_bh(&txq->axq_lock);
136 136
137 tid->paused--; 137 tid->paused--;
@@ -143,7 +143,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
143 143
144 while (!list_empty(&tid->buf_q)) { 144 while (!list_empty(&tid->buf_q)) {
145 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 145 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
146 ASSERT(!bf_isretried(bf)); 146 BUG_ON(bf_isretried(bf));
147 list_move_tail(&bf->list, &bf_head); 147 list_move_tail(&bf->list, &bf_head);
148 ath_tx_send_ht_normal(sc, txq, tid, &bf_head); 148 ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
149 } 149 }
@@ -178,7 +178,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
178 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 178 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
179 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 179 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
180 180
181 ASSERT(tid->tx_buf[cindex] == NULL); 181 BUG_ON(tid->tx_buf[cindex] != NULL);
182 tid->tx_buf[cindex] = bf; 182 tid->tx_buf[cindex] = bf;
183 183
184 if (index >= ((tid->baw_tail - tid->baw_head) & 184 if (index >= ((tid->baw_tail - tid->baw_head) &
@@ -251,6 +251,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
251 251
252 ATH_TXBUF_RESET(tbf); 252 ATH_TXBUF_RESET(tbf);
253 253
254 tbf->aphy = bf->aphy;
254 tbf->bf_mpdu = bf->bf_mpdu; 255 tbf->bf_mpdu = bf->bf_mpdu;
255 tbf->bf_buf_addr = bf->bf_buf_addr; 256 tbf->bf_buf_addr = bf->bf_buf_addr;
256 *(tbf->bf_desc) = *(bf->bf_desc); 257 *(tbf->bf_desc) = *(bf->bf_desc);
@@ -267,7 +268,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
267 struct ath_node *an = NULL; 268 struct ath_node *an = NULL;
268 struct sk_buff *skb; 269 struct sk_buff *skb;
269 struct ieee80211_sta *sta; 270 struct ieee80211_sta *sta;
271 struct ieee80211_hw *hw;
270 struct ieee80211_hdr *hdr; 272 struct ieee80211_hdr *hdr;
273 struct ieee80211_tx_info *tx_info;
271 struct ath_atx_tid *tid = NULL; 274 struct ath_atx_tid *tid = NULL;
272 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 275 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
273 struct ath_desc *ds = bf_last->bf_desc; 276 struct ath_desc *ds = bf_last->bf_desc;
@@ -280,9 +283,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
280 skb = bf->bf_mpdu; 283 skb = bf->bf_mpdu;
281 hdr = (struct ieee80211_hdr *)skb->data; 284 hdr = (struct ieee80211_hdr *)skb->data;
282 285
286 tx_info = IEEE80211_SKB_CB(skb);
287 hw = bf->aphy->hw;
288
283 rcu_read_lock(); 289 rcu_read_lock();
284 290
285 sta = ieee80211_find_sta(sc->hw, hdr->addr1); 291 /* XXX: use ieee80211_find_sta! */
292 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
286 if (!sta) { 293 if (!sta) {
287 rcu_read_unlock(); 294 rcu_read_unlock();
288 return; 295 return;
@@ -358,7 +365,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
358 else 365 else
359 INIT_LIST_HEAD(&bf_head); 366 INIT_LIST_HEAD(&bf_head);
360 } else { 367 } else {
361 ASSERT(!list_empty(bf_q)); 368 BUG_ON(list_empty(bf_q));
362 list_move_tail(&bf->list, &bf_head); 369 list_move_tail(&bf->list, &bf_head);
363 } 370 }
364 371
@@ -456,7 +463,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
456 struct sk_buff *skb; 463 struct sk_buff *skb;
457 struct ieee80211_tx_info *tx_info; 464 struct ieee80211_tx_info *tx_info;
458 struct ieee80211_tx_rate *rates; 465 struct ieee80211_tx_rate *rates;
459 struct ath_tx_info_priv *tx_info_priv;
460 u32 max_4ms_framelen, frmlen; 466 u32 max_4ms_framelen, frmlen;
461 u16 aggr_limit, legacy = 0; 467 u16 aggr_limit, legacy = 0;
462 int i; 468 int i;
@@ -464,7 +470,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
464 skb = bf->bf_mpdu; 470 skb = bf->bf_mpdu;
465 tx_info = IEEE80211_SKB_CB(skb); 471 tx_info = IEEE80211_SKB_CB(skb);
466 rates = tx_info->control.rates; 472 rates = tx_info->control.rates;
467 tx_info_priv = (struct ath_tx_info_priv *)tx_info->rate_driver_data[0];
468 473
469 /* 474 /*
470 * Find the lowest frame length among the rate series that will have a 475 * Find the lowest frame length among the rate series that will have a
@@ -694,7 +699,6 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
694 /* anchor last desc of aggregate */ 699 /* anchor last desc of aggregate */
695 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc); 700 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
696 701
697 txq->axq_aggr_depth++;
698 ath_tx_txqaddbuf(sc, txq, &bf_q); 702 ath_tx_txqaddbuf(sc, txq, &bf_q);
699 TX_STAT_INC(txq->axq_qnum, a_aggr); 703 TX_STAT_INC(txq->axq_qnum, a_aggr);
700 704
@@ -815,6 +819,7 @@ static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
815struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 819struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
816{ 820{
817 struct ath_hw *ah = sc->sc_ah; 821 struct ath_hw *ah = sc->sc_ah;
822 struct ath_common *common = ath9k_hw_common(ah);
818 struct ath9k_tx_queue_info qi; 823 struct ath9k_tx_queue_info qi;
819 int qnum; 824 int qnum;
820 825
@@ -854,9 +859,9 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
854 return NULL; 859 return NULL;
855 } 860 }
856 if (qnum >= ARRAY_SIZE(sc->tx.txq)) { 861 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
857 DPRINTF(sc, ATH_DBG_FATAL, 862 ath_print(common, ATH_DBG_FATAL,
858 "qnum %u out of range, max %u!\n", 863 "qnum %u out of range, max %u!\n",
859 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq)); 864 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
860 ath9k_hw_releasetxqueue(ah, qnum); 865 ath9k_hw_releasetxqueue(ah, qnum);
861 return NULL; 866 return NULL;
862 } 867 }
@@ -869,8 +874,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
869 INIT_LIST_HEAD(&txq->axq_acq); 874 INIT_LIST_HEAD(&txq->axq_acq);
870 spin_lock_init(&txq->axq_lock); 875 spin_lock_init(&txq->axq_lock);
871 txq->axq_depth = 0; 876 txq->axq_depth = 0;
872 txq->axq_aggr_depth = 0;
873 txq->axq_linkbuf = NULL;
874 txq->axq_tx_inprogress = false; 877 txq->axq_tx_inprogress = false;
875 sc->tx.txqsetup |= 1<<qnum; 878 sc->tx.txqsetup |= 1<<qnum;
876 } 879 }
@@ -884,9 +887,9 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
884 switch (qtype) { 887 switch (qtype) {
885 case ATH9K_TX_QUEUE_DATA: 888 case ATH9K_TX_QUEUE_DATA:
886 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { 889 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
887 DPRINTF(sc, ATH_DBG_FATAL, 890 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
888 "HAL AC %u out of range, max %zu!\n", 891 "HAL AC %u out of range, max %zu!\n",
889 haltype, ARRAY_SIZE(sc->tx.hwq_map)); 892 haltype, ARRAY_SIZE(sc->tx.hwq_map));
890 return -1; 893 return -1;
891 } 894 }
892 qnum = sc->tx.hwq_map[haltype]; 895 qnum = sc->tx.hwq_map[haltype];
@@ -906,18 +909,19 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
906struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) 909struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
907{ 910{
908 struct ath_txq *txq = NULL; 911 struct ath_txq *txq = NULL;
912 u16 skb_queue = skb_get_queue_mapping(skb);
909 int qnum; 913 int qnum;
910 914
911 qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); 915 qnum = ath_get_hal_qnum(skb_queue, sc);
912 txq = &sc->tx.txq[qnum]; 916 txq = &sc->tx.txq[qnum];
913 917
914 spin_lock_bh(&txq->axq_lock); 918 spin_lock_bh(&txq->axq_lock);
915 919
916 if (txq->axq_depth >= (ATH_TXBUF - 20)) { 920 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
917 DPRINTF(sc, ATH_DBG_XMIT, 921 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
918 "TX queue: %d is full, depth: %d\n", 922 "TX queue: %d is full, depth: %d\n",
919 qnum, txq->axq_depth); 923 qnum, txq->axq_depth);
920 ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb)); 924 ath_mac80211_stop_queue(sc, skb_queue);
921 txq->stopped = 1; 925 txq->stopped = 1;
922 spin_unlock_bh(&txq->axq_lock); 926 spin_unlock_bh(&txq->axq_lock);
923 return NULL; 927 return NULL;
@@ -945,7 +949,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
945 return 0; 949 return 0;
946 } 950 }
947 951
948 ASSERT(sc->tx.txq[qnum].axq_qnum == qnum); 952 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
949 953
950 ath9k_hw_get_txq_props(ah, qnum, &qi); 954 ath9k_hw_get_txq_props(ah, qnum, &qi);
951 qi.tqi_aifs = qinfo->tqi_aifs; 955 qi.tqi_aifs = qinfo->tqi_aifs;
@@ -955,8 +959,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
955 qi.tqi_readyTime = qinfo->tqi_readyTime; 959 qi.tqi_readyTime = qinfo->tqi_readyTime;
956 960
957 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 961 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
958 DPRINTF(sc, ATH_DBG_FATAL, 962 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
959 "Unable to update hardware queue %u!\n", qnum); 963 "Unable to update hardware queue %u!\n", qnum);
960 error = -EIO; 964 error = -EIO;
961 } else { 965 } else {
962 ath9k_hw_resettxqueue(ah, qnum); 966 ath9k_hw_resettxqueue(ah, qnum);
@@ -1004,7 +1008,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1004 1008
1005 if (list_empty(&txq->axq_q)) { 1009 if (list_empty(&txq->axq_q)) {
1006 txq->axq_link = NULL; 1010 txq->axq_link = NULL;
1007 txq->axq_linkbuf = NULL;
1008 spin_unlock_bh(&txq->axq_lock); 1011 spin_unlock_bh(&txq->axq_lock);
1009 break; 1012 break;
1010 } 1013 }
@@ -1055,6 +1058,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1055void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1058void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1056{ 1059{
1057 struct ath_hw *ah = sc->sc_ah; 1060 struct ath_hw *ah = sc->sc_ah;
1061 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1058 struct ath_txq *txq; 1062 struct ath_txq *txq;
1059 int i, npend = 0; 1063 int i, npend = 0;
1060 1064
@@ -1076,14 +1080,15 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1076 if (npend) { 1080 if (npend) {
1077 int r; 1081 int r;
1078 1082
1079 DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n"); 1083 ath_print(common, ATH_DBG_XMIT,
1084 "Unable to stop TxDMA. Reset HAL!\n");
1080 1085
1081 spin_lock_bh(&sc->sc_resetlock); 1086 spin_lock_bh(&sc->sc_resetlock);
1082 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true); 1087 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
1083 if (r) 1088 if (r)
1084 DPRINTF(sc, ATH_DBG_FATAL, 1089 ath_print(common, ATH_DBG_FATAL,
1085 "Unable to reset hardware; reset status %d\n", 1090 "Unable to reset hardware; reset status %d\n",
1086 r); 1091 r);
1087 spin_unlock_bh(&sc->sc_resetlock); 1092 spin_unlock_bh(&sc->sc_resetlock);
1088 } 1093 }
1089 1094
@@ -1147,8 +1152,8 @@ int ath_tx_setup(struct ath_softc *sc, int haltype)
1147 struct ath_txq *txq; 1152 struct ath_txq *txq;
1148 1153
1149 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { 1154 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1150 DPRINTF(sc, ATH_DBG_FATAL, 1155 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1151 "HAL AC %u out of range, max %zu!\n", 1156 "HAL AC %u out of range, max %zu!\n",
1152 haltype, ARRAY_SIZE(sc->tx.hwq_map)); 1157 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1153 return 0; 1158 return 0;
1154 } 1159 }
@@ -1172,6 +1177,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1172 struct list_head *head) 1177 struct list_head *head)
1173{ 1178{
1174 struct ath_hw *ah = sc->sc_ah; 1179 struct ath_hw *ah = sc->sc_ah;
1180 struct ath_common *common = ath9k_hw_common(ah);
1175 struct ath_buf *bf; 1181 struct ath_buf *bf;
1176 1182
1177 /* 1183 /*
@@ -1186,21 +1192,20 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1186 1192
1187 list_splice_tail_init(head, &txq->axq_q); 1193 list_splice_tail_init(head, &txq->axq_q);
1188 txq->axq_depth++; 1194 txq->axq_depth++;
1189 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
1190 1195
1191 DPRINTF(sc, ATH_DBG_QUEUE, 1196 ath_print(common, ATH_DBG_QUEUE,
1192 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1197 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
1193 1198
1194 if (txq->axq_link == NULL) { 1199 if (txq->axq_link == NULL) {
1195 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1200 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1196 DPRINTF(sc, ATH_DBG_XMIT, 1201 ath_print(common, ATH_DBG_XMIT,
1197 "TXDP[%u] = %llx (%p)\n", 1202 "TXDP[%u] = %llx (%p)\n",
1198 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1203 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1199 } else { 1204 } else {
1200 *txq->axq_link = bf->bf_daddr; 1205 *txq->axq_link = bf->bf_daddr;
1201 DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n", 1206 ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1202 txq->axq_qnum, txq->axq_link, 1207 txq->axq_qnum, txq->axq_link,
1203 ito64(bf->bf_daddr), bf->bf_desc); 1208 ito64(bf->bf_daddr), bf->bf_desc);
1204 } 1209 }
1205 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); 1210 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1206 ath9k_hw_txstart(ah, txq->axq_qnum); 1211 ath9k_hw_txstart(ah, txq->axq_qnum);
@@ -1452,6 +1457,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1452 1457
1453static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 1458static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1454{ 1459{
1460 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1455 const struct ath_rate_table *rt = sc->cur_rate_table; 1461 const struct ath_rate_table *rt = sc->cur_rate_table;
1456 struct ath9k_11n_rate_series series[4]; 1462 struct ath9k_11n_rate_series series[4];
1457 struct sk_buff *skb; 1463 struct sk_buff *skb;
@@ -1507,7 +1513,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1507 1513
1508 rix = rates[i].idx; 1514 rix = rates[i].idx;
1509 series[i].Tries = rates[i].count; 1515 series[i].Tries = rates[i].count;
1510 series[i].ChSel = sc->tx_chainmask; 1516 series[i].ChSel = common->tx_chainmask;
1511 1517
1512 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 1518 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1513 series[i].Rate = rt->info[rix].ratecode | 1519 series[i].Rate = rt->info[rix].ratecode |
@@ -1546,24 +1552,29 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1546 struct ath_softc *sc = aphy->sc; 1552 struct ath_softc *sc = aphy->sc;
1547 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1553 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1548 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1554 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1549 struct ath_tx_info_priv *tx_info_priv;
1550 int hdrlen; 1555 int hdrlen;
1551 __le16 fc; 1556 __le16 fc;
1552 1557
1553 tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC); 1558 tx_info->pad[0] = 0;
1554 if (unlikely(!tx_info_priv)) 1559 switch (txctl->frame_type) {
1555 return -ENOMEM; 1560 case ATH9K_NOT_INTERNAL:
1556 tx_info->rate_driver_data[0] = tx_info_priv; 1561 break;
1557 tx_info_priv->aphy = aphy; 1562 case ATH9K_INT_PAUSE:
1558 tx_info_priv->frame_type = txctl->frame_type; 1563 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1564 /* fall through */
1565 case ATH9K_INT_UNPAUSE:
1566 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1567 break;
1568 }
1559 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 1569 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1560 fc = hdr->frame_control; 1570 fc = hdr->frame_control;
1561 1571
1562 ATH_TXBUF_RESET(bf); 1572 ATH_TXBUF_RESET(bf);
1563 1573
1574 bf->aphy = aphy;
1564 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3); 1575 bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3);
1565 1576
1566 if (conf_is_ht(&sc->hw->conf) && !is_pae(skb)) 1577 if (conf_is_ht(&hw->conf) && !is_pae(skb))
1567 bf->bf_state.bf_type |= BUF_HT; 1578 bf->bf_state.bf_type |= BUF_HT;
1568 1579
1569 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1580 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq);
@@ -1585,9 +1596,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1585 skb->len, DMA_TO_DEVICE); 1596 skb->len, DMA_TO_DEVICE);
1586 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) { 1597 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) {
1587 bf->bf_mpdu = NULL; 1598 bf->bf_mpdu = NULL;
1588 kfree(tx_info_priv); 1599 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1589 tx_info->rate_driver_data[0] = NULL; 1600 "dma_mapping_error() on TX\n");
1590 DPRINTF(sc, ATH_DBG_FATAL, "dma_mapping_error() on TX\n");
1591 return -ENOMEM; 1601 return -ENOMEM;
1592 } 1602 }
1593 1603
@@ -1669,12 +1679,13 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1669{ 1679{
1670 struct ath_wiphy *aphy = hw->priv; 1680 struct ath_wiphy *aphy = hw->priv;
1671 struct ath_softc *sc = aphy->sc; 1681 struct ath_softc *sc = aphy->sc;
1682 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1672 struct ath_buf *bf; 1683 struct ath_buf *bf;
1673 int r; 1684 int r;
1674 1685
1675 bf = ath_tx_get_buffer(sc); 1686 bf = ath_tx_get_buffer(sc);
1676 if (!bf) { 1687 if (!bf) {
1677 DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n"); 1688 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
1678 return -1; 1689 return -1;
1679 } 1690 }
1680 1691
@@ -1682,7 +1693,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1682 if (unlikely(r)) { 1693 if (unlikely(r)) {
1683 struct ath_txq *txq = txctl->txq; 1694 struct ath_txq *txq = txctl->txq;
1684 1695
1685 DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n"); 1696 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
1686 1697
1687 /* upon ath_tx_processq() this TX queue will be resumed, we 1698 /* upon ath_tx_processq() this TX queue will be resumed, we
1688 * guarantee this will happen by knowing beforehand that 1699 * guarantee this will happen by knowing beforehand that
@@ -1690,8 +1701,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1690 * on the queue */ 1701 * on the queue */
1691 spin_lock_bh(&txq->axq_lock); 1702 spin_lock_bh(&txq->axq_lock);
1692 if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) { 1703 if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
1693 ieee80211_stop_queue(sc->hw, 1704 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1694 skb_get_queue_mapping(skb));
1695 txq->stopped = 1; 1705 txq->stopped = 1;
1696 } 1706 }
1697 spin_unlock_bh(&txq->axq_lock); 1707 spin_unlock_bh(&txq->axq_lock);
@@ -1712,6 +1722,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1712{ 1722{
1713 struct ath_wiphy *aphy = hw->priv; 1723 struct ath_wiphy *aphy = hw->priv;
1714 struct ath_softc *sc = aphy->sc; 1724 struct ath_softc *sc = aphy->sc;
1725 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1715 int hdrlen, padsize; 1726 int hdrlen, padsize;
1716 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1727 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1717 struct ath_tx_control txctl; 1728 struct ath_tx_control txctl;
@@ -1736,7 +1747,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1736 if (hdrlen & 3) { 1747 if (hdrlen & 3) {
1737 padsize = hdrlen % 4; 1748 padsize = hdrlen % 4;
1738 if (skb_headroom(skb) < padsize) { 1749 if (skb_headroom(skb) < padsize) {
1739 DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n"); 1750 ath_print(common, ATH_DBG_XMIT,
1751 "TX CABQ padding failed\n");
1740 dev_kfree_skb_any(skb); 1752 dev_kfree_skb_any(skb);
1741 return; 1753 return;
1742 } 1754 }
@@ -1746,10 +1758,11 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1746 1758
1747 txctl.txq = sc->beacon.cabq; 1759 txctl.txq = sc->beacon.cabq;
1748 1760
1749 DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb); 1761 ath_print(common, ATH_DBG_XMIT,
1762 "transmitting CABQ packet, skb: %p\n", skb);
1750 1763
1751 if (ath_tx_start(hw, skb, &txctl) != 0) { 1764 if (ath_tx_start(hw, skb, &txctl) != 0) {
1752 DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n"); 1765 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
1753 goto exit; 1766 goto exit;
1754 } 1767 }
1755 1768
@@ -1763,26 +1776,17 @@ exit:
1763/*****************/ 1776/*****************/
1764 1777
1765static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1778static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1766 int tx_flags) 1779 struct ath_wiphy *aphy, int tx_flags)
1767{ 1780{
1768 struct ieee80211_hw *hw = sc->hw; 1781 struct ieee80211_hw *hw = sc->hw;
1769 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1782 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1770 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 1783 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1771 int hdrlen, padsize; 1784 int hdrlen, padsize;
1772 int frame_type = ATH9K_NOT_INTERNAL;
1773 1785
1774 DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1786 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1775 1787
1776 if (tx_info_priv) { 1788 if (aphy)
1777 hw = tx_info_priv->aphy->hw; 1789 hw = aphy->hw;
1778 frame_type = tx_info_priv->frame_type;
1779 }
1780
1781 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK ||
1782 tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
1783 kfree(tx_info_priv);
1784 tx_info->rate_driver_data[0] = NULL;
1785 }
1786 1790
1787 if (tx_flags & ATH_TX_BAR) 1791 if (tx_flags & ATH_TX_BAR)
1788 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1792 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -1805,18 +1809,19 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1805 1809
1806 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) { 1810 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
1807 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK; 1811 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
1808 DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having " 1812 ath_print(common, ATH_DBG_PS,
1809 "received TX status (0x%x)\n", 1813 "Going back to sleep after having "
1814 "received TX status (0x%x)\n",
1810 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 1815 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
1811 SC_OP_WAIT_FOR_CAB | 1816 SC_OP_WAIT_FOR_CAB |
1812 SC_OP_WAIT_FOR_PSPOLL_DATA | 1817 SC_OP_WAIT_FOR_PSPOLL_DATA |
1813 SC_OP_WAIT_FOR_TX_ACK)); 1818 SC_OP_WAIT_FOR_TX_ACK));
1814 } 1819 }
1815 1820
1816 if (frame_type == ATH9K_NOT_INTERNAL) 1821 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
1817 ieee80211_tx_status(hw, skb);
1818 else
1819 ath9k_tx_status(hw, skb); 1822 ath9k_tx_status(hw, skb);
1823 else
1824 ieee80211_tx_status(hw, skb);
1820} 1825}
1821 1826
1822static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1827static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1839,7 +1844,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1839 } 1844 }
1840 1845
1841 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1846 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1842 ath_tx_complete(sc, skb, tx_flags); 1847 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1843 ath_debug_stat_tx(sc, txq, bf); 1848 ath_debug_stat_tx(sc, txq, bf);
1844 1849
1845 /* 1850 /*
@@ -1887,8 +1892,7 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
1887 struct sk_buff *skb = bf->bf_mpdu; 1892 struct sk_buff *skb = bf->bf_mpdu;
1888 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1893 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1889 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1894 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1890 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 1895 struct ieee80211_hw *hw = bf->aphy->hw;
1891 struct ieee80211_hw *hw = tx_info_priv->aphy->hw;
1892 u8 i, tx_rateindex; 1896 u8 i, tx_rateindex;
1893 1897
1894 if (txok) 1898 if (txok)
@@ -1897,17 +1901,22 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
1897 tx_rateindex = ds->ds_txstat.ts_rateindex; 1901 tx_rateindex = ds->ds_txstat.ts_rateindex;
1898 WARN_ON(tx_rateindex >= hw->max_rates); 1902 WARN_ON(tx_rateindex >= hw->max_rates);
1899 1903
1900 tx_info_priv->update_rc = update_rc; 1904 if (update_rc)
1905 tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
1901 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) 1906 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1902 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1907 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1903 1908
1904 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && 1909 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
1905 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 1910 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
1906 if (ieee80211_is_data(hdr->frame_control)) { 1911 if (ieee80211_is_data(hdr->frame_control)) {
1907 memcpy(&tx_info_priv->tx, &ds->ds_txstat, 1912 if (ds->ds_txstat.ts_flags &
1908 sizeof(tx_info_priv->tx)); 1913 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
1909 tx_info_priv->n_frames = bf->bf_nframes; 1914 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
1910 tx_info_priv->n_bad_frames = nbad; 1915 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) ||
1916 (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO))
1917 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
1918 tx_info->status.ampdu_len = bf->bf_nframes;
1919 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
1911 } 1920 }
1912 } 1921 }
1913 1922
@@ -1926,7 +1935,7 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
1926 sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) { 1935 sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
1927 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc); 1936 qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
1928 if (qnum != -1) { 1937 if (qnum != -1) {
1929 ieee80211_wake_queue(sc->hw, qnum); 1938 ath_mac80211_start_queue(sc, qnum);
1930 txq->stopped = 0; 1939 txq->stopped = 0;
1931 } 1940 }
1932 } 1941 }
@@ -1936,21 +1945,21 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
1936static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 1945static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1937{ 1946{
1938 struct ath_hw *ah = sc->sc_ah; 1947 struct ath_hw *ah = sc->sc_ah;
1948 struct ath_common *common = ath9k_hw_common(ah);
1939 struct ath_buf *bf, *lastbf, *bf_held = NULL; 1949 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1940 struct list_head bf_head; 1950 struct list_head bf_head;
1941 struct ath_desc *ds; 1951 struct ath_desc *ds;
1942 int txok; 1952 int txok;
1943 int status; 1953 int status;
1944 1954
1945 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 1955 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
1946 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 1956 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1947 txq->axq_link); 1957 txq->axq_link);
1948 1958
1949 for (;;) { 1959 for (;;) {
1950 spin_lock_bh(&txq->axq_lock); 1960 spin_lock_bh(&txq->axq_lock);
1951 if (list_empty(&txq->axq_q)) { 1961 if (list_empty(&txq->axq_q)) {
1952 txq->axq_link = NULL; 1962 txq->axq_link = NULL;
1953 txq->axq_linkbuf = NULL;
1954 spin_unlock_bh(&txq->axq_lock); 1963 spin_unlock_bh(&txq->axq_lock);
1955 break; 1964 break;
1956 } 1965 }
@@ -1984,10 +1993,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1984 spin_unlock_bh(&txq->axq_lock); 1993 spin_unlock_bh(&txq->axq_lock);
1985 break; 1994 break;
1986 } 1995 }
1987 if (bf->bf_desc == txq->axq_lastdsWithCTS)
1988 txq->axq_lastdsWithCTS = NULL;
1989 if (ds == txq->axq_gatingds)
1990 txq->axq_gatingds = NULL;
1991 1996
1992 /* 1997 /*
1993 * Remove ath_buf's of the same transmit unit from txq, 1998 * Remove ath_buf's of the same transmit unit from txq,
@@ -2001,9 +2006,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2001 &txq->axq_q, lastbf->list.prev); 2006 &txq->axq_q, lastbf->list.prev);
2002 2007
2003 txq->axq_depth--; 2008 txq->axq_depth--;
2004 if (bf_isaggr(bf))
2005 txq->axq_aggr_depth--;
2006
2007 txok = (ds->ds_txstat.ts_status == 0); 2009 txok = (ds->ds_txstat.ts_status == 0);
2008 txq->axq_tx_inprogress = false; 2010 txq->axq_tx_inprogress = false;
2009 spin_unlock_bh(&txq->axq_lock); 2011 spin_unlock_bh(&txq->axq_lock);
@@ -2064,8 +2066,11 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2064 } 2066 }
2065 2067
2066 if (needreset) { 2068 if (needreset) {
2067 DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n"); 2069 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2070 "tx hung, resetting the chip\n");
2071 ath9k_ps_wakeup(sc);
2068 ath_reset(sc, false); 2072 ath_reset(sc, false);
2073 ath9k_ps_restore(sc);
2069 } 2074 }
2070 2075
2071 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2076 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2093,6 +2098,7 @@ void ath_tx_tasklet(struct ath_softc *sc)
2093 2098
2094int ath_tx_init(struct ath_softc *sc, int nbufs) 2099int ath_tx_init(struct ath_softc *sc, int nbufs)
2095{ 2100{
2101 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2096 int error = 0; 2102 int error = 0;
2097 2103
2098 spin_lock_init(&sc->tx.txbuflock); 2104 spin_lock_init(&sc->tx.txbuflock);
@@ -2100,16 +2106,16 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2100 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2106 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2101 "tx", nbufs, 1); 2107 "tx", nbufs, 1);
2102 if (error != 0) { 2108 if (error != 0) {
2103 DPRINTF(sc, ATH_DBG_FATAL, 2109 ath_print(common, ATH_DBG_FATAL,
2104 "Failed to allocate tx descriptors: %d\n", error); 2110 "Failed to allocate tx descriptors: %d\n", error);
2105 goto err; 2111 goto err;
2106 } 2112 }
2107 2113
2108 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2114 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2109 "beacon", ATH_BCBUF, 1); 2115 "beacon", ATH_BCBUF, 1);
2110 if (error != 0) { 2116 if (error != 0) {
2111 DPRINTF(sc, ATH_DBG_FATAL, 2117 ath_print(common, ATH_DBG_FATAL,
2112 "Failed to allocate beacon descriptors: %d\n", error); 2118 "Failed to allocate beacon descriptors: %d\n", error);
2113 goto err; 2119 goto err;
2114 } 2120 }
2115 2121
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
new file mode 100644
index 000000000000..53e77bd131b9
--- /dev/null
+++ b/drivers/net/wireless/ath/debug.c
@@ -0,0 +1,32 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath.h"
18#include "debug.h"
19
20void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
21{
22 va_list args;
23
24 if (likely(!(common->debug_mask & dbg_mask)))
25 return;
26
27 va_start(args, fmt);
28 printk(KERN_DEBUG "ath: ");
29 vprintk(fmt, args);
30 va_end(args);
31}
32EXPORT_SYMBOL(ath_print);
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
new file mode 100644
index 000000000000..d6b685a06c5e
--- /dev/null
+++ b/drivers/net/wireless/ath/debug.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH_DEBUG_H
18#define ATH_DEBUG_H
19
20#include "ath.h"
21
22/**
23 * enum ath_debug_level - atheros wireless debug level
24 *
25 * @ATH_DBG_RESET: reset processing
26 * @ATH_DBG_QUEUE: hardware queue management
27 * @ATH_DBG_EEPROM: eeprom processing
28 * @ATH_DBG_CALIBRATE: periodic calibration
29 * @ATH_DBG_INTERRUPT: interrupt processing
30 * @ATH_DBG_REGULATORY: regulatory processing
31 * @ATH_DBG_ANI: adaptive noise immunitive processing
32 * @ATH_DBG_XMIT: basic xmit operation
33 * @ATH_DBG_BEACON: beacon handling
34 * @ATH_DBG_CONFIG: configuration of the hardware
35 * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
36 * @ATH_DBG_PS: power save processing
37 * @ATH_DBG_HWTIMER: hardware timer handling
38 * @ATH_DBG_BTCOEX: bluetooth coexistance
39 * @ATH_DBG_ANY: enable all debugging
40 *
41 * The debug level is used to control the amount and type of debugging output
42 * we want to see. Each driver has its own method for enabling debugging and
43 * modifying debug level states -- but this is typically done through a
44 * module parameter 'debug' along with a respective 'debug' debugfs file
45 * entry.
46 */
47enum ATH_DEBUG {
48 ATH_DBG_RESET = 0x00000001,
49 ATH_DBG_QUEUE = 0x00000002,
50 ATH_DBG_EEPROM = 0x00000004,
51 ATH_DBG_CALIBRATE = 0x00000008,
52 ATH_DBG_INTERRUPT = 0x00000010,
53 ATH_DBG_REGULATORY = 0x00000020,
54 ATH_DBG_ANI = 0x00000040,
55 ATH_DBG_XMIT = 0x00000080,
56 ATH_DBG_BEACON = 0x00000100,
57 ATH_DBG_CONFIG = 0x00000200,
58 ATH_DBG_FATAL = 0x00000400,
59 ATH_DBG_PS = 0x00000800,
60 ATH_DBG_HWTIMER = 0x00001000,
61 ATH_DBG_BTCOEX = 0x00002000,
62 ATH_DBG_ANY = 0xffffffff
63};
64
65#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
66
67#ifdef CONFIG_ATH_DEBUG
68void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...);
69#else
70static inline void ath_print(struct ath_common *common,
71 int dbg_mask,
72 const char *fmt, ...)
73{
74}
75#endif /* CONFIG_ATH_DEBUG */
76
77#endif /* ATH_DEBUG_H */
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
new file mode 100644
index 000000000000..ecc9eb01f4fa
--- /dev/null
+++ b/drivers/net/wireless/ath/hw.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <asm/unaligned.h>
18
19#include "ath.h"
20#include "reg.h"
21
22#define REG_READ common->ops->read
23#define REG_WRITE common->ops->write
24
25/**
26 * ath_hw_set_bssid_mask - filter out bssids we listen
27 *
28 * @common: the ath_common struct for the device.
29 *
30 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
31 * which bits of the interface's MAC address should be looked at when trying
32 * to decide which packets to ACK. In station mode and AP mode with a single
33 * BSS every bit matters since we lock to only one BSS. In AP mode with
34 * multiple BSSes (virtual interfaces) not every bit matters because hw must
35 * accept frames for all BSSes and so we tweak some bits of our mac address
36 * in order to have multiple BSSes.
37 *
38 * NOTE: This is a simple filter and does *not* filter out all
39 * relevant frames. Some frames that are not for us might get ACKed from us
40 * by PCU because they just match the mask.
41 *
42 * When handling multiple BSSes you can get the BSSID mask by computing the
43 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
44 *
45 * When you do this you are essentially computing the common bits of all your
46 * BSSes. Later it is assumed the harware will "and" (&) the BSSID mask with
47 * the MAC address to obtain the relevant bits and compare the result with
48 * (frame's BSSID & mask) to see if they match.
49 *
50 * Simple example: on your card you have have two BSSes you have created with
51 * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
52 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
53 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
54 *
55 * \
56 * MAC: 0001 |
57 * BSSID-01: 0100 | --> Belongs to us
58 * BSSID-02: 1001 |
59 * /
60 * -------------------
61 * BSSID-03: 0110 | --> External
62 * -------------------
63 *
64 * Our bssid_mask would then be:
65 *
66 * On loop iteration for BSSID-01:
67 * ~(0001 ^ 0100) -> ~(0101)
68 * -> 1010
69 * bssid_mask = 1010
70 *
71 * On loop iteration for BSSID-02:
72 * bssid_mask &= ~(0001 ^ 1001)
73 * bssid_mask = (1010) & ~(0001 ^ 1001)
74 * bssid_mask = (1010) & ~(1001)
75 * bssid_mask = (1010) & (0110)
76 * bssid_mask = 0010
77 *
78 * A bssid_mask of 0010 means "only pay attention to the second least
79 * significant bit". This is because its the only bit common
80 * amongst the MAC and all BSSIDs we support. To findout what the real
81 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
82 * or our MAC address (we assume the hardware uses the MAC address).
83 *
84 * Now, suppose there's an incoming frame for BSSID-03:
85 *
86 * IFRAME-01: 0110
87 *
88 * An easy eye-inspeciton of this already should tell you that this frame
89 * will not pass our check. This is beacuse the bssid_mask tells the
90 * hardware to only look at the second least significant bit and the
91 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
92 * as 1, which does not match 0.
93 *
94 * So with IFRAME-01 we *assume* the hardware will do:
95 *
96 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
97 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
98 * --> allow = (0010) == 0000 ? 1 : 0;
99 * --> allow = 0
100 *
101 * Lets now test a frame that should work:
102 *
103 * IFRAME-02: 0001 (we should allow)
104 *
105 * allow = (0001 & 1010) == 1010
106 *
107 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
108 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
109 * --> allow = (0010) == (0010)
110 * --> allow = 1
111 *
112 * Other examples:
113 *
114 * IFRAME-03: 0100 --> allowed
115 * IFRAME-04: 1001 --> allowed
116 * IFRAME-05: 1101 --> allowed but its not for us!!!
117 *
118 */
119void ath_hw_setbssidmask(struct ath_common *common)
120{
121 void *ah = common->ah;
122
123 REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL);
124 REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU);
125}
126EXPORT_SYMBOL(ath_hw_setbssidmask);
diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h
new file mode 100644
index 000000000000..dfe1fbec24f5
--- /dev/null
+++ b/drivers/net/wireless/ath/reg.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH_REGISTERS_H
18#define ATH_REGISTERS_H
19
20/*
21 * BSSID mask registers. See ath_hw_set_bssid_mask()
22 * for detailed documentation about these registers.
23 */
24#define AR_BSSMSKL 0x80e0
25#define AR_BSSMSKU 0x80e4
26
27#endif /* ATH_REGISTERS_H */
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 077bcc142cde..039ac490465c 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -450,7 +450,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
450 const struct ieee80211_regdomain *regd; 450 const struct ieee80211_regdomain *regd;
451 451
452 wiphy->reg_notifier = reg_notifier; 452 wiphy->reg_notifier = reg_notifier;
453 wiphy->strict_regulatory = true; 453 wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
454 454
455 if (ath_is_world_regd(reg)) { 455 if (ath_is_world_regd(reg)) {
456 /* 456 /*
@@ -458,8 +458,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
458 * saved on the wiphy orig_* parameters 458 * saved on the wiphy orig_* parameters
459 */ 459 */
460 regd = ath_world_regdomain(reg); 460 regd = ath_world_regdomain(reg);
461 wiphy->custom_regulatory = true; 461 wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
462 wiphy->strict_regulatory = false;
463 } else { 462 } else {
464 /* 463 /*
465 * This gets applied in the case of the absense of CRDA, 464 * This gets applied in the case of the absense of CRDA,
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index c1dd857697a7..a1c39526161a 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -65,10 +65,13 @@ enum CountryCode {
65 CTRY_ALGERIA = 12, 65 CTRY_ALGERIA = 12,
66 CTRY_ARGENTINA = 32, 66 CTRY_ARGENTINA = 32,
67 CTRY_ARMENIA = 51, 67 CTRY_ARMENIA = 51,
68 CTRY_ARUBA = 533,
68 CTRY_AUSTRALIA = 36, 69 CTRY_AUSTRALIA = 36,
69 CTRY_AUSTRIA = 40, 70 CTRY_AUSTRIA = 40,
70 CTRY_AZERBAIJAN = 31, 71 CTRY_AZERBAIJAN = 31,
71 CTRY_BAHRAIN = 48, 72 CTRY_BAHRAIN = 48,
73 CTRY_BANGLADESH = 50,
74 CTRY_BARBADOS = 52,
72 CTRY_BELARUS = 112, 75 CTRY_BELARUS = 112,
73 CTRY_BELGIUM = 56, 76 CTRY_BELGIUM = 56,
74 CTRY_BELIZE = 84, 77 CTRY_BELIZE = 84,
@@ -77,6 +80,7 @@ enum CountryCode {
77 CTRY_BRAZIL = 76, 80 CTRY_BRAZIL = 76,
78 CTRY_BRUNEI_DARUSSALAM = 96, 81 CTRY_BRUNEI_DARUSSALAM = 96,
79 CTRY_BULGARIA = 100, 82 CTRY_BULGARIA = 100,
83 CTRY_CAMBODIA = 116,
80 CTRY_CANADA = 124, 84 CTRY_CANADA = 124,
81 CTRY_CHILE = 152, 85 CTRY_CHILE = 152,
82 CTRY_CHINA = 156, 86 CTRY_CHINA = 156,
@@ -97,7 +101,11 @@ enum CountryCode {
97 CTRY_GEORGIA = 268, 101 CTRY_GEORGIA = 268,
98 CTRY_GERMANY = 276, 102 CTRY_GERMANY = 276,
99 CTRY_GREECE = 300, 103 CTRY_GREECE = 300,
104 CTRY_GREENLAND = 304,
105 CTRY_GRENEDA = 308,
106 CTRY_GUAM = 316,
100 CTRY_GUATEMALA = 320, 107 CTRY_GUATEMALA = 320,
108 CTRY_HAITI = 332,
101 CTRY_HONDURAS = 340, 109 CTRY_HONDURAS = 340,
102 CTRY_HONG_KONG = 344, 110 CTRY_HONG_KONG = 344,
103 CTRY_HUNGARY = 348, 111 CTRY_HUNGARY = 348,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 9847af72208c..248c670fdfbe 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -288,13 +288,16 @@ static struct country_code_to_enum_rd allCountries[] = {
288 {CTRY_DEFAULT, FCC1_FCCA, "CO"}, 288 {CTRY_DEFAULT, FCC1_FCCA, "CO"},
289 {CTRY_ALBANIA, NULL1_WORLD, "AL"}, 289 {CTRY_ALBANIA, NULL1_WORLD, "AL"},
290 {CTRY_ALGERIA, NULL1_WORLD, "DZ"}, 290 {CTRY_ALGERIA, NULL1_WORLD, "DZ"},
291 {CTRY_ARGENTINA, APL3_WORLD, "AR"}, 291 {CTRY_ARGENTINA, FCC3_WORLD, "AR"},
292 {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, 292 {CTRY_ARMENIA, ETSI4_WORLD, "AM"},
293 {CTRY_ARUBA, ETSI1_WORLD, "AW"},
293 {CTRY_AUSTRALIA, FCC2_WORLD, "AU"}, 294 {CTRY_AUSTRALIA, FCC2_WORLD, "AU"},
294 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, 295 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
295 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, 296 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
296 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, 297 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
297 {CTRY_BAHRAIN, APL6_WORLD, "BH"}, 298 {CTRY_BAHRAIN, APL6_WORLD, "BH"},
299 {CTRY_BANGLADESH, NULL1_WORLD, "BD"},
300 {CTRY_BARBADOS, FCC2_WORLD, "BB"},
298 {CTRY_BELARUS, ETSI1_WORLD, "BY"}, 301 {CTRY_BELARUS, ETSI1_WORLD, "BY"},
299 {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, 302 {CTRY_BELGIUM, ETSI1_WORLD, "BE"},
300 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, 303 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
@@ -304,13 +307,14 @@ static struct country_code_to_enum_rd allCountries[] = {
304 {CTRY_BRAZIL, FCC3_WORLD, "BR"}, 307 {CTRY_BRAZIL, FCC3_WORLD, "BR"},
305 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"}, 308 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"},
306 {CTRY_BULGARIA, ETSI6_WORLD, "BG"}, 309 {CTRY_BULGARIA, ETSI6_WORLD, "BG"},
307 {CTRY_CANADA, FCC2_FCCA, "CA"}, 310 {CTRY_CAMBODIA, ETSI1_WORLD, "KH"},
311 {CTRY_CANADA, FCC3_FCCA, "CA"},
308 {CTRY_CANADA2, FCC6_FCCA, "CA"}, 312 {CTRY_CANADA2, FCC6_FCCA, "CA"},
309 {CTRY_CHILE, APL6_WORLD, "CL"}, 313 {CTRY_CHILE, APL6_WORLD, "CL"},
310 {CTRY_CHINA, APL1_WORLD, "CN"}, 314 {CTRY_CHINA, APL1_WORLD, "CN"},
311 {CTRY_COLOMBIA, FCC1_FCCA, "CO"}, 315 {CTRY_COLOMBIA, FCC1_FCCA, "CO"},
312 {CTRY_COSTA_RICA, FCC1_WORLD, "CR"}, 316 {CTRY_COSTA_RICA, FCC1_WORLD, "CR"},
313 {CTRY_CROATIA, ETSI3_WORLD, "HR"}, 317 {CTRY_CROATIA, ETSI1_WORLD, "HR"},
314 {CTRY_CYPRUS, ETSI1_WORLD, "CY"}, 318 {CTRY_CYPRUS, ETSI1_WORLD, "CY"},
315 {CTRY_CZECH, ETSI3_WORLD, "CZ"}, 319 {CTRY_CZECH, ETSI3_WORLD, "CZ"},
316 {CTRY_DENMARK, ETSI1_WORLD, "DK"}, 320 {CTRY_DENMARK, ETSI1_WORLD, "DK"},
@@ -324,18 +328,22 @@ static struct country_code_to_enum_rd allCountries[] = {
324 {CTRY_GEORGIA, ETSI4_WORLD, "GE"}, 328 {CTRY_GEORGIA, ETSI4_WORLD, "GE"},
325 {CTRY_GERMANY, ETSI1_WORLD, "DE"}, 329 {CTRY_GERMANY, ETSI1_WORLD, "DE"},
326 {CTRY_GREECE, ETSI1_WORLD, "GR"}, 330 {CTRY_GREECE, ETSI1_WORLD, "GR"},
331 {CTRY_GREENLAND, ETSI1_WORLD, "GL"},
332 {CTRY_GRENEDA, FCC3_FCCA, "GD"},
333 {CTRY_GUAM, FCC1_FCCA, "GU"},
327 {CTRY_GUATEMALA, FCC1_FCCA, "GT"}, 334 {CTRY_GUATEMALA, FCC1_FCCA, "GT"},
335 {CTRY_HAITI, ETSI1_WORLD, "HT"},
328 {CTRY_HONDURAS, NULL1_WORLD, "HN"}, 336 {CTRY_HONDURAS, NULL1_WORLD, "HN"},
329 {CTRY_HONG_KONG, FCC2_WORLD, "HK"}, 337 {CTRY_HONG_KONG, FCC3_WORLD, "HK"},
330 {CTRY_HUNGARY, ETSI1_WORLD, "HU"}, 338 {CTRY_HUNGARY, ETSI1_WORLD, "HU"},
331 {CTRY_ICELAND, ETSI1_WORLD, "IS"}, 339 {CTRY_ICELAND, ETSI1_WORLD, "IS"},
332 {CTRY_INDIA, APL6_WORLD, "IN"}, 340 {CTRY_INDIA, APL6_WORLD, "IN"},
333 {CTRY_INDONESIA, APL1_WORLD, "ID"}, 341 {CTRY_INDONESIA, NULL1_WORLD, "ID"},
334 {CTRY_IRAN, APL1_WORLD, "IR"}, 342 {CTRY_IRAN, APL1_WORLD, "IR"},
335 {CTRY_IRELAND, ETSI1_WORLD, "IE"}, 343 {CTRY_IRELAND, ETSI1_WORLD, "IE"},
336 {CTRY_ISRAEL, NULL1_WORLD, "IL"}, 344 {CTRY_ISRAEL, NULL1_WORLD, "IL"},
337 {CTRY_ITALY, ETSI1_WORLD, "IT"}, 345 {CTRY_ITALY, ETSI1_WORLD, "IT"},
338 {CTRY_JAMAICA, ETSI1_WORLD, "JM"}, 346 {CTRY_JAMAICA, FCC3_WORLD, "JM"},
339 347
340 {CTRY_JAPAN, MKK1_MKKA, "JP"}, 348 {CTRY_JAPAN, MKK1_MKKA, "JP"},
341 {CTRY_JAPAN1, MKK1_MKKB, "JP"}, 349 {CTRY_JAPAN1, MKK1_MKKB, "JP"},
@@ -402,7 +410,7 @@ static struct country_code_to_enum_rd allCountries[] = {
402 {CTRY_KOREA_ROC, APL9_WORLD, "KR"}, 410 {CTRY_KOREA_ROC, APL9_WORLD, "KR"},
403 {CTRY_KOREA_ROC2, APL2_WORLD, "K2"}, 411 {CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
404 {CTRY_KOREA_ROC3, APL9_WORLD, "K3"}, 412 {CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
405 {CTRY_KUWAIT, NULL1_WORLD, "KW"}, 413 {CTRY_KUWAIT, ETSI3_WORLD, "KW"},
406 {CTRY_LATVIA, ETSI1_WORLD, "LV"}, 414 {CTRY_LATVIA, ETSI1_WORLD, "LV"},
407 {CTRY_LEBANON, NULL1_WORLD, "LB"}, 415 {CTRY_LEBANON, NULL1_WORLD, "LB"},
408 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"}, 416 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"},
@@ -414,13 +422,13 @@ static struct country_code_to_enum_rd allCountries[] = {
414 {CTRY_MALTA, ETSI1_WORLD, "MT"}, 422 {CTRY_MALTA, ETSI1_WORLD, "MT"},
415 {CTRY_MEXICO, FCC1_FCCA, "MX"}, 423 {CTRY_MEXICO, FCC1_FCCA, "MX"},
416 {CTRY_MONACO, ETSI4_WORLD, "MC"}, 424 {CTRY_MONACO, ETSI4_WORLD, "MC"},
417 {CTRY_MOROCCO, NULL1_WORLD, "MA"}, 425 {CTRY_MOROCCO, APL4_WORLD, "MA"},
418 {CTRY_NEPAL, APL1_WORLD, "NP"}, 426 {CTRY_NEPAL, APL1_WORLD, "NP"},
419 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, 427 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"},
420 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"}, 428 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"},
421 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"}, 429 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"},
422 {CTRY_NORWAY, ETSI1_WORLD, "NO"}, 430 {CTRY_NORWAY, ETSI1_WORLD, "NO"},
423 {CTRY_OMAN, APL6_WORLD, "OM"}, 431 {CTRY_OMAN, FCC3_WORLD, "OM"},
424 {CTRY_PAKISTAN, NULL1_WORLD, "PK"}, 432 {CTRY_PAKISTAN, NULL1_WORLD, "PK"},
425 {CTRY_PANAMA, FCC1_FCCA, "PA"}, 433 {CTRY_PANAMA, FCC1_FCCA, "PA"},
426 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, 434 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"},
@@ -429,7 +437,7 @@ static struct country_code_to_enum_rd allCountries[] = {
429 {CTRY_POLAND, ETSI1_WORLD, "PL"}, 437 {CTRY_POLAND, ETSI1_WORLD, "PL"},
430 {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, 438 {CTRY_PORTUGAL, ETSI1_WORLD, "PT"},
431 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, 439 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"},
432 {CTRY_QATAR, NULL1_WORLD, "QA"}, 440 {CTRY_QATAR, APL1_WORLD, "QA"},
433 {CTRY_ROMANIA, NULL1_WORLD, "RO"}, 441 {CTRY_ROMANIA, NULL1_WORLD, "RO"},
434 {CTRY_RUSSIA, NULL1_WORLD, "RU"}, 442 {CTRY_RUSSIA, NULL1_WORLD, "RU"},
435 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, 443 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
@@ -445,7 +453,7 @@ static struct country_code_to_enum_rd allCountries[] = {
445 {CTRY_SYRIA, NULL1_WORLD, "SY"}, 453 {CTRY_SYRIA, NULL1_WORLD, "SY"},
446 {CTRY_TAIWAN, APL3_FCCA, "TW"}, 454 {CTRY_TAIWAN, APL3_FCCA, "TW"},
447 {CTRY_THAILAND, FCC3_WORLD, "TH"}, 455 {CTRY_THAILAND, FCC3_WORLD, "TH"},
448 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT"}, 456 {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
449 {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, 457 {CTRY_TUNISIA, ETSI3_WORLD, "TN"},
450 {CTRY_TURKEY, ETSI3_WORLD, "TR"}, 458 {CTRY_TURKEY, ETSI3_WORLD, "TR"},
451 {CTRY_UKRAINE, NULL1_WORLD, "UA"}, 459 {CTRY_UKRAINE, NULL1_WORLD, "UA"},
@@ -456,7 +464,7 @@ static struct country_code_to_enum_rd allCountries[] = {
456 * would need to assign new special alpha2 to CRDA db as with the world 464 * would need to assign new special alpha2 to CRDA db as with the world
457 * regdomain and use another alpha2 */ 465 * regdomain and use another alpha2 */
458 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"}, 466 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"},
459 {CTRY_URUGUAY, APL2_WORLD, "UY"}, 467 {CTRY_URUGUAY, FCC3_WORLD, "UY"},
460 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"}, 468 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"},
461 {CTRY_VENEZUELA, APL2_ETSIC, "VE"}, 469 {CTRY_VENEZUELA, APL2_ETSIC, "VE"},
462 {CTRY_VIET_NAM, NULL1_WORLD, "VN"}, 470 {CTRY_VIET_NAM, NULL1_WORLD, "VN"},
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index cce188837d10..3edbbcf0f548 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -99,6 +99,22 @@ static struct {
99 { ATMEL_FW_TYPE_506, "atmel_at76c506", "bin" }, 99 { ATMEL_FW_TYPE_506, "atmel_at76c506", "bin" },
100 { ATMEL_FW_TYPE_NONE, NULL, NULL } 100 { ATMEL_FW_TYPE_NONE, NULL, NULL }
101}; 101};
102MODULE_FIRMWARE("atmel_at76c502-wpa.bin");
103MODULE_FIRMWARE("atmel_at76c502.bin");
104MODULE_FIRMWARE("atmel_at76c502d-wpa.bin");
105MODULE_FIRMWARE("atmel_at76c502d.bin");
106MODULE_FIRMWARE("atmel_at76c502e-wpa.bin");
107MODULE_FIRMWARE("atmel_at76c502e.bin");
108MODULE_FIRMWARE("atmel_at76c502_3com-wpa.bin");
109MODULE_FIRMWARE("atmel_at76c502_3com.bin");
110MODULE_FIRMWARE("atmel_at76c504-wpa.bin");
111MODULE_FIRMWARE("atmel_at76c504.bin");
112MODULE_FIRMWARE("atmel_at76c504_2958-wpa.bin");
113MODULE_FIRMWARE("atmel_at76c504_2958.bin");
114MODULE_FIRMWARE("atmel_at76c504a_2958-wpa.bin");
115MODULE_FIRMWARE("atmel_at76c504a_2958.bin");
116MODULE_FIRMWARE("atmel_at76c506-wpa.bin");
117MODULE_FIRMWARE("atmel_at76c506.bin");
102 118
103#define MAX_SSID_LENGTH 32 119#define MAX_SSID_LENGTH 32
104#define MGMT_JIFFIES (256 * HZ / 100) 120#define MGMT_JIFFIES (256 * HZ / 100)
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 54ea61c15d8b..64c12e1bced3 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -1,6 +1,6 @@
1config B43 1config B43
2 tristate "Broadcom 43xx wireless support (mac80211 stack)" 2 tristate "Broadcom 43xx wireless support (mac80211 stack)"
3 depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA 3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
4 select SSB 4 select SSB
5 select FW_LOADER 5 select FW_LOADER
6 ---help--- 6 ---help---
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 660716214d49..fe3bf9491997 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -26,8 +26,6 @@
26# define B43_DEBUG 0 26# define B43_DEBUG 0
27#endif 27#endif
28 28
29#define B43_RX_MAX_SSI 60
30
31/* MMIO offsets */ 29/* MMIO offsets */
32#define B43_MMIO_DMA0_REASON 0x20 30#define B43_MMIO_DMA0_REASON 0x20
33#define B43_MMIO_DMA0_IRQ_MASK 0x24 31#define B43_MMIO_DMA0_IRQ_MASK 0x24
@@ -749,12 +747,6 @@ struct b43_wldev {
749#endif 747#endif
750}; 748};
751 749
752/*
753 * Include goes here to avoid a dependency problem.
754 * A better fix would be to integrate xmit.h into b43.h.
755 */
756#include "xmit.h"
757
758/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ 750/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
759struct b43_wl { 751struct b43_wl {
760 /* Pointer to the active wireless device on this chip */ 752 /* Pointer to the active wireless device on this chip */
@@ -830,13 +822,9 @@ struct b43_wl {
830 struct b43_leds leds; 822 struct b43_leds leds;
831 823
832#ifdef CONFIG_B43_PIO 824#ifdef CONFIG_B43_PIO
833 /* 825 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
834 * RX/TX header/tail buffers used by the frame transmit functions. 826 u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
835 */ 827 u8 pio_tailspace[4] __attribute__((__aligned__(8)));
836 struct b43_rxhdr_fw4 rxhdr;
837 struct b43_txhdr txhdr;
838 u8 rx_tail[4];
839 u8 tx_tail[4];
840#endif /* CONFIG_B43_PIO */ 828#endif /* CONFIG_B43_PIO */
841}; 829};
842 830
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index de4e804bedf0..027be275e035 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -383,44 +383,160 @@ static inline
383 } 383 }
384} 384}
385 385
386/* Check if a DMA region fits the device constraints.
387 * Returns true, if the region is OK for usage with this device. */
388static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
389 dma_addr_t addr, size_t size)
390{
391 switch (ring->type) {
392 case B43_DMA_30BIT:
393 if ((u64)addr + size > (1ULL << 30))
394 return 0;
395 break;
396 case B43_DMA_32BIT:
397 if ((u64)addr + size > (1ULL << 32))
398 return 0;
399 break;
400 case B43_DMA_64BIT:
401 /* Currently we can't have addresses beyond
402 * 64bit in the kernel. */
403 break;
404 }
405 return 1;
406}
407
408#define is_4k_aligned(addr) (((u64)(addr) & 0x0FFFull) == 0)
409#define is_8k_aligned(addr) (((u64)(addr) & 0x1FFFull) == 0)
410
411static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
412 dma_addr_t dmaaddr, size_t size)
413{
414 ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
415 free_pages((unsigned long)base, get_order(size));
416}
417
418static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
419 dma_addr_t *dmaaddr, size_t size,
420 gfp_t gfp_flags)
421{
422 void *base;
423
424 base = (void *)__get_free_pages(gfp_flags, get_order(size));
425 if (!base)
426 return NULL;
427 memset(base, 0, size);
428 *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
429 DMA_TO_DEVICE);
430 if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
431 free_pages((unsigned long)base, get_order(size));
432 return NULL;
433 }
434
435 return base;
436}
437
438static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
439 dma_addr_t *dmaaddr, size_t size)
440{
441 void *base;
442
443 base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
444 GFP_KERNEL);
445 if (!base) {
446 b43err(ring->dev->wl, "Failed to allocate or map pages "
447 "for DMA ringmemory\n");
448 return NULL;
449 }
450 if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
451 /* The memory does not fit our device constraints.
452 * Retry with GFP_DMA set to get lower memory. */
453 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
454 base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
455 GFP_KERNEL | GFP_DMA);
456 if (!base) {
457 b43err(ring->dev->wl, "Failed to allocate or map pages "
458 "in the GFP_DMA region for DMA ringmemory\n");
459 return NULL;
460 }
461 if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
462 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
463 b43err(ring->dev->wl, "Failed to allocate DMA "
464 "ringmemory that fits device constraints\n");
465 return NULL;
466 }
467 }
468 /* We expect the memory to be 4k aligned, at least. */
469 if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
470 b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
471 return NULL;
472 }
473
474 return base;
475}
476
386static int alloc_ringmemory(struct b43_dmaring *ring) 477static int alloc_ringmemory(struct b43_dmaring *ring)
387{ 478{
388 gfp_t flags = GFP_KERNEL; 479 unsigned int required;
389 480 void *base;
390 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K 481 dma_addr_t dmaaddr;
391 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing 482
392 * has shown that 4K is sufficient for the latter as long as the buffer 483 /* There are several requirements to the descriptor ring memory:
393 * does not cross an 8K boundary. 484 * - The memory region needs to fit the address constraints for the
394 * 485 * device (same as for frame buffers).
395 * For unknown reasons - possibly a hardware error - the BCM4311 rev 486 * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
396 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory, 487 * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
397 * which accounts for the GFP_DMA flag below.
398 *
399 * The flags here must match the flags in free_ringmemory below!
400 */ 488 */
489
401 if (ring->type == B43_DMA_64BIT) 490 if (ring->type == B43_DMA_64BIT)
402 flags |= GFP_DMA; 491 required = ring->nr_slots * sizeof(struct b43_dmadesc64);
403 ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev, 492 else
404 B43_DMA_RINGMEMSIZE, 493 required = ring->nr_slots * sizeof(struct b43_dmadesc32);
405 &(ring->dmabase), flags); 494 if (B43_WARN_ON(required > 0x1000))
406 if (!ring->descbase) { 495 return -ENOMEM;
407 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n"); 496
497 ring->alloc_descsize = 0x1000;
498 base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
499 if (!base)
500 return -ENOMEM;
501 ring->alloc_descbase = base;
502 ring->alloc_dmabase = dmaaddr;
503
504 if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
505 /* We're on <=32bit DMA, or we already got 8k aligned memory.
506 * That's all we need, so we're fine. */
507 ring->descbase = base;
508 ring->dmabase = dmaaddr;
509 return 0;
510 }
511 b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
512
513 /* Ok, we failed at the 8k alignment requirement.
514 * Try to force-align the memory region now. */
515 ring->alloc_descsize = 0x2000;
516 base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
517 if (!base)
408 return -ENOMEM; 518 return -ENOMEM;
519 ring->alloc_descbase = base;
520 ring->alloc_dmabase = dmaaddr;
521
522 if (is_8k_aligned(dmaaddr)) {
523 /* We're already 8k aligned. That Ok, too. */
524 ring->descbase = base;
525 ring->dmabase = dmaaddr;
526 return 0;
409 } 527 }
410 memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE); 528 /* Force-align it to 8k */
529 ring->descbase = (void *)((u8 *)base + 0x1000);
530 ring->dmabase = dmaaddr + 0x1000;
531 B43_WARN_ON(!is_8k_aligned(ring->dmabase));
411 532
412 return 0; 533 return 0;
413} 534}
414 535
415static void free_ringmemory(struct b43_dmaring *ring) 536static void free_ringmemory(struct b43_dmaring *ring)
416{ 537{
417 gfp_t flags = GFP_KERNEL; 538 b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
418 539 ring->alloc_dmabase, ring->alloc_descsize);
419 if (ring->type == B43_DMA_64BIT)
420 flags |= GFP_DMA;
421
422 ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
423 ring->descbase, ring->dmabase, flags);
424} 540}
425 541
426/* Reset the RX DMA channel */ 542/* Reset the RX DMA channel */
@@ -530,29 +646,14 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
530 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr))) 646 if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
531 return 1; 647 return 1;
532 648
533 switch (ring->type) { 649 if (!b43_dma_address_ok(ring, addr, buffersize)) {
534 case B43_DMA_30BIT: 650 /* We can't support this address. Unmap it again. */
535 if ((u64)addr + buffersize > (1ULL << 30)) 651 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
536 goto address_error; 652 return 1;
537 break;
538 case B43_DMA_32BIT:
539 if ((u64)addr + buffersize > (1ULL << 32))
540 goto address_error;
541 break;
542 case B43_DMA_64BIT:
543 /* Currently we can't have addresses beyond
544 * 64bit in the kernel. */
545 break;
546 } 653 }
547 654
548 /* The address is OK. */ 655 /* The address is OK. */
549 return 0; 656 return 0;
550
551address_error:
552 /* We can't support this address. Unmap it again. */
553 unmap_descbuffer(ring, addr, buffersize, dma_to_device);
554
555 return 1;
556} 657}
557 658
558static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb) 659static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -614,6 +715,9 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
614 meta->dmaaddr = dmaaddr; 715 meta->dmaaddr = dmaaddr;
615 ring->ops->fill_descriptor(ring, desc, dmaaddr, 716 ring->ops->fill_descriptor(ring, desc, dmaaddr,
616 ring->rx_buffersize, 0, 0, 0); 717 ring->rx_buffersize, 0, 0, 0);
718 ssb_dma_sync_single_for_device(ring->dev->dev,
719 ring->alloc_dmabase,
720 ring->alloc_descsize, DMA_TO_DEVICE);
617 721
618 return 0; 722 return 0;
619} 723}
@@ -770,7 +874,7 @@ static void free_all_descbuffers(struct b43_dmaring *ring)
770 for (i = 0; i < ring->nr_slots; i++) { 874 for (i = 0; i < ring->nr_slots; i++) {
771 desc = ring->ops->idx2desc(ring, i, &meta); 875 desc = ring->ops->idx2desc(ring, i, &meta);
772 876
773 if (!meta->skb) { 877 if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
774 B43_WARN_ON(!ring->tx); 878 B43_WARN_ON(!ring->tx);
775 continue; 879 continue;
776 } 880 }
@@ -822,7 +926,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
822 enum b43_dmatype type) 926 enum b43_dmatype type)
823{ 927{
824 struct b43_dmaring *ring; 928 struct b43_dmaring *ring;
825 int err; 929 int i, err;
826 dma_addr_t dma_test; 930 dma_addr_t dma_test;
827 931
828 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 932 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -837,6 +941,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
837 GFP_KERNEL); 941 GFP_KERNEL);
838 if (!ring->meta) 942 if (!ring->meta)
839 goto err_kfree_ring; 943 goto err_kfree_ring;
944 for (i = 0; i < ring->nr_slots; i++)
945 ring->meta->skb = B43_DMA_PTR_POISON;
840 946
841 ring->type = type; 947 ring->type = type;
842 ring->dev = dev; 948 ring->dev = dev;
@@ -1147,28 +1253,29 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1147 case 0x5000: 1253 case 0x5000:
1148 ring = dma->tx_ring_mcast; 1254 ring = dma->tx_ring_mcast;
1149 break; 1255 break;
1150 default:
1151 B43_WARN_ON(1);
1152 } 1256 }
1153 *slot = (cookie & 0x0FFF); 1257 *slot = (cookie & 0x0FFF);
1154 B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots)); 1258 if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
1259 b43dbg(dev->wl, "TX-status contains "
1260 "invalid cookie: 0x%04X\n", cookie);
1261 return NULL;
1262 }
1155 1263
1156 return ring; 1264 return ring;
1157} 1265}
1158 1266
1159static int dma_tx_fragment(struct b43_dmaring *ring, 1267static int dma_tx_fragment(struct b43_dmaring *ring,
1160 struct sk_buff **in_skb) 1268 struct sk_buff *skb)
1161{ 1269{
1162 struct sk_buff *skb = *in_skb;
1163 const struct b43_dma_ops *ops = ring->ops; 1270 const struct b43_dma_ops *ops = ring->ops;
1164 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1271 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1272 struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
1165 u8 *header; 1273 u8 *header;
1166 int slot, old_top_slot, old_used_slots; 1274 int slot, old_top_slot, old_used_slots;
1167 int err; 1275 int err;
1168 struct b43_dmadesc_generic *desc; 1276 struct b43_dmadesc_generic *desc;
1169 struct b43_dmadesc_meta *meta; 1277 struct b43_dmadesc_meta *meta;
1170 struct b43_dmadesc_meta *meta_hdr; 1278 struct b43_dmadesc_meta *meta_hdr;
1171 struct sk_buff *bounce_skb;
1172 u16 cookie; 1279 u16 cookie;
1173 size_t hdrsize = b43_txhdr_size(ring->dev); 1280 size_t hdrsize = b43_txhdr_size(ring->dev);
1174 1281
@@ -1212,34 +1319,28 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1212 1319
1213 meta->skb = skb; 1320 meta->skb = skb;
1214 meta->is_last_fragment = 1; 1321 meta->is_last_fragment = 1;
1322 priv_info->bouncebuffer = NULL;
1215 1323
1216 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1324 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1217 /* create a bounce buffer in zone_dma on mapping failure. */ 1325 /* create a bounce buffer in zone_dma on mapping failure. */
1218 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1326 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1219 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); 1327 priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
1220 if (!bounce_skb) { 1328 if (!priv_info->bouncebuffer) {
1221 ring->current_slot = old_top_slot; 1329 ring->current_slot = old_top_slot;
1222 ring->used_slots = old_used_slots; 1330 ring->used_slots = old_used_slots;
1223 err = -ENOMEM; 1331 err = -ENOMEM;
1224 goto out_unmap_hdr; 1332 goto out_unmap_hdr;
1225 } 1333 }
1334 memcpy(priv_info->bouncebuffer, skb->data, skb->len);
1226 1335
1227 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); 1336 meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
1228 memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
1229 bounce_skb->dev = skb->dev;
1230 skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
1231 info = IEEE80211_SKB_CB(bounce_skb);
1232
1233 dev_kfree_skb_any(skb);
1234 skb = bounce_skb;
1235 *in_skb = bounce_skb;
1236 meta->skb = skb;
1237 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1238 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1337 if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
1338 kfree(priv_info->bouncebuffer);
1339 priv_info->bouncebuffer = NULL;
1239 ring->current_slot = old_top_slot; 1340 ring->current_slot = old_top_slot;
1240 ring->used_slots = old_used_slots; 1341 ring->used_slots = old_used_slots;
1241 err = -EIO; 1342 err = -EIO;
1242 goto out_free_bounce; 1343 goto out_unmap_hdr;
1243 } 1344 }
1244 } 1345 }
1245 1346
@@ -1253,11 +1354,12 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1253 } 1354 }
1254 /* Now transfer the whole frame. */ 1355 /* Now transfer the whole frame. */
1255 wmb(); 1356 wmb();
1357 ssb_dma_sync_single_for_device(ring->dev->dev,
1358 ring->alloc_dmabase,
1359 ring->alloc_descsize, DMA_TO_DEVICE);
1256 ops->poke_tx(ring, next_slot(ring, slot)); 1360 ops->poke_tx(ring, next_slot(ring, slot));
1257 return 0; 1361 return 0;
1258 1362
1259out_free_bounce:
1260 dev_kfree_skb_any(skb);
1261out_unmap_hdr: 1363out_unmap_hdr:
1262 unmap_descbuffer(ring, meta_hdr->dmaaddr, 1364 unmap_descbuffer(ring, meta_hdr->dmaaddr,
1263 hdrsize, 1); 1365 hdrsize, 1);
@@ -1362,11 +1464,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
1362 * static, so we don't need to store it per frame. */ 1464 * static, so we don't need to store it per frame. */
1363 ring->queue_prio = skb_get_queue_mapping(skb); 1465 ring->queue_prio = skb_get_queue_mapping(skb);
1364 1466
1365 /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing 1467 err = dma_tx_fragment(ring, skb);
1366 * into the skb data or cb now. */
1367 hdr = NULL;
1368 info = NULL;
1369 err = dma_tx_fragment(ring, &skb);
1370 if (unlikely(err == -ENOKEY)) { 1468 if (unlikely(err == -ENOKEY)) {
1371 /* Drop this packet, as we don't have the encryption key 1469 /* Drop this packet, as we don't have the encryption key
1372 * anymore and must not transmit it unencrypted. */ 1470 * anymore and must not transmit it unencrypted. */
@@ -1400,30 +1498,63 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1400 struct b43_dmaring *ring; 1498 struct b43_dmaring *ring;
1401 struct b43_dmadesc_generic *desc; 1499 struct b43_dmadesc_generic *desc;
1402 struct b43_dmadesc_meta *meta; 1500 struct b43_dmadesc_meta *meta;
1403 int slot; 1501 int slot, firstused;
1404 bool frame_succeed; 1502 bool frame_succeed;
1405 1503
1406 ring = parse_cookie(dev, status->cookie, &slot); 1504 ring = parse_cookie(dev, status->cookie, &slot);
1407 if (unlikely(!ring)) 1505 if (unlikely(!ring))
1408 return; 1506 return;
1409
1410 B43_WARN_ON(!ring->tx); 1507 B43_WARN_ON(!ring->tx);
1508
1509 /* Sanity check: TX packets are processed in-order on one ring.
1510 * Check if the slot deduced from the cookie really is the first
1511 * used slot. */
1512 firstused = ring->current_slot - ring->used_slots + 1;
1513 if (firstused < 0)
1514 firstused = ring->nr_slots + firstused;
1515 if (unlikely(slot != firstused)) {
1516 /* This possibly is a firmware bug and will result in
1517 * malfunction, memory leaks and/or stall of DMA functionality. */
1518 b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
1519 "Expected %d, but got %d\n",
1520 ring->index, firstused, slot);
1521 return;
1522 }
1523
1411 ops = ring->ops; 1524 ops = ring->ops;
1412 while (1) { 1525 while (1) {
1413 B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); 1526 B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
1414 desc = ops->idx2desc(ring, slot, &meta); 1527 desc = ops->idx2desc(ring, slot, &meta);
1415 1528
1416 if (meta->skb) 1529 if (b43_dma_ptr_is_poisoned(meta->skb)) {
1417 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1530 b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
1418 1); 1531 "on ring %d\n",
1419 else 1532 slot, firstused, ring->index);
1533 break;
1534 }
1535 if (meta->skb) {
1536 struct b43_private_tx_info *priv_info =
1537 b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
1538
1539 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
1540 kfree(priv_info->bouncebuffer);
1541 priv_info->bouncebuffer = NULL;
1542 } else {
1420 unmap_descbuffer(ring, meta->dmaaddr, 1543 unmap_descbuffer(ring, meta->dmaaddr,
1421 b43_txhdr_size(dev), 1); 1544 b43_txhdr_size(dev), 1);
1545 }
1422 1546
1423 if (meta->is_last_fragment) { 1547 if (meta->is_last_fragment) {
1424 struct ieee80211_tx_info *info; 1548 struct ieee80211_tx_info *info;
1425 1549
1426 BUG_ON(!meta->skb); 1550 if (unlikely(!meta->skb)) {
1551 /* This is a scatter-gather fragment of a frame, so
1552 * the skb pointer must not be NULL. */
1553 b43dbg(dev->wl, "TX status unexpected NULL skb "
1554 "at slot %d (first=%d) on ring %d\n",
1555 slot, firstused, ring->index);
1556 break;
1557 }
1427 1558
1428 info = IEEE80211_SKB_CB(meta->skb); 1559 info = IEEE80211_SKB_CB(meta->skb);
1429 1560
@@ -1441,20 +1572,29 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1441#endif /* DEBUG */ 1572#endif /* DEBUG */
1442 ieee80211_tx_status(dev->wl->hw, meta->skb); 1573 ieee80211_tx_status(dev->wl->hw, meta->skb);
1443 1574
1444 /* skb is freed by ieee80211_tx_status() */ 1575 /* skb will be freed by ieee80211_tx_status().
1445 meta->skb = NULL; 1576 * Poison our pointer. */
1577 meta->skb = B43_DMA_PTR_POISON;
1446 } else { 1578 } else {
1447 /* No need to call free_descriptor_buffer here, as 1579 /* No need to call free_descriptor_buffer here, as
1448 * this is only the txhdr, which is not allocated. 1580 * this is only the txhdr, which is not allocated.
1449 */ 1581 */
1450 B43_WARN_ON(meta->skb); 1582 if (unlikely(meta->skb)) {
1583 b43dbg(dev->wl, "TX status unexpected non-NULL skb "
1584 "at slot %d (first=%d) on ring %d\n",
1585 slot, firstused, ring->index);
1586 break;
1587 }
1451 } 1588 }
1452 1589
1453 /* Everything unmapped and free'd. So it's not used anymore. */ 1590 /* Everything unmapped and free'd. So it's not used anymore. */
1454 ring->used_slots--; 1591 ring->used_slots--;
1455 1592
1456 if (meta->is_last_fragment) 1593 if (meta->is_last_fragment) {
1594 /* This is the last scatter-gather
1595 * fragment of the frame. We are done. */
1457 break; 1596 break;
1597 }
1458 slot = next_slot(ring, slot); 1598 slot = next_slot(ring, slot);
1459 } 1599 }
1460 if (ring->stopped) { 1600 if (ring->stopped) {
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index f0b0838fb5ba..e607b392314c 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -1,7 +1,7 @@
1#ifndef B43_DMA_H_ 1#ifndef B43_DMA_H_
2#define B43_DMA_H_ 2#define B43_DMA_H_
3 3
4#include <linux/ieee80211.h> 4#include <linux/err.h>
5 5
6#include "b43.h" 6#include "b43.h"
7 7
@@ -157,7 +157,6 @@ struct b43_dmadesc_generic {
157} __attribute__ ((__packed__)); 157} __attribute__ ((__packed__));
158 158
159/* Misc DMA constants */ 159/* Misc DMA constants */
160#define B43_DMA_RINGMEMSIZE PAGE_SIZE
161#define B43_DMA0_RX_FRAMEOFFSET 30 160#define B43_DMA0_RX_FRAMEOFFSET 30
162 161
163/* DMA engine tuning knobs */ 162/* DMA engine tuning knobs */
@@ -165,6 +164,10 @@ struct b43_dmadesc_generic {
165#define B43_RXRING_SLOTS 64 164#define B43_RXRING_SLOTS 64
166#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN 165#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN
167 166
167/* Pointer poison */
168#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))
169#define b43_dma_ptr_is_poisoned(ptr) (unlikely((ptr) == B43_DMA_PTR_POISON))
170
168 171
169struct sk_buff; 172struct sk_buff;
170struct b43_private; 173struct b43_private;
@@ -243,6 +246,12 @@ struct b43_dmaring {
243 /* The QOS priority assigned to this ring. Only used for TX rings. 246 /* The QOS priority assigned to this ring. Only used for TX rings.
244 * This is the mac80211 "queue" value. */ 247 * This is the mac80211 "queue" value. */
245 u8 queue_prio; 248 u8 queue_prio;
249 /* Pointers and size of the originally allocated and mapped memory
250 * region for the descriptor ring. */
251 void *alloc_descbase;
252 dma_addr_t alloc_dmabase;
253 unsigned int alloc_descsize;
254 /* Pointer to our wireless device. */
246 struct b43_wldev *dev; 255 struct b43_wldev *dev;
247#ifdef CONFIG_B43_DEBUG 256#ifdef CONFIG_B43_DEBUG
248 /* Maximum number of used slots. */ 257 /* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 098dda1a67c1..077480c4916a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3573,7 +3573,7 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3573 if (conf->channel->hw_value != phy->channel) 3573 if (conf->channel->hw_value != phy->channel)
3574 b43_switch_channel(dev, conf->channel->hw_value); 3574 b43_switch_channel(dev, conf->channel->hw_value);
3575 3575
3576 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 3576 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
3577 3577
3578 /* Adjust the desired TX power level. */ 3578 /* Adjust the desired TX power level. */
3579 if (conf->power_level != 0) { 3579 if (conf->power_level != 0) {
@@ -4669,7 +4669,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4669{ 4669{
4670 struct b43_wl *wl = dev->wl; 4670 struct b43_wl *wl = dev->wl;
4671 struct ssb_bus *bus = dev->dev->bus; 4671 struct ssb_bus *bus = dev->dev->bus;
4672 struct pci_dev *pdev = bus->host_pci; 4672 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
4673 int err; 4673 int err;
4674 bool have_2ghz_phy = 0, have_5ghz_phy = 0; 4674 bool have_2ghz_phy = 0, have_5ghz_phy = 0;
4675 u32 tmp; 4675 u32 tmp;
@@ -4802,7 +4802,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
4802 4802
4803 if (!list_empty(&wl->devlist)) { 4803 if (!list_empty(&wl->devlist)) {
4804 /* We are not the first core on this chip. */ 4804 /* We are not the first core on this chip. */
4805 pdev = dev->bus->host_pci; 4805 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
4806 /* Only special chips support more than one wireless 4806 /* Only special chips support more than one wireless
4807 * core, although some of the other chips have more than 4807 * core, although some of the other chips have more than
4808 * one wireless core as well. Check for this and 4808 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 1e318d815a5b..3e046ec1ff86 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -67,6 +67,7 @@ static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev)
67 struct b43_phy_lp *lpphy = phy->lp; 67 struct b43_phy_lp *lpphy = phy->lp;
68 68
69 memset(lpphy, 0, sizeof(*lpphy)); 69 memset(lpphy, 0, sizeof(*lpphy));
70 lpphy->antenna = B43_ANTENNA_DEFAULT;
70 71
71 //TODO 72 //TODO
72} 73}
@@ -751,11 +752,17 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
751 } 752 }
752} 753}
753 754
755static void lpphy_set_trsw_over(struct b43_wldev *dev, bool tx, bool rx)
756{
757 u16 trsw = (tx << 1) | rx;
758 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, trsw);
759 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
760}
761
754static void lpphy_disable_crs(struct b43_wldev *dev, bool user) 762static void lpphy_disable_crs(struct b43_wldev *dev, bool user)
755{ 763{
756 lpphy_set_deaf(dev, user); 764 lpphy_set_deaf(dev, user);
757 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, 0x1); 765 lpphy_set_trsw_over(dev, false, true);
758 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
759 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB); 766 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB);
760 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4); 767 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4);
761 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7); 768 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7);
@@ -790,6 +797,60 @@ static void lpphy_restore_crs(struct b43_wldev *dev, bool user)
790 797
791struct lpphy_tx_gains { u16 gm, pga, pad, dac; }; 798struct lpphy_tx_gains { u16 gm, pga, pad, dac; };
792 799
800static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
801{
802 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE);
803 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF);
804 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
805 if (dev->phy.rev >= 2) {
806 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
807 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
808 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
809 b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
810 }
811 } else {
812 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
813 }
814}
815
816static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
817{
818 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
819 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
820 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
821 if (dev->phy.rev >= 2) {
822 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
823 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
824 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
825 b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
826 }
827 } else {
828 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
829 }
830}
831
832static void lpphy_disable_tx_gain_override(struct b43_wldev *dev)
833{
834 if (dev->phy.rev < 2)
835 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
836 else {
837 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F);
838 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF);
839 }
840 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF);
841}
842
843static void lpphy_enable_tx_gain_override(struct b43_wldev *dev)
844{
845 if (dev->phy.rev < 2)
846 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
847 else {
848 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x80);
849 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x4000);
850 }
851 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x40);
852}
853
793static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev) 854static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev)
794{ 855{
795 struct lpphy_tx_gains gains; 856 struct lpphy_tx_gains gains;
@@ -819,6 +880,17 @@ static void lpphy_set_dac_gain(struct b43_wldev *dev, u16 dac)
819 b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, ctl); 880 b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, ctl);
820} 881}
821 882
883static u16 lpphy_get_pa_gain(struct b43_wldev *dev)
884{
885 return b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x7F;
886}
887
888static void lpphy_set_pa_gain(struct b43_wldev *dev, u16 gain)
889{
890 b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 0xE03F, gain << 6);
891 b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 0x80FF, gain << 8);
892}
893
822static void lpphy_set_tx_gains(struct b43_wldev *dev, 894static void lpphy_set_tx_gains(struct b43_wldev *dev,
823 struct lpphy_tx_gains gains) 895 struct lpphy_tx_gains gains)
824{ 896{
@@ -829,25 +901,22 @@ static void lpphy_set_tx_gains(struct b43_wldev *dev,
829 b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 901 b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
830 0xF800, rf_gain); 902 0xF800, rf_gain);
831 } else { 903 } else {
832 pa_gain = b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x1FC0; 904 pa_gain = lpphy_get_pa_gain(dev);
833 pa_gain <<= 2;
834 b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 905 b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
835 (gains.pga << 8) | gains.gm); 906 (gains.pga << 8) | gains.gm);
907 /*
908 * SPEC FIXME The spec calls for (pa_gain << 8) here, but that
909 * conflicts with the spec for set_pa_gain! Vendor driver bug?
910 */
836 b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 911 b43_phy_maskset(dev, B43_PHY_OFDM(0xFB),
837 0x8000, gains.pad | pa_gain); 912 0x8000, gains.pad | (pa_gain << 6));
838 b43_phy_write(dev, B43_PHY_OFDM(0xFC), 913 b43_phy_write(dev, B43_PHY_OFDM(0xFC),
839 (gains.pga << 8) | gains.gm); 914 (gains.pga << 8) | gains.gm);
840 b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 915 b43_phy_maskset(dev, B43_PHY_OFDM(0xFD),
841 0x8000, gains.pad | pa_gain); 916 0x8000, gains.pad | (pa_gain << 8));
842 } 917 }
843 lpphy_set_dac_gain(dev, gains.dac); 918 lpphy_set_dac_gain(dev, gains.dac);
844 if (dev->phy.rev < 2) { 919 lpphy_enable_tx_gain_override(dev);
845 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF, 1 << 8);
846 } else {
847 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F, 1 << 7);
848 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF, 1 << 14);
849 }
850 b43_phy_maskset(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF, 1 << 6);
851} 920}
852 921
853static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain) 922static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain)
@@ -887,38 +956,6 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
887 } 956 }
888} 957}
889 958
890static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
891{
892 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE);
893 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF);
894 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
895 if (dev->phy.rev >= 2) {
896 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
897 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
898 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
899 b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
900 }
901 } else {
902 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
903 }
904}
905
906static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
907{
908 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
909 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
910 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
911 if (dev->phy.rev >= 2) {
912 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
913 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
914 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
915 b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
916 }
917 } else {
918 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
919 }
920}
921
922static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain) 959static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain)
923{ 960{
924 if (dev->phy.rev < 2) 961 if (dev->phy.rev < 2)
@@ -1003,8 +1040,7 @@ static int lpphy_loopback(struct b43_wldev *dev)
1003 1040
1004 memset(&iq_est, 0, sizeof(iq_est)); 1041 memset(&iq_est, 0, sizeof(iq_est));
1005 1042
1006 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, 0x3); 1043 lpphy_set_trsw_over(dev, true, true);
1007 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
1008 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1); 1044 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1);
1009 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE); 1045 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
1010 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800); 1046 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
@@ -1126,7 +1162,7 @@ static void lpphy_set_tx_power_control(struct b43_wldev *dev,
1126 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 1162 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM,
1127 0x8FFF, ((u16)lpphy->tssi_npt << 16)); 1163 0x8FFF, ((u16)lpphy->tssi_npt << 16));
1128 //TODO Set "TSSI Transmit Count" variable to total transmitted frame count 1164 //TODO Set "TSSI Transmit Count" variable to total transmitted frame count
1129 //TODO Disable TX gain override 1165 lpphy_disable_tx_gain_override(dev);
1130 lpphy->tx_pwr_idx_over = -1; 1166 lpphy->tx_pwr_idx_over = -1;
1131 } 1167 }
1132 } 1168 }
@@ -1312,15 +1348,73 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev)
1312 } 1348 }
1313} 1349}
1314 1350
1351static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
1352{
1353 if (dev->phy.rev >= 2)
1354 return; // rev2+ doesn't support antenna diversity
1355
1356 if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1))
1357 return;
1358
1359 b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP);
1360
1361 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2);
1362 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1);
1363
1364 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP);
1365
1366 dev->phy.lp->antenna = antenna;
1367}
1368
1369static void lpphy_set_tx_iqcc(struct b43_wldev *dev, u16 a, u16 b)
1370{
1371 u16 tmp[2];
1372
1373 tmp[0] = a;
1374 tmp[1] = b;
1375 b43_lptab_write_bulk(dev, B43_LPTAB16(0, 80), 2, tmp);
1376}
1377
1315static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index) 1378static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index)
1316{ 1379{
1317 struct b43_phy_lp *lpphy = dev->phy.lp; 1380 struct b43_phy_lp *lpphy = dev->phy.lp;
1381 struct lpphy_tx_gains gains;
1382 u32 iq_comp, tx_gain, coeff, rf_power;
1318 1383
1319 lpphy->tx_pwr_idx_over = index; 1384 lpphy->tx_pwr_idx_over = index;
1385 lpphy_read_tx_pctl_mode_from_hardware(dev);
1320 if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF) 1386 if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF)
1321 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW); 1387 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW);
1322 1388 if (dev->phy.rev >= 2) {
1323 //TODO 1389 iq_comp = b43_lptab_read(dev, B43_LPTAB32(7, index + 320));
1390 tx_gain = b43_lptab_read(dev, B43_LPTAB32(7, index + 192));
1391 gains.pad = (tx_gain >> 16) & 0xFF;
1392 gains.gm = tx_gain & 0xFF;
1393 gains.pga = (tx_gain >> 8) & 0xFF;
1394 gains.dac = (iq_comp >> 28) & 0xFF;
1395 lpphy_set_tx_gains(dev, gains);
1396 } else {
1397 iq_comp = b43_lptab_read(dev, B43_LPTAB32(10, index + 320));
1398 tx_gain = b43_lptab_read(dev, B43_LPTAB32(10, index + 192));
1399 b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
1400 0xF800, (tx_gain >> 4) & 0x7FFF);
1401 lpphy_set_dac_gain(dev, tx_gain & 0x7);
1402 lpphy_set_pa_gain(dev, (tx_gain >> 24) & 0x7F);
1403 }
1404 lpphy_set_bb_mult(dev, (iq_comp >> 20) & 0xFF);
1405 lpphy_set_tx_iqcc(dev, (iq_comp >> 10) & 0x3FF, iq_comp & 0x3FF);
1406 if (dev->phy.rev >= 2) {
1407 coeff = b43_lptab_read(dev, B43_LPTAB32(7, index + 448));
1408 } else {
1409 coeff = b43_lptab_read(dev, B43_LPTAB32(10, index + 448));
1410 }
1411 b43_lptab_write(dev, B43_LPTAB16(0, 85), coeff & 0xFFFF);
1412 if (dev->phy.rev >= 2) {
1413 rf_power = b43_lptab_read(dev, B43_LPTAB32(7, index + 576));
1414 b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00,
1415 rf_power & 0xFFFF);//SPEC FIXME mask & set != 0
1416 }
1417 lpphy_enable_tx_gain_override(dev);
1324} 1418}
1325 1419
1326static void lpphy_btcoex_override(struct b43_wldev *dev) 1420static void lpphy_btcoex_override(struct b43_wldev *dev)
@@ -1329,58 +1423,45 @@ static void lpphy_btcoex_override(struct b43_wldev *dev)
1329 b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF); 1423 b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF);
1330} 1424}
1331 1425
1332static void lpphy_pr41573_workaround(struct b43_wldev *dev) 1426static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
1427 bool blocked)
1333{ 1428{
1334 struct b43_phy_lp *lpphy = dev->phy.lp; 1429 //TODO check MAC control register
1335 u32 *saved_tab; 1430 if (blocked) {
1336 const unsigned int saved_tab_size = 256; 1431 if (dev->phy.rev >= 2) {
1337 enum b43_lpphy_txpctl_mode txpctl_mode; 1432 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x83FF);
1338 s8 tx_pwr_idx_over; 1433 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
1339 u16 tssi_npt, tssi_idx; 1434 b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0x80FF);
1340 1435 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xDFFF);
1341 saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL); 1436 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0808);
1342 if (!saved_tab) { 1437 } else {
1343 b43err(dev->wl, "PR41573 failed. Out of memory!\n"); 1438 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xE0FF);
1344 return; 1439 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
1345 } 1440 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFCFF);
1346 1441 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0018);
1347 lpphy_read_tx_pctl_mode_from_hardware(dev); 1442 }
1348 txpctl_mode = lpphy->txpctl_mode;
1349 tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
1350 tssi_npt = lpphy->tssi_npt;
1351 tssi_idx = lpphy->tssi_idx;
1352
1353 if (dev->phy.rev < 2) {
1354 b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
1355 saved_tab_size, saved_tab);
1356 } else { 1443 } else {
1357 b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140), 1444 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xE0FF);
1358 saved_tab_size, saved_tab); 1445 if (dev->phy.rev >= 2)
1446 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xF7F7);
1447 else
1448 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFFE7);
1359 } 1449 }
1360 //TODO
1361
1362 kfree(saved_tab);
1363} 1450}
1364 1451
1365static void lpphy_calibration(struct b43_wldev *dev) 1452/* This was previously called lpphy_japan_filter */
1453static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel)
1366{ 1454{
1367 struct b43_phy_lp *lpphy = dev->phy.lp; 1455 struct b43_phy_lp *lpphy = dev->phy.lp;
1368 enum b43_lpphy_txpctl_mode saved_pctl_mode; 1456 u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter!
1369
1370 b43_mac_suspend(dev);
1371
1372 lpphy_btcoex_override(dev);
1373 lpphy_read_tx_pctl_mode_from_hardware(dev);
1374 saved_pctl_mode = lpphy->txpctl_mode;
1375 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
1376 //TODO Perform transmit power table I/Q LO calibration
1377 if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
1378 lpphy_pr41573_workaround(dev);
1379 //TODO If a full calibration has not been performed on this channel yet, perform PAPD TX-power calibration
1380 lpphy_set_tx_power_control(dev, saved_pctl_mode);
1381 //TODO Perform I/Q calibration with a single control value set
1382 1457
1383 b43_mac_enable(dev); 1458 if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific?
1459 b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9);
1460 if ((dev->phy.rev == 1) && (lpphy->rc_cap))
1461 lpphy_set_rc_cap(dev);
1462 } else {
1463 b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F);
1464 }
1384} 1465}
1385 1466
1386static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode) 1467static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode)
@@ -1489,6 +1570,473 @@ static void lpphy_tx_pctl_init(struct b43_wldev *dev)
1489 } 1570 }
1490} 1571}
1491 1572
1573static void lpphy_pr41573_workaround(struct b43_wldev *dev)
1574{
1575 struct b43_phy_lp *lpphy = dev->phy.lp;
1576 u32 *saved_tab;
1577 const unsigned int saved_tab_size = 256;
1578 enum b43_lpphy_txpctl_mode txpctl_mode;
1579 s8 tx_pwr_idx_over;
1580 u16 tssi_npt, tssi_idx;
1581
1582 saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL);
1583 if (!saved_tab) {
1584 b43err(dev->wl, "PR41573 failed. Out of memory!\n");
1585 return;
1586 }
1587
1588 lpphy_read_tx_pctl_mode_from_hardware(dev);
1589 txpctl_mode = lpphy->txpctl_mode;
1590 tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
1591 tssi_npt = lpphy->tssi_npt;
1592 tssi_idx = lpphy->tssi_idx;
1593
1594 if (dev->phy.rev < 2) {
1595 b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
1596 saved_tab_size, saved_tab);
1597 } else {
1598 b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140),
1599 saved_tab_size, saved_tab);
1600 }
1601 //FIXME PHY reset
1602 lpphy_table_init(dev); //FIXME is table init needed?
1603 lpphy_baseband_init(dev);
1604 lpphy_tx_pctl_init(dev);
1605 b43_lpphy_op_software_rfkill(dev, false);
1606 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
1607 if (dev->phy.rev < 2) {
1608 b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0x140),
1609 saved_tab_size, saved_tab);
1610 } else {
1611 b43_lptab_write_bulk(dev, B43_LPTAB32(7, 0x140),
1612 saved_tab_size, saved_tab);
1613 }
1614 b43_write16(dev, B43_MMIO_CHANNEL, lpphy->channel);
1615 lpphy->tssi_npt = tssi_npt;
1616 lpphy->tssi_idx = tssi_idx;
1617 lpphy_set_analog_filter(dev, lpphy->channel);
1618 if (tx_pwr_idx_over != -1)
1619 lpphy_set_tx_power_by_index(dev, tx_pwr_idx_over);
1620 if (lpphy->rc_cap)
1621 lpphy_set_rc_cap(dev);
1622 b43_lpphy_op_set_rx_antenna(dev, lpphy->antenna);
1623 lpphy_set_tx_power_control(dev, txpctl_mode);
1624 kfree(saved_tab);
1625}
1626
1627struct lpphy_rx_iq_comp { u8 chan; s8 c1, c0; };
1628
1629static const struct lpphy_rx_iq_comp lpphy_5354_iq_table[] = {
1630 { .chan = 1, .c1 = -66, .c0 = 15, },
1631 { .chan = 2, .c1 = -66, .c0 = 15, },
1632 { .chan = 3, .c1 = -66, .c0 = 15, },
1633 { .chan = 4, .c1 = -66, .c0 = 15, },
1634 { .chan = 5, .c1 = -66, .c0 = 15, },
1635 { .chan = 6, .c1 = -66, .c0 = 15, },
1636 { .chan = 7, .c1 = -66, .c0 = 14, },
1637 { .chan = 8, .c1 = -66, .c0 = 14, },
1638 { .chan = 9, .c1 = -66, .c0 = 14, },
1639 { .chan = 10, .c1 = -66, .c0 = 14, },
1640 { .chan = 11, .c1 = -66, .c0 = 14, },
1641 { .chan = 12, .c1 = -66, .c0 = 13, },
1642 { .chan = 13, .c1 = -66, .c0 = 13, },
1643 { .chan = 14, .c1 = -66, .c0 = 13, },
1644};
1645
1646static const struct lpphy_rx_iq_comp lpphy_rev0_1_iq_table[] = {
1647 { .chan = 1, .c1 = -64, .c0 = 13, },
1648 { .chan = 2, .c1 = -64, .c0 = 13, },
1649 { .chan = 3, .c1 = -64, .c0 = 13, },
1650 { .chan = 4, .c1 = -64, .c0 = 13, },
1651 { .chan = 5, .c1 = -64, .c0 = 12, },
1652 { .chan = 6, .c1 = -64, .c0 = 12, },
1653 { .chan = 7, .c1 = -64, .c0 = 12, },
1654 { .chan = 8, .c1 = -64, .c0 = 12, },
1655 { .chan = 9, .c1 = -64, .c0 = 12, },
1656 { .chan = 10, .c1 = -64, .c0 = 11, },
1657 { .chan = 11, .c1 = -64, .c0 = 11, },
1658 { .chan = 12, .c1 = -64, .c0 = 11, },
1659 { .chan = 13, .c1 = -64, .c0 = 11, },
1660 { .chan = 14, .c1 = -64, .c0 = 10, },
1661 { .chan = 34, .c1 = -62, .c0 = 24, },
1662 { .chan = 38, .c1 = -62, .c0 = 24, },
1663 { .chan = 42, .c1 = -62, .c0 = 24, },
1664 { .chan = 46, .c1 = -62, .c0 = 23, },
1665 { .chan = 36, .c1 = -62, .c0 = 24, },
1666 { .chan = 40, .c1 = -62, .c0 = 24, },
1667 { .chan = 44, .c1 = -62, .c0 = 23, },
1668 { .chan = 48, .c1 = -62, .c0 = 23, },
1669 { .chan = 52, .c1 = -62, .c0 = 23, },
1670 { .chan = 56, .c1 = -62, .c0 = 22, },
1671 { .chan = 60, .c1 = -62, .c0 = 22, },
1672 { .chan = 64, .c1 = -62, .c0 = 22, },
1673 { .chan = 100, .c1 = -62, .c0 = 16, },
1674 { .chan = 104, .c1 = -62, .c0 = 16, },
1675 { .chan = 108, .c1 = -62, .c0 = 15, },
1676 { .chan = 112, .c1 = -62, .c0 = 14, },
1677 { .chan = 116, .c1 = -62, .c0 = 14, },
1678 { .chan = 120, .c1 = -62, .c0 = 13, },
1679 { .chan = 124, .c1 = -62, .c0 = 12, },
1680 { .chan = 128, .c1 = -62, .c0 = 12, },
1681 { .chan = 132, .c1 = -62, .c0 = 12, },
1682 { .chan = 136, .c1 = -62, .c0 = 11, },
1683 { .chan = 140, .c1 = -62, .c0 = 10, },
1684 { .chan = 149, .c1 = -61, .c0 = 9, },
1685 { .chan = 153, .c1 = -61, .c0 = 9, },
1686 { .chan = 157, .c1 = -61, .c0 = 9, },
1687 { .chan = 161, .c1 = -61, .c0 = 8, },
1688 { .chan = 165, .c1 = -61, .c0 = 8, },
1689 { .chan = 184, .c1 = -62, .c0 = 25, },
1690 { .chan = 188, .c1 = -62, .c0 = 25, },
1691 { .chan = 192, .c1 = -62, .c0 = 25, },
1692 { .chan = 196, .c1 = -62, .c0 = 25, },
1693 { .chan = 200, .c1 = -62, .c0 = 25, },
1694 { .chan = 204, .c1 = -62, .c0 = 25, },
1695 { .chan = 208, .c1 = -62, .c0 = 25, },
1696 { .chan = 212, .c1 = -62, .c0 = 25, },
1697 { .chan = 216, .c1 = -62, .c0 = 26, },
1698};
1699
1700static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
1701 .chan = 0,
1702 .c1 = -64,
1703 .c0 = 0,
1704};
1705
1706static u8 lpphy_nbits(s32 val)
1707{
1708 u32 tmp = abs(val);
1709 u8 nbits = 0;
1710
1711 while (tmp != 0) {
1712 nbits++;
1713 tmp >>= 1;
1714 }
1715
1716 return nbits;
1717}
1718
1719static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
1720{
1721 struct lpphy_iq_est iq_est;
1722 u16 c0, c1;
1723 int prod, ipwr, qpwr, prod_msb, q_msb, tmp1, tmp2, tmp3, tmp4, ret;
1724
1725 c1 = b43_phy_read(dev, B43_LPPHY_RX_COMP_COEFF_S);
1726 c0 = c1 >> 8;
1727 c1 |= 0xFF;
1728
1729 b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, 0x00C0);
1730 b43_phy_mask(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF);
1731
1732 ret = lpphy_rx_iq_est(dev, samples, 32, &iq_est);
1733 if (!ret)
1734 goto out;
1735
1736 prod = iq_est.iq_prod;
1737 ipwr = iq_est.i_pwr;
1738 qpwr = iq_est.q_pwr;
1739
1740 if (ipwr + qpwr < 2) {
1741 ret = 0;
1742 goto out;
1743 }
1744
1745 prod_msb = lpphy_nbits(prod);
1746 q_msb = lpphy_nbits(qpwr);
1747 tmp1 = prod_msb - 20;
1748
1749 if (tmp1 >= 0) {
1750 tmp3 = ((prod << (30 - prod_msb)) + (ipwr >> (1 + tmp1))) /
1751 (ipwr >> tmp1);
1752 } else {
1753 tmp3 = ((prod << (30 - prod_msb)) + (ipwr << (-1 - tmp1))) /
1754 (ipwr << -tmp1);
1755 }
1756
1757 tmp2 = q_msb - 11;
1758
1759 if (tmp2 >= 0)
1760 tmp4 = (qpwr << (31 - q_msb)) / (ipwr >> tmp2);
1761 else
1762 tmp4 = (qpwr << (31 - q_msb)) / (ipwr << -tmp2);
1763
1764 tmp4 -= tmp3 * tmp3;
1765 tmp4 = -int_sqrt(tmp4);
1766
1767 c0 = tmp3 >> 3;
1768 c1 = tmp4 >> 4;
1769
1770out:
1771 b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, c1);
1772 b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF, c0 << 8);
1773 return ret;
1774}
1775
1776/* Complex number using 2 32-bit signed integers */
1777typedef struct {s32 i, q;} lpphy_c32;
1778
1779static lpphy_c32 lpphy_cordic(int theta)
1780{
1781 u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
1782 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
1783 229, 115, 57, 29, };
1784 int i, tmp, signx = 1, angle = 0;
1785 lpphy_c32 ret = { .i = 39797, .q = 0, };
1786
1787 theta = clamp_t(int, theta, -180, 180);
1788
1789 if (theta > 90) {
1790 theta -= 180;
1791 signx = -1;
1792 } else if (theta < -90) {
1793 theta += 180;
1794 signx = -1;
1795 }
1796
1797 for (i = 0; i <= 17; i++) {
1798 if (theta > angle) {
1799 tmp = ret.i - (ret.q >> i);
1800 ret.q += ret.i >> i;
1801 ret.i = tmp;
1802 angle += arctg[i];
1803 } else {
1804 tmp = ret.i + (ret.q >> i);
1805 ret.q -= ret.i >> i;
1806 ret.i = tmp;
1807 angle -= arctg[i];
1808 }
1809 }
1810
1811 ret.i *= signx;
1812 ret.q *= signx;
1813
1814 return ret;
1815}
1816
1817static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
1818 u16 wait)
1819{
1820 b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL,
1821 0xFFC0, samples - 1);
1822 if (loops != 0xFFFF)
1823 loops--;
1824 b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000, loops);
1825 b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL, 0x3F, wait << 6);
1826 b43_phy_set(dev, B43_LPPHY_A_PHY_CTL_ADDR, 0x1);
1827}
1828
1829//SPEC FIXME what does a negative freq mean?
1830static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
1831{
1832 struct b43_phy_lp *lpphy = dev->phy.lp;
1833 u16 buf[64];
1834 int i, samples = 0, angle = 0, rotation = (9 * freq) / 500;
1835 lpphy_c32 sample;
1836
1837 lpphy->tx_tone_freq = freq;
1838
1839 if (freq) {
1840 /* Find i for which abs(freq) integrally divides 20000 * i */
1841 for (i = 1; samples * abs(freq) != 20000 * i; i++) {
1842 samples = (20000 * i) / abs(freq);
1843 if(B43_WARN_ON(samples > 63))
1844 return;
1845 }
1846 } else {
1847 samples = 2;
1848 }
1849
1850 for (i = 0; i < samples; i++) {
1851 sample = lpphy_cordic(angle);
1852 angle += rotation;
1853 buf[i] = ((sample.i * max) & 0xFF) << 8;
1854 buf[i] |= (sample.q * max) & 0xFF;
1855 }
1856
1857 b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
1858
1859 lpphy_run_samples(dev, samples, 0xFFFF, 0);
1860}
1861
1862static void lpphy_stop_tx_tone(struct b43_wldev *dev)
1863{
1864 struct b43_phy_lp *lpphy = dev->phy.lp;
1865 int i;
1866
1867 lpphy->tx_tone_freq = 0;
1868
1869 b43_phy_mask(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000);
1870 for (i = 0; i < 31; i++) {
1871 if (!(b43_phy_read(dev, B43_LPPHY_A_PHY_CTL_ADDR) & 0x1))
1872 break;
1873 udelay(100);
1874 }
1875}
1876
1877
1878static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
1879 int mode, bool useindex, u8 index)
1880{
1881 //TODO
1882}
1883
1884static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
1885{
1886 struct b43_phy_lp *lpphy = dev->phy.lp;
1887 struct ssb_bus *bus = dev->dev->bus;
1888 struct lpphy_tx_gains gains, oldgains;
1889 int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
1890
1891 lpphy_read_tx_pctl_mode_from_hardware(dev);
1892 old_txpctl = lpphy->txpctl_mode;
1893 old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;
1894 if (old_afe_ovr)
1895 oldgains = lpphy_get_tx_gains(dev);
1896 old_rf = b43_phy_read(dev, B43_LPPHY_RF_PWR_OVERRIDE) & 0xFF;
1897 old_bbmult = lpphy_get_bb_mult(dev);
1898
1899 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
1900
1901 if (bus->chip_id == 0x4325 && bus->chip_rev == 0)
1902 lpphy_papd_cal(dev, gains, 0, 1, 30);
1903 else
1904 lpphy_papd_cal(dev, gains, 0, 1, 65);
1905
1906 if (old_afe_ovr)
1907 lpphy_set_tx_gains(dev, oldgains);
1908 lpphy_set_bb_mult(dev, old_bbmult);
1909 lpphy_set_tx_power_control(dev, old_txpctl);
1910 b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, old_rf);
1911}
1912
1913static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
1914 bool rx, bool pa, struct lpphy_tx_gains *gains)
1915{
1916 struct b43_phy_lp *lpphy = dev->phy.lp;
1917 struct ssb_bus *bus = dev->dev->bus;
1918 const struct lpphy_rx_iq_comp *iqcomp = NULL;
1919 struct lpphy_tx_gains nogains, oldgains;
1920 u16 tmp;
1921 int i, ret;
1922
1923 memset(&nogains, 0, sizeof(nogains));
1924 memset(&oldgains, 0, sizeof(oldgains));
1925
1926 if (bus->chip_id == 0x5354) {
1927 for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) {
1928 if (lpphy_5354_iq_table[i].chan == lpphy->channel) {
1929 iqcomp = &lpphy_5354_iq_table[i];
1930 }
1931 }
1932 } else if (dev->phy.rev >= 2) {
1933 iqcomp = &lpphy_rev2plus_iq_comp;
1934 } else {
1935 for (i = 0; i < ARRAY_SIZE(lpphy_rev0_1_iq_table); i++) {
1936 if (lpphy_rev0_1_iq_table[i].chan == lpphy->channel) {
1937 iqcomp = &lpphy_rev0_1_iq_table[i];
1938 }
1939 }
1940 }
1941
1942 if (B43_WARN_ON(!iqcomp))
1943 return 0;
1944
1945 b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, iqcomp->c1);
1946 b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S,
1947 0x00FF, iqcomp->c0 << 8);
1948
1949 if (noise) {
1950 tx = true;
1951 rx = false;
1952 pa = false;
1953 }
1954
1955 lpphy_set_trsw_over(dev, tx, rx);
1956
1957 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1958 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
1959 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
1960 0xFFF7, pa << 3);
1961 } else {
1962 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20);
1963 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
1964 0xFFDF, pa << 5);
1965 }
1966
1967 tmp = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;
1968
1969 if (noise)
1970 lpphy_set_rx_gain(dev, 0x2D5D);
1971 else {
1972 if (tmp)
1973 oldgains = lpphy_get_tx_gains(dev);
1974 if (!gains)
1975 gains = &nogains;
1976 lpphy_set_tx_gains(dev, *gains);
1977 }
1978
1979 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
1980 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
1981 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
1982 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800);
1983 lpphy_set_deaf(dev, false);
1984 if (noise)
1985 ret = lpphy_calc_rx_iq_comp(dev, 0xFFF0);
1986 else {
1987 lpphy_start_tx_tone(dev, 4000, 100);
1988 ret = lpphy_calc_rx_iq_comp(dev, 0x4000);
1989 lpphy_stop_tx_tone(dev);
1990 }
1991 lpphy_clear_deaf(dev, false);
1992 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFC);
1993 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFF7);
1994 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFDF);
1995 if (!noise) {
1996 if (tmp)
1997 lpphy_set_tx_gains(dev, oldgains);
1998 else
1999 lpphy_disable_tx_gain_override(dev);
2000 }
2001 lpphy_disable_rx_gain_override(dev);
2002 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
2003 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xF7FF);
2004 return ret;
2005}
2006
2007static void lpphy_calibration(struct b43_wldev *dev)
2008{
2009 struct b43_phy_lp *lpphy = dev->phy.lp;
2010 enum b43_lpphy_txpctl_mode saved_pctl_mode;
2011 bool full_cal = false;
2012
2013 if (lpphy->full_calib_chan != lpphy->channel) {
2014 full_cal = true;
2015 lpphy->full_calib_chan = lpphy->channel;
2016 }
2017
2018 b43_mac_suspend(dev);
2019
2020 lpphy_btcoex_override(dev);
2021 if (dev->phy.rev >= 2)
2022 lpphy_save_dig_flt_state(dev);
2023 lpphy_read_tx_pctl_mode_from_hardware(dev);
2024 saved_pctl_mode = lpphy->txpctl_mode;
2025 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
2026 //TODO Perform transmit power table I/Q LO calibration
2027 if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
2028 lpphy_pr41573_workaround(dev);
2029 if ((dev->phy.rev >= 2) && full_cal) {
2030 lpphy_papd_cal_txpwr(dev);
2031 }
2032 lpphy_set_tx_power_control(dev, saved_pctl_mode);
2033 if (dev->phy.rev >= 2)
2034 lpphy_restore_dig_flt_state(dev);
2035 lpphy_rx_iq_cal(dev, true, true, false, false, NULL);
2036
2037 b43_mac_enable(dev);
2038}
2039
1492static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg) 2040static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg)
1493{ 2041{
1494 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); 2042 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
@@ -1533,12 +2081,6 @@ static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
1533 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); 2081 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
1534} 2082}
1535 2083
1536static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
1537 bool blocked)
1538{
1539 //TODO
1540}
1541
1542struct b206x_channel { 2084struct b206x_channel {
1543 u8 channel; 2085 u8 channel;
1544 u16 freq; 2086 u16 freq;
@@ -2004,22 +2546,6 @@ static int lpphy_b2062_tune(struct b43_wldev *dev,
2004 return err; 2546 return err;
2005} 2547}
2006 2548
2007
2008/* This was previously called lpphy_japan_filter */
2009static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel)
2010{
2011 struct b43_phy_lp *lpphy = dev->phy.lp;
2012 u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter!
2013
2014 if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific?
2015 b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9);
2016 if ((dev->phy.rev == 1) && (lpphy->rc_cap))
2017 lpphy_set_rc_cap(dev);
2018 } else {
2019 b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F);
2020 }
2021}
2022
2023static void lpphy_b2063_vco_calib(struct b43_wldev *dev) 2549static void lpphy_b2063_vco_calib(struct b43_wldev *dev)
2024{ 2550{
2025 u16 tmp; 2551 u16 tmp;
@@ -2204,18 +2730,6 @@ static int b43_lpphy_op_init(struct b43_wldev *dev)
2204 return 0; 2730 return 0;
2205} 2731}
2206 2732
2207static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
2208{
2209 if (dev->phy.rev >= 2)
2210 return; // rev2+ doesn't support antenna diversity
2211
2212 if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1))
2213 return;
2214
2215 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2);
2216 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1);
2217}
2218
2219static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev) 2733static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev)
2220{ 2734{
2221 //TODO 2735 //TODO
@@ -2238,6 +2752,11 @@ void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
2238 } 2752 }
2239} 2753}
2240 2754
2755static void b43_lpphy_op_pwork_15sec(struct b43_wldev *dev)
2756{
2757 //TODO
2758}
2759
2241const struct b43_phy_operations b43_phyops_lp = { 2760const struct b43_phy_operations b43_phyops_lp = {
2242 .allocate = b43_lpphy_op_allocate, 2761 .allocate = b43_lpphy_op_allocate,
2243 .free = b43_lpphy_op_free, 2762 .free = b43_lpphy_op_free,
@@ -2255,4 +2774,6 @@ const struct b43_phy_operations b43_phyops_lp = {
2255 .set_rx_antenna = b43_lpphy_op_set_rx_antenna, 2774 .set_rx_antenna = b43_lpphy_op_set_rx_antenna,
2256 .recalc_txpower = b43_lpphy_op_recalc_txpower, 2775 .recalc_txpower = b43_lpphy_op_recalc_txpower,
2257 .adjust_txpower = b43_lpphy_op_adjust_txpower, 2776 .adjust_txpower = b43_lpphy_op_adjust_txpower,
2777 .pwork_15sec = b43_lpphy_op_pwork_15sec,
2778 .pwork_60sec = lpphy_calibration,
2258}; 2779};
diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h
index c3232c17b60a..62737f700cbc 100644
--- a/drivers/net/wireless/b43/phy_lp.h
+++ b/drivers/net/wireless/b43/phy_lp.h
@@ -286,6 +286,7 @@
286#define B43_LPPHY_TR_LOOKUP_6 B43_PHY_OFDM(0xC8) /* TR Lookup 6 */ 286#define B43_LPPHY_TR_LOOKUP_6 B43_PHY_OFDM(0xC8) /* TR Lookup 6 */
287#define B43_LPPHY_TR_LOOKUP_7 B43_PHY_OFDM(0xC9) /* TR Lookup 7 */ 287#define B43_LPPHY_TR_LOOKUP_7 B43_PHY_OFDM(0xC9) /* TR Lookup 7 */
288#define B43_LPPHY_TR_LOOKUP_8 B43_PHY_OFDM(0xCA) /* TR Lookup 8 */ 288#define B43_LPPHY_TR_LOOKUP_8 B43_PHY_OFDM(0xCA) /* TR Lookup 8 */
289#define B43_LPPHY_RF_PWR_OVERRIDE B43_PHY_OFDM(0xD3) /* RF power override */
289 290
290 291
291 292
@@ -871,12 +872,12 @@ struct b43_phy_lp {
871 u8 rssi_gs; 872 u8 rssi_gs;
872 873
873 /* RC cap */ 874 /* RC cap */
874 u8 rc_cap; /* FIXME initial value? */ 875 u8 rc_cap;
875 /* BX arch */ 876 /* BX arch */
876 u8 bx_arch; 877 u8 bx_arch;
877 878
878 /* Full calibration channel */ 879 /* Full calibration channel */
879 u8 full_calib_chan; /* FIXME initial value? */ 880 u8 full_calib_chan;
880 881
881 /* Transmit iqlocal best coeffs */ 882 /* Transmit iqlocal best coeffs */
882 bool tx_iqloc_best_coeffs_valid; 883 bool tx_iqloc_best_coeffs_valid;
@@ -891,6 +892,12 @@ struct b43_phy_lp {
891 892
892 /* The channel we are tuned to */ 893 /* The channel we are tuned to */
893 u8 channel; 894 u8 channel;
895
896 /* The active antenna diversity mode */
897 int antenna;
898
899 /* Frequency of the active TX tone */
900 int tx_tone_freq;
894}; 901};
895 902
896enum tssi_mux_mode { 903enum tssi_mux_mode {
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 9b9044400218..c01b8e02412f 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -342,12 +342,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
342 q->mmio_base + B43_PIO_TXDATA, 342 q->mmio_base + B43_PIO_TXDATA,
343 sizeof(u16)); 343 sizeof(u16));
344 if (data_len & 1) { 344 if (data_len & 1) {
345 u8 *tail = wl->pio_tailspace;
346 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
347
345 /* Write the last byte. */ 348 /* Write the last byte. */
346 ctl &= ~B43_PIO_TXCTL_WRITEHI; 349 ctl &= ~B43_PIO_TXCTL_WRITEHI;
347 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 350 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
348 wl->tx_tail[0] = data[data_len - 1]; 351 tail[0] = data[data_len - 1];
349 wl->tx_tail[1] = 0; 352 tail[1] = 0;
350 ssb_block_write(dev->dev, wl->tx_tail, 2, 353 ssb_block_write(dev->dev, tail, 2,
351 q->mmio_base + B43_PIO_TXDATA, 354 q->mmio_base + B43_PIO_TXDATA,
352 sizeof(u16)); 355 sizeof(u16));
353 } 356 }
@@ -393,31 +396,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
393 q->mmio_base + B43_PIO8_TXDATA, 396 q->mmio_base + B43_PIO8_TXDATA,
394 sizeof(u32)); 397 sizeof(u32));
395 if (data_len & 3) { 398 if (data_len & 3) {
396 wl->tx_tail[3] = 0; 399 u8 *tail = wl->pio_tailspace;
400 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
401
402 memset(tail, 0, 4);
397 /* Write the last few bytes. */ 403 /* Write the last few bytes. */
398 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | 404 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
399 B43_PIO8_TXCTL_24_31); 405 B43_PIO8_TXCTL_24_31);
400 switch (data_len & 3) { 406 switch (data_len & 3) {
401 case 3: 407 case 3:
402 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; 408 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
403 wl->tx_tail[0] = data[data_len - 3]; 409 tail[0] = data[data_len - 3];
404 wl->tx_tail[1] = data[data_len - 2]; 410 tail[1] = data[data_len - 2];
405 wl->tx_tail[2] = data[data_len - 1]; 411 tail[2] = data[data_len - 1];
406 break; 412 break;
407 case 2: 413 case 2:
408 ctl |= B43_PIO8_TXCTL_8_15; 414 ctl |= B43_PIO8_TXCTL_8_15;
409 wl->tx_tail[0] = data[data_len - 2]; 415 tail[0] = data[data_len - 2];
410 wl->tx_tail[1] = data[data_len - 1]; 416 tail[1] = data[data_len - 1];
411 wl->tx_tail[2] = 0;
412 break; 417 break;
413 case 1: 418 case 1:
414 wl->tx_tail[0] = data[data_len - 1]; 419 tail[0] = data[data_len - 1];
415 wl->tx_tail[1] = 0;
416 wl->tx_tail[2] = 0;
417 break; 420 break;
418 } 421 }
419 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 422 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
420 ssb_block_write(dev->dev, wl->tx_tail, 4, 423 ssb_block_write(dev->dev, tail, 4,
421 q->mmio_base + B43_PIO8_TXDATA, 424 q->mmio_base + B43_PIO8_TXDATA,
422 sizeof(u32)); 425 sizeof(u32));
423 } 426 }
@@ -456,6 +459,7 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
456 int err; 459 int err;
457 unsigned int hdrlen; 460 unsigned int hdrlen;
458 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 461 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
462 struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
459 463
460 B43_WARN_ON(list_empty(&q->packets_list)); 464 B43_WARN_ON(list_empty(&q->packets_list));
461 pack = list_entry(q->packets_list.next, 465 pack = list_entry(q->packets_list.next,
@@ -463,7 +467,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
463 467
464 cookie = generate_cookie(q, pack); 468 cookie = generate_cookie(q, pack);
465 hdrlen = b43_txhdr_size(dev); 469 hdrlen = b43_txhdr_size(dev);
466 err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb, 470 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
471 B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
472 err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
467 info, cookie); 473 info, cookie);
468 if (err) 474 if (err)
469 return err; 475 return err;
@@ -477,9 +483,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
477 483
478 pack->skb = skb; 484 pack->skb = skb;
479 if (q->rev >= 8) 485 if (q->rev >= 8)
480 pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); 486 pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
481 else 487 else
482 pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); 488 pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
483 489
484 /* Remove it from the list of available packet slots. 490 /* Remove it from the list of available packet slots.
485 * It will be put back when we receive the status report. */ 491 * It will be put back when we receive the status report. */
@@ -625,8 +631,11 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
625 unsigned int i, padding; 631 unsigned int i, padding;
626 struct sk_buff *skb; 632 struct sk_buff *skb;
627 const char *err_msg = NULL; 633 const char *err_msg = NULL;
634 struct b43_rxhdr_fw4 *rxhdr =
635 (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
628 636
629 memset(&wl->rxhdr, 0, sizeof(wl->rxhdr)); 637 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
638 memset(rxhdr, 0, sizeof(*rxhdr));
630 639
631 /* Check if we have data and wait for it to get ready. */ 640 /* Check if we have data and wait for it to get ready. */
632 if (q->rev >= 8) { 641 if (q->rev >= 8) {
@@ -664,16 +673,16 @@ data_ready:
664 673
665 /* Get the preamble (RX header) */ 674 /* Get the preamble (RX header) */
666 if (q->rev >= 8) { 675 if (q->rev >= 8) {
667 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), 676 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
668 q->mmio_base + B43_PIO8_RXDATA, 677 q->mmio_base + B43_PIO8_RXDATA,
669 sizeof(u32)); 678 sizeof(u32));
670 } else { 679 } else {
671 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), 680 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
672 q->mmio_base + B43_PIO_RXDATA, 681 q->mmio_base + B43_PIO_RXDATA,
673 sizeof(u16)); 682 sizeof(u16));
674 } 683 }
675 /* Sanity checks. */ 684 /* Sanity checks. */
676 len = le16_to_cpu(wl->rxhdr.frame_len); 685 len = le16_to_cpu(rxhdr->frame_len);
677 if (unlikely(len > 0x700)) { 686 if (unlikely(len > 0x700)) {
678 err_msg = "len > 0x700"; 687 err_msg = "len > 0x700";
679 goto rx_error; 688 goto rx_error;
@@ -683,7 +692,7 @@ data_ready:
683 goto rx_error; 692 goto rx_error;
684 } 693 }
685 694
686 macstat = le32_to_cpu(wl->rxhdr.mac_status); 695 macstat = le32_to_cpu(rxhdr->mac_status);
687 if (macstat & B43_RX_MAC_FCSERR) { 696 if (macstat & B43_RX_MAC_FCSERR) {
688 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { 697 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
689 /* Drop frames with failed FCS. */ 698 /* Drop frames with failed FCS. */
@@ -708,22 +717,25 @@ data_ready:
708 q->mmio_base + B43_PIO8_RXDATA, 717 q->mmio_base + B43_PIO8_RXDATA,
709 sizeof(u32)); 718 sizeof(u32));
710 if (len & 3) { 719 if (len & 3) {
720 u8 *tail = wl->pio_tailspace;
721 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
722
711 /* Read the last few bytes. */ 723 /* Read the last few bytes. */
712 ssb_block_read(dev->dev, wl->rx_tail, 4, 724 ssb_block_read(dev->dev, tail, 4,
713 q->mmio_base + B43_PIO8_RXDATA, 725 q->mmio_base + B43_PIO8_RXDATA,
714 sizeof(u32)); 726 sizeof(u32));
715 switch (len & 3) { 727 switch (len & 3) {
716 case 3: 728 case 3:
717 skb->data[len + padding - 3] = wl->rx_tail[0]; 729 skb->data[len + padding - 3] = tail[0];
718 skb->data[len + padding - 2] = wl->rx_tail[1]; 730 skb->data[len + padding - 2] = tail[1];
719 skb->data[len + padding - 1] = wl->rx_tail[2]; 731 skb->data[len + padding - 1] = tail[2];
720 break; 732 break;
721 case 2: 733 case 2:
722 skb->data[len + padding - 2] = wl->rx_tail[0]; 734 skb->data[len + padding - 2] = tail[0];
723 skb->data[len + padding - 1] = wl->rx_tail[1]; 735 skb->data[len + padding - 1] = tail[1];
724 break; 736 break;
725 case 1: 737 case 1:
726 skb->data[len + padding - 1] = wl->rx_tail[0]; 738 skb->data[len + padding - 1] = tail[0];
727 break; 739 break;
728 } 740 }
729 } 741 }
@@ -732,22 +744,29 @@ data_ready:
732 q->mmio_base + B43_PIO_RXDATA, 744 q->mmio_base + B43_PIO_RXDATA,
733 sizeof(u16)); 745 sizeof(u16));
734 if (len & 1) { 746 if (len & 1) {
747 u8 *tail = wl->pio_tailspace;
748 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
749
735 /* Read the last byte. */ 750 /* Read the last byte. */
736 ssb_block_read(dev->dev, wl->rx_tail, 2, 751 ssb_block_read(dev->dev, tail, 2,
737 q->mmio_base + B43_PIO_RXDATA, 752 q->mmio_base + B43_PIO_RXDATA,
738 sizeof(u16)); 753 sizeof(u16));
739 skb->data[len + padding - 1] = wl->rx_tail[0]; 754 skb->data[len + padding - 1] = tail[0];
740 } 755 }
741 } 756 }
742 757
743 b43_rx(q->dev, skb, &wl->rxhdr); 758 b43_rx(q->dev, skb, rxhdr);
744 759
745 return 1; 760 return 1;
746 761
747rx_error: 762rx_error:
748 if (err_msg) 763 if (err_msg)
749 b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg); 764 b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
750 b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY); 765 if (q->rev >= 8)
766 b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
767 else
768 b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
769
751 return 1; 770 return 1;
752} 771}
753 772
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index f4e9695ec186..eda06529ef5f 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
27 27
28*/ 28*/
29 29
30#include "b43.h" 30#include "xmit.h"
31#include "phy_common.h" 31#include "phy_common.h"
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
@@ -621,7 +621,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
621 (phystat0 & B43_RX_PHYST0_OFDM), 621 (phystat0 & B43_RX_PHYST0_OFDM),
622 (phystat0 & B43_RX_PHYST0_GAINCTL), 622 (phystat0 & B43_RX_PHYST0_GAINCTL),
623 (phystat3 & B43_RX_PHYST3_TRSTATE)); 623 (phystat3 & B43_RX_PHYST3_TRSTATE));
624 status.qual = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
625 } 624 }
626 625
627 if (phystat0 & B43_RX_PHYST0_OFDM) 626 if (phystat0 & B43_RX_PHYST0_OFDM)
@@ -690,10 +689,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
690 } 689 }
691 690
692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 691 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
693 692 ieee80211_rx_ni(dev->wl->hw, skb);
694 local_bh_disable();
695 ieee80211_rx(dev->wl->hw, skb);
696 local_bh_enable();
697 693
698#if B43_DEBUG 694#if B43_DEBUG
699 dev->rx_count++; 695 dev->rx_count++;
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 3530de871873..d23ff9fe0c9e 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -2,6 +2,8 @@
2#define B43_XMIT_H_ 2#define B43_XMIT_H_
3 3
4#include "main.h" 4#include "main.h"
5#include <net/mac80211.h>
6
5 7
6#define _b43_declare_plcp_hdr(size) \ 8#define _b43_declare_plcp_hdr(size) \
7 struct b43_plcp_hdr##size { \ 9 struct b43_plcp_hdr##size { \
@@ -332,4 +334,21 @@ static inline u8 b43_kidx_to_raw(struct b43_wldev *dev, u8 firmware_kidx)
332 return raw_kidx; 334 return raw_kidx;
333} 335}
334 336
337/* struct b43_private_tx_info - TX info private to b43.
338 * The structure is placed in (struct ieee80211_tx_info *)->rate_driver_data
339 *
340 * @bouncebuffer: DMA Bouncebuffer (if used)
341 */
342struct b43_private_tx_info {
343 void *bouncebuffer;
344};
345
346static inline struct b43_private_tx_info *
347b43_get_priv_tx_info(struct ieee80211_tx_info *info)
348{
349 BUILD_BUG_ON(sizeof(struct b43_private_tx_info) >
350 sizeof(info->rate_driver_data));
351 return (struct b43_private_tx_info *)info->rate_driver_data;
352}
353
335#endif /* B43_XMIT_H_ */ 354#endif /* B43_XMIT_H_ */
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig
index 94a463478053..1ffa28835c58 100644
--- a/drivers/net/wireless/b43legacy/Kconfig
+++ b/drivers/net/wireless/b43legacy/Kconfig
@@ -1,6 +1,6 @@
1config B43LEGACY 1config B43LEGACY
2 tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)" 2 tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)"
3 depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA 3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
4 select SSB 4 select SSB
5 select FW_LOADER 5 select FW_LOADER
6 ---help--- 6 ---help---
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 038baa8869e2..89fe2f972c72 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -29,8 +29,6 @@
29 29
30#define B43legacy_IRQWAIT_MAX_RETRIES 20 30#define B43legacy_IRQWAIT_MAX_RETRIES 20
31 31
32#define B43legacy_RX_MAX_SSI 60 /* best guess at max ssi */
33
34/* MMIO offsets */ 32/* MMIO offsets */
35#define B43legacy_MMIO_DMA0_REASON 0x20 33#define B43legacy_MMIO_DMA0_REASON 0x20
36#define B43legacy_MMIO_DMA0_IRQ_MASK 0x24 34#define B43legacy_MMIO_DMA0_IRQ_MASK 0x24
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 866403415811..0a86bdf53154 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1240,8 +1240,9 @@ struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
1240} 1240}
1241 1241
1242static int dma_tx_fragment(struct b43legacy_dmaring *ring, 1242static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1243 struct sk_buff *skb) 1243 struct sk_buff **in_skb)
1244{ 1244{
1245 struct sk_buff *skb = *in_skb;
1245 const struct b43legacy_dma_ops *ops = ring->ops; 1246 const struct b43legacy_dma_ops *ops = ring->ops;
1246 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1247 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1247 u8 *header; 1248 u8 *header;
@@ -1305,8 +1306,14 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1305 } 1306 }
1306 1307
1307 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); 1308 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1309 memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
1310 bounce_skb->dev = skb->dev;
1311 skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
1312 info = IEEE80211_SKB_CB(bounce_skb);
1313
1308 dev_kfree_skb_any(skb); 1314 dev_kfree_skb_any(skb);
1309 skb = bounce_skb; 1315 skb = bounce_skb;
1316 *in_skb = bounce_skb;
1310 meta->skb = skb; 1317 meta->skb = skb;
1311 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1318 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1312 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1319 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
@@ -1360,8 +1367,10 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1360 struct sk_buff *skb) 1367 struct sk_buff *skb)
1361{ 1368{
1362 struct b43legacy_dmaring *ring; 1369 struct b43legacy_dmaring *ring;
1370 struct ieee80211_hdr *hdr;
1363 int err = 0; 1371 int err = 0;
1364 unsigned long flags; 1372 unsigned long flags;
1373 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1365 1374
1366 ring = priority_to_txring(dev, skb_get_queue_mapping(skb)); 1375 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1367 spin_lock_irqsave(&ring->lock, flags); 1376 spin_lock_irqsave(&ring->lock, flags);
@@ -1386,7 +1395,11 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1386 goto out_unlock; 1395 goto out_unlock;
1387 } 1396 }
1388 1397
1389 err = dma_tx_fragment(ring, skb); 1398 /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
1399 * into the skb data or cb now. */
1400 hdr = NULL;
1401 info = NULL;
1402 err = dma_tx_fragment(ring, &skb);
1390 if (unlikely(err == -ENOKEY)) { 1403 if (unlikely(err == -ENOKEY)) {
1391 /* Drop this packet, as we don't have the encryption key 1404 /* Drop this packet, as we don't have the encryption key
1392 * anymore and must not transmit it unencrypted. */ 1405 * anymore and must not transmit it unencrypted. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4b60148a5e61..ab6a18c2e9d9 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2677,7 +2677,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2677 if (conf->channel->hw_value != phy->channel) 2677 if (conf->channel->hw_value != phy->channel)
2678 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0); 2678 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0);
2679 2679
2680 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 2680 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
2681 2681
2682 /* Adjust the desired TX power level. */ 2682 /* Adjust the desired TX power level. */
2683 if (conf->power_level != 0) { 2683 if (conf->power_level != 0) {
@@ -3593,7 +3593,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
3593{ 3593{
3594 struct b43legacy_wl *wl = dev->wl; 3594 struct b43legacy_wl *wl = dev->wl;
3595 struct ssb_bus *bus = dev->dev->bus; 3595 struct ssb_bus *bus = dev->dev->bus;
3596 struct pci_dev *pdev = bus->host_pci; 3596 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
3597 int err; 3597 int err;
3598 int have_bphy = 0; 3598 int have_bphy = 0;
3599 int have_gphy = 0; 3599 int have_gphy = 0;
@@ -3707,7 +3707,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
3707 3707
3708 if (!list_empty(&wl->devlist)) { 3708 if (!list_empty(&wl->devlist)) {
3709 /* We are not the first core on this chip. */ 3709 /* We are not the first core on this chip. */
3710 pdev = dev->bus->host_pci; 3710 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
3711 /* Only special chips support more than one wireless 3711 /* Only special chips support more than one wireless
3712 * core, although some of the other chips have more than 3712 * core, although some of the other chips have more than
3713 * one wireless core as well. Check for this and 3713 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 103f3c9e7f58..9c8882d9275e 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -549,7 +549,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL), 549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL),
550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); 550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
551 status.noise = dev->stats.link_noise; 551 status.noise = dev->stats.link_noise;
552 status.qual = (jssi * 100) / B43legacy_RX_MAX_SSI;
553 /* change to support A PHY */ 552 /* change to support A PHY */
554 if (phystat0 & B43legacy_RX_PHYST0_OFDM) 553 if (phystat0 & B43legacy_RX_PHYST0_OFDM)
555 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false); 554 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index c15db2293515..287d82728bc3 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -1,7 +1,8 @@
1config HOSTAP 1config HOSTAP
2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" 2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
3 depends on WLAN_80211
4 select WIRELESS_EXT 3 select WIRELESS_EXT
4 select WEXT_SPY
5 select WEXT_PRIV
5 select CRYPTO 6 select CRYPTO
6 select CRYPTO_ARC4 7 select CRYPTO_ARC4
7 select CRYPTO_ECB 8 select CRYPTO_ECB
diff --git a/drivers/net/wireless/i82593.h b/drivers/net/wireless/i82593.h
deleted file mode 100644
index afac5c7a323d..000000000000
--- a/drivers/net/wireless/i82593.h
+++ /dev/null
@@ -1,229 +0,0 @@
1/*
2 * Definitions for Intel 82593 CSMA/CD Core LAN Controller
3 * The definitions are taken from the 1992 users manual with Intel
4 * order number 297125-001.
5 *
6 * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
7 *
8 * Copyright 1994, Anders Klemets <klemets@it.kth.se>
9 *
10 * HISTORY
11 * i82593.h,v
12 * Revision 1.4 2005/11/4 09:15:00 baroniunas
13 * Modified copyright with permission of author as follows:
14 *
15 * "If I82539.H is the only file with my copyright statement
16 * that is included in the Source Forge project, then you have
17 * my approval to change the copyright statement to be a GPL
18 * license, in the way you proposed on October 10."
19 *
20 * Revision 1.1 1996/07/17 15:23:12 root
21 * Initial revision
22 *
23 * Revision 1.3 1995/04/05 15:13:58 adj
24 * Initial alpha release
25 *
26 * Revision 1.2 1994/06/16 23:57:31 klemets
27 * Mirrored all the fields in the configuration block.
28 *
29 * Revision 1.1 1994/06/02 20:25:34 klemets
30 * Initial revision
31 *
32 *
33 */
34#ifndef _I82593_H
35#define _I82593_H
36
37/* Intel 82593 CSMA/CD Core LAN Controller */
38
39/* Port 0 Command Register definitions */
40
41/* Execution operations */
42#define OP0_NOP 0 /* CHNL = 0 */
43#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */
44#define OP0_IA_SETUP 1
45#define OP0_CONFIGURE 2
46#define OP0_MC_SETUP 3
47#define OP0_TRANSMIT 4
48#define OP0_TDR 5
49#define OP0_DUMP 6
50#define OP0_DIAGNOSE 7
51#define OP0_TRANSMIT_NO_CRC 9
52#define OP0_RETRANSMIT 12
53#define OP0_ABORT 13
54/* Reception operations */
55#define OP0_RCV_ENABLE 8
56#define OP0_RCV_DISABLE 10
57#define OP0_STOP_RCV 11
58/* Status pointer control operations */
59#define OP0_FIX_PTR 15 /* CHNL = 1 */
60#define OP0_RLS_PTR 15 /* CHNL = 0 */
61#define OP0_RESET 14
62
63#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */
64#define CR0_STATUS_0 0x00
65#define CR0_STATUS_1 0x20
66#define CR0_STATUS_2 0x40
67#define CR0_STATUS_3 0x60
68#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */
69
70/* Port 0 Status Register definitions */
71
72#define SR0_NO_RESULT 0 /* dummy */
73#define SR0_EVENT_MASK 0x0f
74#define SR0_IA_SETUP_DONE 1
75#define SR0_CONFIGURE_DONE 2
76#define SR0_MC_SETUP_DONE 3
77#define SR0_TRANSMIT_DONE 4
78#define SR0_TDR_DONE 5
79#define SR0_DUMP_DONE 6
80#define SR0_DIAGNOSE_PASSED 7
81#define SR0_TRANSMIT_NO_CRC_DONE 9
82#define SR0_RETRANSMIT_DONE 12
83#define SR0_EXECUTION_ABORTED 13
84#define SR0_END_OF_FRAME 8
85#define SR0_RECEPTION_ABORTED 10
86#define SR0_DIAGNOSE_FAILED 15
87#define SR0_STOP_REG_HIT 11
88
89#define SR0_CHNL (1 << 4)
90#define SR0_EXECUTION (1 << 5)
91#define SR0_RECEPTION (1 << 6)
92#define SR0_INTERRUPT (1 << 7)
93#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION)
94
95#define SR3_EXEC_STATE_MASK 0x03
96#define SR3_EXEC_IDLE 0
97#define SR3_TX_ABORT_IN_PROGRESS 1
98#define SR3_EXEC_ACTIVE 2
99#define SR3_ABORT_IN_PROGRESS 3
100#define SR3_EXEC_CHNL (1 << 2)
101#define SR3_STP_ON_NO_RSRC (1 << 3)
102#define SR3_RCVING_NO_RSRC (1 << 4)
103#define SR3_RCV_STATE_MASK 0x60
104#define SR3_RCV_IDLE 0x00
105#define SR3_RCV_READY 0x20
106#define SR3_RCV_ACTIVE 0x40
107#define SR3_RCV_STOP_IN_PROG 0x60
108#define SR3_RCV_CHNL (1 << 7)
109
110/* Port 1 Command Register definitions */
111
112#define OP1_NOP 0
113#define OP1_SWIT_TO_PORT_0 1
114#define OP1_INT_DISABLE 2
115#define OP1_INT_ENABLE 3
116#define OP1_SET_TS 5
117#define OP1_RST_TS 7
118#define OP1_POWER_DOWN 8
119#define OP1_RESET_RING_MNGMT 11
120#define OP1_RESET 14
121#define OP1_SEL_RST 15
122
123#define CR1_STATUS_4 0x00
124#define CR1_STATUS_5 0x20
125#define CR1_STATUS_6 0x40
126#define CR1_STOP_REG_UPDATE (1 << 7)
127
128/* Receive frame status bits */
129
130#define RX_RCLD (1 << 0)
131#define RX_IA_MATCH (1 << 1)
132#define RX_NO_AD_MATCH (1 << 2)
133#define RX_NO_SFD (1 << 3)
134#define RX_SRT_FRM (1 << 7)
135#define RX_OVRRUN (1 << 8)
136#define RX_ALG_ERR (1 << 10)
137#define RX_CRC_ERR (1 << 11)
138#define RX_LEN_ERR (1 << 12)
139#define RX_RCV_OK (1 << 13)
140#define RX_TYP_LEN (1 << 15)
141
142/* Transmit status bits */
143
144#define TX_NCOL_MASK 0x0f
145#define TX_FRTL (1 << 4)
146#define TX_MAX_COL (1 << 5)
147#define TX_HRT_BEAT (1 << 6)
148#define TX_DEFER (1 << 7)
149#define TX_UND_RUN (1 << 8)
150#define TX_LOST_CTS (1 << 9)
151#define TX_LOST_CRS (1 << 10)
152#define TX_LTCOL (1 << 11)
153#define TX_OK (1 << 13)
154#define TX_COLL (1 << 15)
155
156struct i82593_conf_block {
157 u_char fifo_limit : 4,
158 forgnesi : 1,
159 fifo_32 : 1,
160 d6mod : 1,
161 throttle_enb : 1;
162 u_char throttle : 6,
163 cntrxint : 1,
164 contin : 1;
165 u_char addr_len : 3,
166 acloc : 1,
167 preamb_len : 2,
168 loopback : 2;
169 u_char lin_prio : 3,
170 tbofstop : 1,
171 exp_prio : 3,
172 bof_met : 1;
173 u_char : 4,
174 ifrm_spc : 4;
175 u_char : 5,
176 slottim_low : 3;
177 u_char slottim_hi : 3,
178 : 1,
179 max_retr : 4;
180 u_char prmisc : 1,
181 bc_dis : 1,
182 : 1,
183 crs_1 : 1,
184 nocrc_ins : 1,
185 crc_1632 : 1,
186 : 1,
187 crs_cdt : 1;
188 u_char cs_filter : 3,
189 crs_src : 1,
190 cd_filter : 3,
191 : 1;
192 u_char : 2,
193 min_fr_len : 6;
194 u_char lng_typ : 1,
195 lng_fld : 1,
196 rxcrc_xf : 1,
197 artx : 1,
198 sarec : 1,
199 tx_jabber : 1, /* why is this called max_len in the manual? */
200 hash_1 : 1,
201 lbpkpol : 1;
202 u_char : 6,
203 fdx : 1,
204 : 1;
205 u_char dummy_6 : 6, /* supposed to be ones */
206 mult_ia : 1,
207 dis_bof : 1;
208 u_char dummy_1 : 1, /* supposed to be one */
209 tx_ifs_retrig : 2,
210 mc_all : 1,
211 rcv_mon : 2,
212 frag_acpt : 1,
213 tstrttrs : 1;
214 u_char fretx : 1,
215 runt_eop : 1,
216 hw_sw_pin : 1,
217 big_endn : 1,
218 syncrqs : 1,
219 sttlen : 1,
220 tx_eop : 1,
221 rx_eop : 1;
222 u_char rbuf_size : 5,
223 rcvstop : 1,
224 : 2;
225};
226
227#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
228
229#endif /* _I82593_H */
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index a8131384c6b9..2715b101aded 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -4,8 +4,10 @@
4 4
5config IPW2100 5config IPW2100
6 tristate "Intel PRO/Wireless 2100 Network Connection" 6 tristate "Intel PRO/Wireless 2100 Network Connection"
7 depends on PCI && WLAN_80211 && CFG80211 7 depends on PCI && CFG80211
8 select WIRELESS_EXT 8 select WIRELESS_EXT
9 select WEXT_SPY
10 select WEXT_PRIV
9 select FW_LOADER 11 select FW_LOADER
10 select LIB80211 12 select LIB80211
11 select LIBIPW 13 select LIBIPW
@@ -63,8 +65,10 @@ config IPW2100_DEBUG
63 65
64config IPW2200 66config IPW2200
65 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" 67 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
66 depends on PCI && WLAN_80211 && CFG80211 68 depends on PCI && CFG80211 && CFG80211_WEXT
67 select WIRELESS_EXT 69 select WIRELESS_EXT
70 select WEXT_SPY
71 select WEXT_PRIV
68 select FW_LOADER 72 select FW_LOADER
69 select LIB80211 73 select LIB80211
70 select LIBIPW 74 select LIBIPW
@@ -150,8 +154,9 @@ config IPW2200_DEBUG
150 154
151config LIBIPW 155config LIBIPW
152 tristate 156 tristate
153 depends on PCI && WLAN_80211 && CFG80211 157 depends on PCI && CFG80211
154 select WIRELESS_EXT 158 select WIRELESS_EXT
159 select WEXT_SPY
155 select CRYPTO 160 select CRYPTO
156 select CRYPTO_ARC4 161 select CRYPTO_ARC4
157 select CRYPTO_ECB 162 select CRYPTO_ECB
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 6e2fc0cb6f8a..6c836c892e43 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -296,6 +296,33 @@ static const char *command_types[] = {
296}; 296};
297#endif 297#endif
298 298
299#define WEXT_USECHANNELS 1
300
301static const long ipw2100_frequencies[] = {
302 2412, 2417, 2422, 2427,
303 2432, 2437, 2442, 2447,
304 2452, 2457, 2462, 2467,
305 2472, 2484
306};
307
308#define FREQ_COUNT ARRAY_SIZE(ipw2100_frequencies)
309
310static const long ipw2100_rates_11b[] = {
311 1000000,
312 2000000,
313 5500000,
314 11000000
315};
316
317static struct ieee80211_rate ipw2100_bg_rates[] = {
318 { .bitrate = 10 },
319 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
320 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
321 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
322};
323
324#define RATE_COUNT ARRAY_SIZE(ipw2100_rates_11b)
325
299/* Pre-decl until we get the code solid and then we can clean it up */ 326/* Pre-decl until we get the code solid and then we can clean it up */
300static void ipw2100_tx_send_commands(struct ipw2100_priv *priv); 327static void ipw2100_tx_send_commands(struct ipw2100_priv *priv);
301static void ipw2100_tx_send_data(struct ipw2100_priv *priv); 328static void ipw2100_tx_send_data(struct ipw2100_priv *priv);
@@ -1141,6 +1168,7 @@ static int rf_kill_active(struct ipw2100_priv *priv)
1141 int i; 1168 int i;
1142 1169
1143 if (!(priv->hw_features & HW_FEATURE_RFKILL)) { 1170 if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
1171 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1144 priv->status &= ~STATUS_RF_KILL_HW; 1172 priv->status &= ~STATUS_RF_KILL_HW;
1145 return 0; 1173 return 0;
1146 } 1174 }
@@ -1151,10 +1179,13 @@ static int rf_kill_active(struct ipw2100_priv *priv)
1151 value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1); 1179 value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1);
1152 } 1180 }
1153 1181
1154 if (value == 0) 1182 if (value == 0) {
1183 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1155 priv->status |= STATUS_RF_KILL_HW; 1184 priv->status |= STATUS_RF_KILL_HW;
1156 else 1185 } else {
1186 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1157 priv->status &= ~STATUS_RF_KILL_HW; 1187 priv->status &= ~STATUS_RF_KILL_HW;
1188 }
1158 1189
1159 return (value == 0); 1190 return (value == 0);
1160} 1191}
@@ -1814,13 +1845,6 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1814 return rc; 1845 return rc;
1815} 1846}
1816 1847
1817/* Called by register_netdev() */
1818static int ipw2100_net_init(struct net_device *dev)
1819{
1820 struct ipw2100_priv *priv = libipw_priv(dev);
1821 return ipw2100_up(priv, 1);
1822}
1823
1824static void ipw2100_down(struct ipw2100_priv *priv) 1848static void ipw2100_down(struct ipw2100_priv *priv)
1825{ 1849{
1826 unsigned long flags; 1850 unsigned long flags;
@@ -1875,6 +1899,64 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1875 netif_stop_queue(priv->net_dev); 1899 netif_stop_queue(priv->net_dev);
1876} 1900}
1877 1901
1902/* Called by register_netdev() */
1903static int ipw2100_net_init(struct net_device *dev)
1904{
1905 struct ipw2100_priv *priv = libipw_priv(dev);
1906 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
1907 struct wireless_dev *wdev = &priv->ieee->wdev;
1908 int ret;
1909 int i;
1910
1911 ret = ipw2100_up(priv, 1);
1912 if (ret)
1913 return ret;
1914
1915 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
1916
1917 /* fill-out priv->ieee->bg_band */
1918 if (geo->bg_channels) {
1919 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
1920
1921 bg_band->band = IEEE80211_BAND_2GHZ;
1922 bg_band->n_channels = geo->bg_channels;
1923 bg_band->channels =
1924 kzalloc(geo->bg_channels *
1925 sizeof(struct ieee80211_channel), GFP_KERNEL);
1926 /* translate geo->bg to bg_band.channels */
1927 for (i = 0; i < geo->bg_channels; i++) {
1928 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
1929 bg_band->channels[i].center_freq = geo->bg[i].freq;
1930 bg_band->channels[i].hw_value = geo->bg[i].channel;
1931 bg_band->channels[i].max_power = geo->bg[i].max_power;
1932 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
1933 bg_band->channels[i].flags |=
1934 IEEE80211_CHAN_PASSIVE_SCAN;
1935 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
1936 bg_band->channels[i].flags |=
1937 IEEE80211_CHAN_NO_IBSS;
1938 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
1939 bg_band->channels[i].flags |=
1940 IEEE80211_CHAN_RADAR;
1941 /* No equivalent for LIBIPW_CH_80211H_RULES,
1942 LIBIPW_CH_UNIFORM_SPREADING, or
1943 LIBIPW_CH_B_ONLY... */
1944 }
1945 /* point at bitrate info */
1946 bg_band->bitrates = ipw2100_bg_rates;
1947 bg_band->n_bitrates = RATE_COUNT;
1948
1949 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
1950 }
1951
1952 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
1953 if (wiphy_register(wdev->wiphy)) {
1954 ipw2100_down(priv);
1955 return -EIO;
1956 }
1957 return 0;
1958}
1959
1878static void ipw2100_reset_adapter(struct work_struct *work) 1960static void ipw2100_reset_adapter(struct work_struct *work)
1879{ 1961{
1880 struct ipw2100_priv *priv = 1962 struct ipw2100_priv *priv =
@@ -2090,6 +2172,7 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
2090 priv->net_dev->name); 2172 priv->net_dev->name);
2091 2173
2092 /* RF_KILL is now enabled (else we wouldn't be here) */ 2174 /* RF_KILL is now enabled (else we wouldn't be here) */
2175 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2093 priv->status |= STATUS_RF_KILL_HW; 2176 priv->status |= STATUS_RF_KILL_HW;
2094 2177
2095 /* Make sure the RF Kill check timer is running */ 2178 /* Make sure the RF Kill check timer is running */
@@ -6029,7 +6112,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6029 struct ipw2100_priv *priv; 6112 struct ipw2100_priv *priv;
6030 struct net_device *dev; 6113 struct net_device *dev;
6031 6114
6032 dev = alloc_ieee80211(sizeof(struct ipw2100_priv)); 6115 dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
6033 if (!dev) 6116 if (!dev)
6034 return NULL; 6117 return NULL;
6035 priv = libipw_priv(dev); 6118 priv = libipw_priv(dev);
@@ -6342,7 +6425,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6342 sysfs_remove_group(&pci_dev->dev.kobj, 6425 sysfs_remove_group(&pci_dev->dev.kobj,
6343 &ipw2100_attribute_group); 6426 &ipw2100_attribute_group);
6344 6427
6345 free_ieee80211(dev); 6428 free_ieee80211(dev, 0);
6346 pci_set_drvdata(pci_dev, NULL); 6429 pci_set_drvdata(pci_dev, NULL);
6347 } 6430 }
6348 6431
@@ -6400,7 +6483,10 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6400 if (dev->base_addr) 6483 if (dev->base_addr)
6401 iounmap((void __iomem *)dev->base_addr); 6484 iounmap((void __iomem *)dev->base_addr);
6402 6485
6403 free_ieee80211(dev); 6486 /* wiphy_unregister needs to be here, before free_ieee80211 */
6487 wiphy_unregister(priv->ieee->wdev.wiphy);
6488 kfree(priv->ieee->bg_band.channels);
6489 free_ieee80211(dev, 0);
6404 } 6490 }
6405 6491
6406 pci_release_regions(pci_dev); 6492 pci_release_regions(pci_dev);
@@ -6601,26 +6687,6 @@ static void __exit ipw2100_exit(void)
6601module_init(ipw2100_init); 6687module_init(ipw2100_init);
6602module_exit(ipw2100_exit); 6688module_exit(ipw2100_exit);
6603 6689
6604#define WEXT_USECHANNELS 1
6605
6606static const long ipw2100_frequencies[] = {
6607 2412, 2417, 2422, 2427,
6608 2432, 2437, 2442, 2447,
6609 2452, 2457, 2462, 2467,
6610 2472, 2484
6611};
6612
6613#define FREQ_COUNT ARRAY_SIZE(ipw2100_frequencies)
6614
6615static const long ipw2100_rates_11b[] = {
6616 1000000,
6617 2000000,
6618 5500000,
6619 11000000
6620};
6621
6622#define RATE_COUNT ARRAY_SIZE(ipw2100_rates_11b)
6623
6624static int ipw2100_wx_get_name(struct net_device *dev, 6690static int ipw2100_wx_get_name(struct net_device *dev,
6625 struct iw_request_info *info, 6691 struct iw_request_info *info,
6626 union iwreq_data *wrqu, char *extra) 6692 union iwreq_data *wrqu, char *extra)
@@ -8462,6 +8528,12 @@ static int ipw2100_get_firmware(struct ipw2100_priv *priv,
8462 return 0; 8528 return 0;
8463} 8529}
8464 8530
8531MODULE_FIRMWARE(IPW2100_FW_NAME("-i"));
8532#ifdef CONFIG_IPW2100_MONITOR
8533MODULE_FIRMWARE(IPW2100_FW_NAME("-p"));
8534#endif
8535MODULE_FIRMWARE(IPW2100_FW_NAME(""));
8536
8465static void ipw2100_release_firmware(struct ipw2100_priv *priv, 8537static void ipw2100_release_firmware(struct ipw2100_priv *priv,
8466 struct ipw2100_fw *fw) 8538 struct ipw2100_fw *fw)
8467{ 8539{
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index a6ca536e44f8..c28984ae46ff 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -81,6 +81,11 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
81MODULE_VERSION(DRV_VERSION); 81MODULE_VERSION(DRV_VERSION);
82MODULE_AUTHOR(DRV_COPYRIGHT); 82MODULE_AUTHOR(DRV_COPYRIGHT);
83MODULE_LICENSE("GPL"); 83MODULE_LICENSE("GPL");
84MODULE_FIRMWARE("ipw2200-ibss.fw");
85#ifdef CONFIG_IPW2200_MONITOR
86MODULE_FIRMWARE("ipw2200-sniffer.fw");
87#endif
88MODULE_FIRMWARE("ipw2200-bss.fw");
84 89
85static int cmdlog = 0; 90static int cmdlog = 0;
86static int debug = 0; 91static int debug = 0;
@@ -104,6 +109,25 @@ static int antenna = CFG_SYS_ANTENNA_BOTH;
104static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */ 109static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
105#endif 110#endif
106 111
112static struct ieee80211_rate ipw2200_rates[] = {
113 { .bitrate = 10 },
114 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
115 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
116 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
117 { .bitrate = 60 },
118 { .bitrate = 90 },
119 { .bitrate = 120 },
120 { .bitrate = 180 },
121 { .bitrate = 240 },
122 { .bitrate = 360 },
123 { .bitrate = 480 },
124 { .bitrate = 540 }
125};
126
127#define ipw2200_a_rates (ipw2200_rates + 4)
128#define ipw2200_num_a_rates 8
129#define ipw2200_bg_rates (ipw2200_rates + 0)
130#define ipw2200_num_bg_rates 12
107 131
108#ifdef CONFIG_IPW2200_QOS 132#ifdef CONFIG_IPW2200_QOS
109static int qos_enable = 0; 133static int qos_enable = 0;
@@ -1734,10 +1758,13 @@ static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1734 1758
1735static int rf_kill_active(struct ipw_priv *priv) 1759static int rf_kill_active(struct ipw_priv *priv)
1736{ 1760{
1737 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) 1761 if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
1738 priv->status |= STATUS_RF_KILL_HW; 1762 priv->status |= STATUS_RF_KILL_HW;
1739 else 1763 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
1764 } else {
1740 priv->status &= ~STATUS_RF_KILL_HW; 1765 priv->status &= ~STATUS_RF_KILL_HW;
1766 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
1767 }
1741 1768
1742 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0; 1769 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1743} 1770}
@@ -2020,6 +2047,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
2020 if (inta & IPW_INTA_BIT_RF_KILL_DONE) { 2047 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2021 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n"); 2048 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2022 priv->status |= STATUS_RF_KILL_HW; 2049 priv->status |= STATUS_RF_KILL_HW;
2050 wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
2023 wake_up_interruptible(&priv->wait_command_queue); 2051 wake_up_interruptible(&priv->wait_command_queue);
2024 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); 2052 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2025 cancel_delayed_work(&priv->request_scan); 2053 cancel_delayed_work(&priv->request_scan);
@@ -8655,24 +8683,6 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
8655 * 8683 *
8656 */ 8684 */
8657 8685
8658static int ipw_wx_get_name(struct net_device *dev,
8659 struct iw_request_info *info,
8660 union iwreq_data *wrqu, char *extra)
8661{
8662 struct ipw_priv *priv = libipw_priv(dev);
8663 mutex_lock(&priv->mutex);
8664 if (priv->status & STATUS_RF_KILL_MASK)
8665 strcpy(wrqu->name, "radio off");
8666 else if (!(priv->status & STATUS_ASSOCIATED))
8667 strcpy(wrqu->name, "unassociated");
8668 else
8669 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8670 ipw_modes[priv->assoc_request.ieee_mode]);
8671 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8672 mutex_unlock(&priv->mutex);
8673 return 0;
8674}
8675
8676static int ipw_set_channel(struct ipw_priv *priv, u8 channel) 8686static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8677{ 8687{
8678 if (channel == 0) { 8688 if (channel == 0) {
@@ -9972,7 +9982,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9972/* Rebase the WE IOCTLs to zero for the handler array */ 9982/* Rebase the WE IOCTLs to zero for the handler array */
9973#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT] 9983#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9974static iw_handler ipw_wx_handlers[] = { 9984static iw_handler ipw_wx_handlers[] = {
9975 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name, 9985 IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
9976 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 9986 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9977 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 9987 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9978 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 9988 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
@@ -11275,6 +11285,7 @@ static int ipw_up(struct ipw_priv *priv)
11275 if (!(priv->config & CFG_CUSTOM_MAC)) 11285 if (!(priv->config & CFG_CUSTOM_MAC))
11276 eeprom_parse_mac(priv, priv->mac_addr); 11286 eeprom_parse_mac(priv, priv->mac_addr);
11277 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11287 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11288 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11278 11289
11279 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { 11290 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11280 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE], 11291 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
@@ -11416,16 +11427,100 @@ static void ipw_bg_down(struct work_struct *work)
11416/* Called by register_netdev() */ 11427/* Called by register_netdev() */
11417static int ipw_net_init(struct net_device *dev) 11428static int ipw_net_init(struct net_device *dev)
11418{ 11429{
11430 int i, rc = 0;
11419 struct ipw_priv *priv = libipw_priv(dev); 11431 struct ipw_priv *priv = libipw_priv(dev);
11432 const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
11433 struct wireless_dev *wdev = &priv->ieee->wdev;
11420 mutex_lock(&priv->mutex); 11434 mutex_lock(&priv->mutex);
11421 11435
11422 if (ipw_up(priv)) { 11436 if (ipw_up(priv)) {
11423 mutex_unlock(&priv->mutex); 11437 rc = -EIO;
11424 return -EIO; 11438 goto out;
11439 }
11440
11441 memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
11442
11443 /* fill-out priv->ieee->bg_band */
11444 if (geo->bg_channels) {
11445 struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
11446
11447 bg_band->band = IEEE80211_BAND_2GHZ;
11448 bg_band->n_channels = geo->bg_channels;
11449 bg_band->channels =
11450 kzalloc(geo->bg_channels *
11451 sizeof(struct ieee80211_channel), GFP_KERNEL);
11452 /* translate geo->bg to bg_band.channels */
11453 for (i = 0; i < geo->bg_channels; i++) {
11454 bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
11455 bg_band->channels[i].center_freq = geo->bg[i].freq;
11456 bg_band->channels[i].hw_value = geo->bg[i].channel;
11457 bg_band->channels[i].max_power = geo->bg[i].max_power;
11458 if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11459 bg_band->channels[i].flags |=
11460 IEEE80211_CHAN_PASSIVE_SCAN;
11461 if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
11462 bg_band->channels[i].flags |=
11463 IEEE80211_CHAN_NO_IBSS;
11464 if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
11465 bg_band->channels[i].flags |=
11466 IEEE80211_CHAN_RADAR;
11467 /* No equivalent for LIBIPW_CH_80211H_RULES,
11468 LIBIPW_CH_UNIFORM_SPREADING, or
11469 LIBIPW_CH_B_ONLY... */
11470 }
11471 /* point at bitrate info */
11472 bg_band->bitrates = ipw2200_bg_rates;
11473 bg_band->n_bitrates = ipw2200_num_bg_rates;
11474
11475 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
11476 }
11477
11478 /* fill-out priv->ieee->a_band */
11479 if (geo->a_channels) {
11480 struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
11481
11482 a_band->band = IEEE80211_BAND_5GHZ;
11483 a_band->n_channels = geo->a_channels;
11484 a_band->channels =
11485 kzalloc(geo->a_channels *
11486 sizeof(struct ieee80211_channel), GFP_KERNEL);
11487 /* translate geo->bg to a_band.channels */
11488 for (i = 0; i < geo->a_channels; i++) {
11489 a_band->channels[i].band = IEEE80211_BAND_2GHZ;
11490 a_band->channels[i].center_freq = geo->a[i].freq;
11491 a_band->channels[i].hw_value = geo->a[i].channel;
11492 a_band->channels[i].max_power = geo->a[i].max_power;
11493 if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
11494 a_band->channels[i].flags |=
11495 IEEE80211_CHAN_PASSIVE_SCAN;
11496 if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
11497 a_band->channels[i].flags |=
11498 IEEE80211_CHAN_NO_IBSS;
11499 if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
11500 a_band->channels[i].flags |=
11501 IEEE80211_CHAN_RADAR;
11502 /* No equivalent for LIBIPW_CH_80211H_RULES,
11503 LIBIPW_CH_UNIFORM_SPREADING, or
11504 LIBIPW_CH_B_ONLY... */
11505 }
11506 /* point at bitrate info */
11507 a_band->bitrates = ipw2200_a_rates;
11508 a_band->n_bitrates = ipw2200_num_a_rates;
11509
11510 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11511 }
11512
11513 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11514
11515 /* With that information in place, we can now register the wiphy... */
11516 if (wiphy_register(wdev->wiphy)) {
11517 rc = -EIO;
11518 goto out;
11425 } 11519 }
11426 11520
11521out:
11427 mutex_unlock(&priv->mutex); 11522 mutex_unlock(&priv->mutex);
11428 return 0; 11523 return rc;
11429} 11524}
11430 11525
11431/* PCI driver stuff */ 11526/* PCI driver stuff */
@@ -11556,7 +11651,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11556 if (priv->prom_net_dev) 11651 if (priv->prom_net_dev)
11557 return -EPERM; 11652 return -EPERM;
11558 11653
11559 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv)); 11654 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
11560 if (priv->prom_net_dev == NULL) 11655 if (priv->prom_net_dev == NULL)
11561 return -ENOMEM; 11656 return -ENOMEM;
11562 11657
@@ -11575,7 +11670,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11575 11670
11576 rc = register_netdev(priv->prom_net_dev); 11671 rc = register_netdev(priv->prom_net_dev);
11577 if (rc) { 11672 if (rc) {
11578 free_ieee80211(priv->prom_net_dev); 11673 free_ieee80211(priv->prom_net_dev, 1);
11579 priv->prom_net_dev = NULL; 11674 priv->prom_net_dev = NULL;
11580 return rc; 11675 return rc;
11581 } 11676 }
@@ -11589,7 +11684,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
11589 return; 11684 return;
11590 11685
11591 unregister_netdev(priv->prom_net_dev); 11686 unregister_netdev(priv->prom_net_dev);
11592 free_ieee80211(priv->prom_net_dev); 11687 free_ieee80211(priv->prom_net_dev, 1);
11593 11688
11594 priv->prom_net_dev = NULL; 11689 priv->prom_net_dev = NULL;
11595} 11690}
@@ -11617,7 +11712,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11617 struct ipw_priv *priv; 11712 struct ipw_priv *priv;
11618 int i; 11713 int i;
11619 11714
11620 net_dev = alloc_ieee80211(sizeof(struct ipw_priv)); 11715 net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
11621 if (net_dev == NULL) { 11716 if (net_dev == NULL) {
11622 err = -ENOMEM; 11717 err = -ENOMEM;
11623 goto out; 11718 goto out;
@@ -11765,7 +11860,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11765 pci_disable_device(pdev); 11860 pci_disable_device(pdev);
11766 pci_set_drvdata(pdev, NULL); 11861 pci_set_drvdata(pdev, NULL);
11767 out_free_ieee80211: 11862 out_free_ieee80211:
11768 free_ieee80211(priv->net_dev); 11863 free_ieee80211(priv->net_dev, 0);
11769 out: 11864 out:
11770 return err; 11865 return err;
11771} 11866}
@@ -11832,7 +11927,11 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11832 pci_release_regions(pdev); 11927 pci_release_regions(pdev);
11833 pci_disable_device(pdev); 11928 pci_disable_device(pdev);
11834 pci_set_drvdata(pdev, NULL); 11929 pci_set_drvdata(pdev, NULL);
11835 free_ieee80211(priv->net_dev); 11930 /* wiphy_unregister needs to be here, before free_ieee80211 */
11931 wiphy_unregister(priv->ieee->wdev.wiphy);
11932 kfree(priv->ieee->a_band.channels);
11933 kfree(priv->ieee->bg_band.channels);
11934 free_ieee80211(priv->net_dev, 0);
11836 free_firmware(); 11935 free_firmware();
11837} 11936}
11838 11937
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 1e334ff6bd52..bf45391172f3 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -31,6 +31,7 @@
31#include <linux/ieee80211.h> 31#include <linux/ieee80211.h>
32 32
33#include <net/lib80211.h> 33#include <net/lib80211.h>
34#include <net/cfg80211.h>
34 35
35#define LIBIPW_VERSION "git-1.1.13" 36#define LIBIPW_VERSION "git-1.1.13"
36 37
@@ -783,12 +784,15 @@ struct libipw_geo {
783 784
784struct libipw_device { 785struct libipw_device {
785 struct net_device *dev; 786 struct net_device *dev;
787 struct wireless_dev wdev;
786 struct libipw_security sec; 788 struct libipw_security sec;
787 789
788 /* Bookkeeping structures */ 790 /* Bookkeeping structures */
789 struct libipw_stats ieee_stats; 791 struct libipw_stats ieee_stats;
790 792
791 struct libipw_geo geo; 793 struct libipw_geo geo;
794 struct ieee80211_supported_band bg_band;
795 struct ieee80211_supported_band a_band;
792 796
793 /* Probe / Beacon management */ 797 /* Probe / Beacon management */
794 struct list_head network_free_list; 798 struct list_head network_free_list;
@@ -1014,8 +1018,8 @@ static inline int libipw_is_cck_rate(u8 rate)
1014} 1018}
1015 1019
1016/* ieee80211.c */ 1020/* ieee80211.c */
1017extern void free_ieee80211(struct net_device *dev); 1021extern void free_ieee80211(struct net_device *dev, int monitor);
1018extern struct net_device *alloc_ieee80211(int sizeof_priv); 1022extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
1019extern int libipw_change_mtu(struct net_device *dev, int new_mtu); 1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
1020 1024
1021extern void libipw_networks_age(struct libipw_device *ieee, 1025extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index eb2b60834c17..e8a1ac5f8e11 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -62,6 +62,9 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
62MODULE_AUTHOR(DRV_COPYRIGHT); 62MODULE_AUTHOR(DRV_COPYRIGHT);
63MODULE_LICENSE("GPL"); 63MODULE_LICENSE("GPL");
64 64
65struct cfg80211_ops libipw_config_ops = { };
66void *libipw_wiphy_privid = &libipw_wiphy_privid;
67
65static int libipw_networks_allocate(struct libipw_device *ieee) 68static int libipw_networks_allocate(struct libipw_device *ieee)
66{ 69{
67 if (ieee->networks) 70 if (ieee->networks)
@@ -140,7 +143,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
140} 143}
141EXPORT_SYMBOL(libipw_change_mtu); 144EXPORT_SYMBOL(libipw_change_mtu);
142 145
143struct net_device *alloc_ieee80211(int sizeof_priv) 146struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
144{ 147{
145 struct libipw_device *ieee; 148 struct libipw_device *ieee;
146 struct net_device *dev; 149 struct net_device *dev;
@@ -157,10 +160,31 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
157 160
158 ieee->dev = dev; 161 ieee->dev = dev;
159 162
163 if (!monitor) {
164 ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
165 if (!ieee->wdev.wiphy) {
166 LIBIPW_ERROR("Unable to allocate wiphy.\n");
167 goto failed_free_netdev;
168 }
169
170 ieee->dev->ieee80211_ptr = &ieee->wdev;
171 ieee->wdev.iftype = NL80211_IFTYPE_STATION;
172
173 /* Fill-out wiphy structure bits we know... Not enough info
174 here to call set_wiphy_dev or set MAC address or channel info
175 -- have to do that in ->ndo_init... */
176 ieee->wdev.wiphy->privid = libipw_wiphy_privid;
177
178 ieee->wdev.wiphy->max_scan_ssids = 1;
179 ieee->wdev.wiphy->max_scan_ie_len = 0;
180 ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
181 | BIT(NL80211_IFTYPE_ADHOC);
182 }
183
160 err = libipw_networks_allocate(ieee); 184 err = libipw_networks_allocate(ieee);
161 if (err) { 185 if (err) {
162 LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err); 186 LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
163 goto failed_free_netdev; 187 goto failed_free_wiphy;
164 } 188 }
165 libipw_networks_initialize(ieee); 189 libipw_networks_initialize(ieee);
166 190
@@ -193,19 +217,27 @@ struct net_device *alloc_ieee80211(int sizeof_priv)
193 217
194 return dev; 218 return dev;
195 219
220failed_free_wiphy:
221 if (!monitor)
222 wiphy_free(ieee->wdev.wiphy);
196failed_free_netdev: 223failed_free_netdev:
197 free_netdev(dev); 224 free_netdev(dev);
198failed: 225failed:
199 return NULL; 226 return NULL;
200} 227}
201 228
202void free_ieee80211(struct net_device *dev) 229void free_ieee80211(struct net_device *dev, int monitor)
203{ 230{
204 struct libipw_device *ieee = netdev_priv(dev); 231 struct libipw_device *ieee = netdev_priv(dev);
205 232
206 lib80211_crypt_info_free(&ieee->crypt_info); 233 lib80211_crypt_info_free(&ieee->crypt_info);
207 234
208 libipw_networks_free(ieee); 235 libipw_networks_free(ieee);
236
237 /* free cfg80211 resources */
238 if (!monitor)
239 wiphy_free(ieee->wdev.wiphy);
240
209 free_netdev(dev); 241 free_netdev(dev);
210} 242}
211 243
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 99310c033253..b16b06c2031f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,17 +1,7 @@
1config IWLWIFI 1config IWLWIFI
2 tristate "Intel Wireless Wifi" 2 tristate "Intel Wireless Wifi"
3 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on PCI && MAC80211 && EXPERIMENTAL
4 select LIB80211
5 select FW_LOADER 4 select FW_LOADER
6 select MAC80211_LEDS if IWLWIFI_LEDS
7 select LEDS_CLASS if IWLWIFI_LEDS
8
9config IWLWIFI_LEDS
10 bool "Enable LED support in iwlagn and iwl3945 drivers"
11 depends on IWLWIFI
12 default y
13 ---help---
14 Select this if you want LED support.
15 5
16config IWLWIFI_SPECTRUM_MEASUREMENT 6config IWLWIFI_SPECTRUM_MEASUREMENT
17 bool "Enable Spectrum Measurement in iwlagn driver" 7 bool "Enable Spectrum Measurement in iwlagn driver"
@@ -50,6 +40,24 @@ config IWLWIFI_DEBUGFS
50 ---help--- 40 ---help---
51 Enable creation of debugfs files for the iwlwifi drivers. 41 Enable creation of debugfs files for the iwlwifi drivers.
52 42
43config IWLWIFI_DEVICE_TRACING
44 bool "iwlwifi device access tracing"
45 depends on IWLWIFI
46 depends on EVENT_TRACING
47 help
48 Say Y here to trace all commands, including TX frames and IO
49 accesses, sent to the device. If you say yes, iwlwifi will
50 register with the ftrace framework for event tracing and dump
51 all this information to the ringbuffer, you may need to
52 increase the ringbuffer size. See the ftrace documentation
53 for more information.
54
55 When tracing is not enabled, this option still has some
56 (though rather small) overhead.
57
58 If unsure, say Y so we can help you better when problems
59 occur.
60
53config IWLAGN 61config IWLAGN
54 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)" 62 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
55 depends on IWLWIFI 63 depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1d4e0a226fd4..7f82044af242 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,20 +1,22 @@
1obj-$(CONFIG_IWLWIFI) += iwlcore.o 1obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o 3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
4iwlcore-objs += iwl-scan.o 4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
7iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o 6iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
7iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
8 8
9CFLAGS_iwl-devtrace.o := -I$(src)
10
11# AGN
9obj-$(CONFIG_IWLAGN) += iwlagn.o 12obj-$(CONFIG_IWLAGN) += iwlagn.o
10iwlagn-objs := iwl-agn.o iwl-agn-rs.o 13iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
11 14
12iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 15iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
13iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 16iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
14iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 17iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
15iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 18iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
16 19
20# 3945
17obj-$(CONFIG_IWL3945) += iwl3945.o 21obj-$(CONFIG_IWL3945) += iwl3945.o
18iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
19
20
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 950267ab556a..8414178bcff4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -44,6 +44,7 @@
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 46#include "iwl-5000-hw.h"
47#include "iwl-agn-led.h"
47 48
48/* Highest firmware API version supported */ 49/* Highest firmware API version supported */
49#define IWL1000_UCODE_API_MAX 3 50#define IWL1000_UCODE_API_MAX 3
@@ -76,7 +77,10 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
76/* NIC configuration for 1000 series */ 77/* NIC configuration for 1000 series */
77static void iwl1000_nic_config(struct iwl_priv *priv) 78static void iwl1000_nic_config(struct iwl_priv *priv)
78{ 79{
79 iwl5000_nic_config(priv); 80 /* set CSR_HW_CONFIG_REG for uCode use */
81 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
82 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
83 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
80 84
81 /* Setting digital SVR for 1000 card to 1.32V */ 85 /* Setting digital SVR for 1000 card to 1.32V */
82 /* locking is acquired in iwl_set_bits_mask_prph() function */ 86 /* locking is acquired in iwl_set_bits_mask_prph() function */
@@ -106,9 +110,8 @@ static struct iwl_lib_ops iwl1000_lib = {
106 .send_tx_power = iwl5000_send_tx_power, 110 .send_tx_power = iwl5000_send_tx_power,
107 .update_chain_flags = iwl_update_chain_flags, 111 .update_chain_flags = iwl_update_chain_flags,
108 .apm_ops = { 112 .apm_ops = {
109 .init = iwl5000_apm_init, 113 .init = iwl_apm_init,
110 .reset = iwl5000_apm_reset, 114 .stop = iwl_apm_stop,
111 .stop = iwl5000_apm_stop,
112 .config = iwl1000_nic_config, 115 .config = iwl1000_nic_config,
113 .set_pwr_src = iwl_set_pwr_src, 116 .set_pwr_src = iwl_set_pwr_src,
114 }, 117 },
@@ -142,6 +145,7 @@ static struct iwl_ops iwl1000_ops = {
142 .lib = &iwl1000_lib, 145 .lib = &iwl1000_lib,
143 .hcmd = &iwl5000_hcmd, 146 .hcmd = &iwl5000_hcmd,
144 .utils = &iwl5000_hcmd_utils, 147 .utils = &iwl5000_hcmd_utils,
148 .led = &iwlagn_led_ops,
145}; 149};
146 150
147struct iwl_cfg iwl1000_bgn_cfg = { 151struct iwl_cfg iwl1000_bgn_cfg = {
@@ -152,15 +156,50 @@ struct iwl_cfg iwl1000_bgn_cfg = {
152 .sku = IWL_SKU_G|IWL_SKU_N, 156 .sku = IWL_SKU_G|IWL_SKU_N,
153 .ops = &iwl1000_ops, 157 .ops = &iwl1000_ops,
154 .eeprom_size = OTP_LOW_IMAGE_SIZE, 158 .eeprom_size = OTP_LOW_IMAGE_SIZE,
155 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 159 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
156 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 160 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
161 .num_of_queues = IWL50_NUM_QUEUES,
162 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
157 .mod_params = &iwl50_mod_params, 163 .mod_params = &iwl50_mod_params,
158 .valid_tx_ant = ANT_A, 164 .valid_tx_ant = ANT_A,
159 .valid_rx_ant = ANT_AB, 165 .valid_rx_ant = ANT_AB,
160 .need_pll_cfg = true, 166 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
167 .set_l0s = true,
168 .use_bsm = false,
161 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 169 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
162 .shadow_ram_support = false, 170 .shadow_ram_support = false,
163 .ht_greenfield_support = true, 171 .ht_greenfield_support = true,
172 .led_compensation = 51,
164 .use_rts_for_ht = true, /* use rts/cts protection */ 173 .use_rts_for_ht = true, /* use rts/cts protection */
174 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
175 .support_ct_kill_exit = true,
176 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
165}; 177};
166 178
179struct iwl_cfg iwl1000_bg_cfg = {
180 .name = "1000 Series BG",
181 .fw_name_pre = IWL1000_FW_PRE,
182 .ucode_api_max = IWL1000_UCODE_API_MAX,
183 .ucode_api_min = IWL1000_UCODE_API_MIN,
184 .sku = IWL_SKU_G,
185 .ops = &iwl1000_ops,
186 .eeprom_size = OTP_LOW_IMAGE_SIZE,
187 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
188 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
189 .num_of_queues = IWL50_NUM_QUEUES,
190 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
191 .mod_params = &iwl50_mod_params,
192 .valid_tx_ant = ANT_A,
193 .valid_rx_ant = ANT_AB,
194 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
195 .set_l0s = true,
196 .use_bsm = false,
197 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
198 .shadow_ram_support = false,
199 .ht_greenfield_support = true,
200 .led_compensation = 51,
201 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
202 .support_ct_kill_exit = true,
203};
204
205MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 16772780c5b0..6fd10d443ba3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,12 +71,6 @@
71 71
72#include "iwl-eeprom.h" 72#include "iwl-eeprom.h"
73 73
74/*
75 * uCode queue management definitions ...
76 * Queue #4 is the command queue for 3945 and 4965.
77 */
78#define IWL_CMD_QUEUE_NUM 4
79
80/* Time constants */ 74/* Time constants */
81#define SHORT_SLOT_TIME 9 75#define SHORT_SLOT_TIME 9
82#define LONG_SLOT_TIME 20 76#define LONG_SLOT_TIME 20
@@ -254,12 +248,6 @@ struct iwl3945_eeprom {
254#define TFD_CTL_PAD_SET(n) (n << 28) 248#define TFD_CTL_PAD_SET(n) (n << 28)
255#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) 249#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
256 250
257/*
258 * RX related structures and functions
259 */
260#define RX_FREE_BUFFERS 64
261#define RX_LOW_WATERMARK 8
262
263/* Sizes and addresses for instruction and data memory (SRAM) in 251/* Sizes and addresses for instruction and data memory (SRAM) in
264 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 252 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
265#define IWL39_RTC_INST_LOWER_BOUND (0x000000) 253#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 8c29ded7d02c..a871d09d598f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -24,8 +24,6 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#ifdef CONFIG_IWLWIFI_LEDS
28
29#include <linux/kernel.h> 27#include <linux/kernel.h>
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/init.h> 29#include <linux/init.h>
@@ -43,388 +41,51 @@
43#include "iwl-3945.h" 41#include "iwl-3945.h"
44#include "iwl-core.h" 42#include "iwl-core.h"
45#include "iwl-dev.h" 43#include "iwl-dev.h"
44#include "iwl-3945-led.h"
46 45
47#ifdef CONFIG_IWLWIFI_DEBUG
48static const char *led_type_str[] = {
49 __stringify(IWL_LED_TRG_TX),
50 __stringify(IWL_LED_TRG_RX),
51 __stringify(IWL_LED_TRG_ASSOC),
52 __stringify(IWL_LED_TRG_RADIO),
53 NULL
54};
55#endif /* CONFIG_IWLWIFI_DEBUG */
56
57static const struct {
58 u16 brightness;
59 u8 on_time;
60 u8 off_time;
61} blink_tbl[] =
62{
63 {300, 25, 25},
64 {200, 40, 40},
65 {100, 55, 55},
66 {70, 65, 65},
67 {50, 75, 75},
68 {20, 85, 85},
69 {15, 95, 95 },
70 {10, 110, 110},
71 {5, 130, 130},
72 {0, 167, 167},
73 /* SOLID_ON */
74 {-1, IWL_LED_SOLID, 0}
75};
76
77#define IWL_1MB_RATE (128 * 1024)
78#define IWL_LED_THRESHOLD (16)
79#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /*Exclude Solid on*/
80#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
81
82static void iwl3945_led_cmd_callback(struct iwl_priv *priv,
83 struct iwl_device_cmd *cmd,
84 struct sk_buff *skb)
85{
86}
87
88static inline int iwl3945_brightness_to_idx(enum led_brightness brightness)
89{
90 return fls(0x000000FF & (u32)brightness);
91}
92 46
93/* Send led command */ 47/* Send led command */
94static int iwl_send_led_cmd(struct iwl_priv *priv, 48static int iwl3945_send_led_cmd(struct iwl_priv *priv,
95 struct iwl_led_cmd *led_cmd) 49 struct iwl_led_cmd *led_cmd)
96{ 50{
97 struct iwl_host_cmd cmd = { 51 struct iwl_host_cmd cmd = {
98 .id = REPLY_LEDS_CMD, 52 .id = REPLY_LEDS_CMD,
99 .len = sizeof(struct iwl_led_cmd), 53 .len = sizeof(struct iwl_led_cmd),
100 .data = led_cmd, 54 .data = led_cmd,
101 .flags = CMD_ASYNC, 55 .flags = CMD_ASYNC,
102 .callback = iwl3945_led_cmd_callback, 56 .callback = NULL,
103 }; 57 };
104 58
105 return iwl_send_cmd(priv, &cmd); 59 return iwl_send_cmd(priv, &cmd);
106} 60}
107 61
108
109
110/* Set led on command */
111static int iwl3945_led_pattern(struct iwl_priv *priv, int led_id,
112 unsigned int idx)
113{
114 struct iwl_led_cmd led_cmd = {
115 .id = led_id,
116 .interval = IWL_DEF_LED_INTRVL
117 };
118
119 BUG_ON(idx > IWL_MAX_BLINK_TBL);
120
121 led_cmd.on = blink_tbl[idx].on_time;
122 led_cmd.off = blink_tbl[idx].off_time;
123
124 return iwl_send_led_cmd(priv, &led_cmd);
125}
126
127
128/* Set led on command */ 62/* Set led on command */
129static int iwl3945_led_on(struct iwl_priv *priv, int led_id) 63static int iwl3945_led_on(struct iwl_priv *priv)
130{ 64{
131 struct iwl_led_cmd led_cmd = { 65 struct iwl_led_cmd led_cmd = {
132 .id = led_id, 66 .id = IWL_LED_LINK,
133 .on = IWL_LED_SOLID, 67 .on = IWL_LED_SOLID,
134 .off = 0, 68 .off = 0,
135 .interval = IWL_DEF_LED_INTRVL 69 .interval = IWL_DEF_LED_INTRVL
136 }; 70 };
137 return iwl_send_led_cmd(priv, &led_cmd); 71 return iwl3945_send_led_cmd(priv, &led_cmd);
138} 72}
139 73
140/* Set led off command */ 74/* Set led off command */
141static int iwl3945_led_off(struct iwl_priv *priv, int led_id) 75static int iwl3945_led_off(struct iwl_priv *priv)
142{ 76{
143 struct iwl_led_cmd led_cmd = { 77 struct iwl_led_cmd led_cmd = {
144 .id = led_id, 78 .id = IWL_LED_LINK,
145 .on = 0, 79 .on = 0,
146 .off = 0, 80 .off = 0,
147 .interval = IWL_DEF_LED_INTRVL 81 .interval = IWL_DEF_LED_INTRVL
148 }; 82 };
149 IWL_DEBUG_LED(priv, "led off %d\n", led_id); 83 IWL_DEBUG_LED(priv, "led off\n");
150 return iwl_send_led_cmd(priv, &led_cmd); 84 return iwl3945_send_led_cmd(priv, &led_cmd);
151} 85}
152 86
153/* 87const struct iwl_led_ops iwl3945_led_ops = {
154 * Set led on in case of association 88 .cmd = iwl3945_send_led_cmd,
155 * */ 89 .on = iwl3945_led_on,
156static int iwl3945_led_associate(struct iwl_priv *priv, int led_id) 90 .off = iwl3945_led_off,
157{ 91};
158 IWL_DEBUG_LED(priv, "Associated\n");
159
160 priv->allow_blinking = 1;
161 return iwl3945_led_on(priv, led_id);
162}
163/* Set Led off in case of disassociation */
164static int iwl3945_led_disassociate(struct iwl_priv *priv, int led_id)
165{
166 IWL_DEBUG_LED(priv, "Disassociated\n");
167
168 priv->allow_blinking = 0;
169
170 return 0;
171}
172
173/*
174 * brightness call back function for Tx/Rx LED
175 */
176static int iwl3945_led_associated(struct iwl_priv *priv, int led_id)
177{
178 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
179 !test_bit(STATUS_READY, &priv->status))
180 return 0;
181
182
183 /* start counting Tx/Rx bytes */
184 if (!priv->last_blink_time && priv->allow_blinking)
185 priv->last_blink_time = jiffies;
186 return 0;
187}
188
189/*
190 * brightness call back for association and radio
191 */
192static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
193 enum led_brightness brightness)
194{
195 struct iwl_led *led = container_of(led_cdev,
196 struct iwl_led, led_dev);
197 struct iwl_priv *priv = led->priv;
198
199 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
200 return;
201
202 IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
203 led_type_str[led->type], brightness);
204
205 switch (brightness) {
206 case LED_FULL:
207 if (led->led_on)
208 led->led_on(priv, IWL_LED_LINK);
209 break;
210 case LED_OFF:
211 if (led->led_off)
212 led->led_off(priv, IWL_LED_LINK);
213 break;
214 default:
215 if (led->led_pattern) {
216 int idx = iwl3945_brightness_to_idx(brightness);
217 led->led_pattern(priv, IWL_LED_LINK, idx);
218 }
219 break;
220 }
221}
222
223/*
224 * Register led class with the system
225 */
226static int iwl3945_led_register_led(struct iwl_priv *priv,
227 struct iwl_led *led,
228 enum led_type type, u8 set_led,
229 char *trigger)
230{
231 struct device *device = wiphy_dev(priv->hw->wiphy);
232 int ret;
233
234 led->led_dev.name = led->name;
235 led->led_dev.brightness_set = iwl3945_led_brightness_set;
236 led->led_dev.default_trigger = trigger;
237
238 led->priv = priv;
239 led->type = type;
240
241 ret = led_classdev_register(device, &led->led_dev);
242 if (ret) {
243 IWL_ERR(priv, "Error: failed to register led handler.\n");
244 return ret;
245 }
246
247 led->registered = 1;
248
249 if (set_led && led->led_on)
250 led->led_on(priv, IWL_LED_LINK);
251 return 0;
252}
253
254
255/*
256 * calculate blink rate according to last 2 sec Tx/Rx activities
257 */
258static inline u8 get_blink_rate(struct iwl_priv *priv)
259{
260 int index;
261 s64 tpt = priv->rxtxpackets;
262
263 if (tpt < 0)
264 tpt = -tpt;
265
266 IWL_DEBUG_LED(priv, "tpt %lld \n", (long long)tpt);
267
268 if (!priv->allow_blinking)
269 index = IWL_MAX_BLINK_TBL;
270 else
271 for (index = 0; index < IWL_MAX_BLINK_TBL; index++)
272 if (tpt > (blink_tbl[index].brightness * IWL_1MB_RATE))
273 break;
274
275 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", index);
276 return index;
277}
278
279/*
280 * this function called from handler. Since setting Led command can
281 * happen very frequent we postpone led command to be called from
282 * REPLY handler so we know ucode is up
283 */
284void iwl3945_led_background(struct iwl_priv *priv)
285{
286 u8 blink_idx;
287
288 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
289 priv->last_blink_time = 0;
290 return;
291 }
292 if (iwl_is_rfkill(priv)) {
293 priv->last_blink_time = 0;
294 return;
295 }
296
297 if (!priv->allow_blinking) {
298 priv->last_blink_time = 0;
299 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
300 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
301 iwl3945_led_pattern(priv, IWL_LED_LINK,
302 IWL_SOLID_BLINK_IDX);
303 }
304 return;
305 }
306 if (!priv->last_blink_time ||
307 !time_after(jiffies, priv->last_blink_time +
308 msecs_to_jiffies(1000)))
309 return;
310
311 blink_idx = get_blink_rate(priv);
312
313 /* call only if blink rate change */
314 if (blink_idx != priv->last_blink_rate)
315 iwl3945_led_pattern(priv, IWL_LED_LINK, blink_idx);
316
317 priv->last_blink_time = jiffies;
318 priv->last_blink_rate = blink_idx;
319 priv->rxtxpackets = 0;
320}
321
322
323/* Register all led handler */
324int iwl3945_led_register(struct iwl_priv *priv)
325{
326 char *trigger;
327 int ret;
328
329 priv->last_blink_rate = 0;
330 priv->rxtxpackets = 0;
331 priv->led_tpt = 0;
332 priv->last_blink_time = 0;
333 priv->allow_blinking = 0;
334
335 trigger = ieee80211_get_radio_led_name(priv->hw);
336 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
337 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
338 wiphy_name(priv->hw->wiphy));
339
340 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
341 priv->led[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off;
342 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
343
344 ret = iwl3945_led_register_led(priv,
345 &priv->led[IWL_LED_TRG_RADIO],
346 IWL_LED_TRG_RADIO, 1, trigger);
347
348 if (ret)
349 goto exit_fail;
350
351 trigger = ieee80211_get_assoc_led_name(priv->hw);
352 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
353 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
354 wiphy_name(priv->hw->wiphy));
355
356 ret = iwl3945_led_register_led(priv,
357 &priv->led[IWL_LED_TRG_ASSOC],
358 IWL_LED_TRG_ASSOC, 0, trigger);
359
360 /* for assoc always turn led on */
361 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_associate;
362 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_disassociate;
363 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
364
365 if (ret)
366 goto exit_fail;
367
368 trigger = ieee80211_get_rx_led_name(priv->hw);
369 snprintf(priv->led[IWL_LED_TRG_RX].name,
370 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
371 wiphy_name(priv->hw->wiphy));
372
373 ret = iwl3945_led_register_led(priv,
374 &priv->led[IWL_LED_TRG_RX],
375 IWL_LED_TRG_RX, 0, trigger);
376
377 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
378 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
379 priv->led[IWL_LED_TRG_RX].led_pattern = iwl3945_led_pattern;
380
381 if (ret)
382 goto exit_fail;
383
384 trigger = ieee80211_get_tx_led_name(priv->hw);
385 snprintf(priv->led[IWL_LED_TRG_TX].name,
386 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
387 wiphy_name(priv->hw->wiphy));
388
389 ret = iwl3945_led_register_led(priv,
390 &priv->led[IWL_LED_TRG_TX],
391 IWL_LED_TRG_TX, 0, trigger);
392
393 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
394 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
395 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
396
397 if (ret)
398 goto exit_fail;
399
400 return 0;
401
402exit_fail:
403 iwl3945_led_unregister(priv);
404 return ret;
405}
406
407
408/* unregister led class */
409static void iwl3945_led_unregister_led(struct iwl_led *led, u8 set_led)
410{
411 if (!led->registered)
412 return;
413
414 led_classdev_unregister(&led->led_dev);
415
416 if (set_led)
417 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
418 led->registered = 0;
419}
420
421/* Unregister all led handlers */
422void iwl3945_led_unregister(struct iwl_priv *priv)
423{
424 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
425 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
426 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
427 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
428}
429
430#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 3b65642258ca..5a1033ca7aaa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -24,23 +24,9 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#ifndef IWL3945_LEDS_H 27#ifndef __iwl_3945_led_h__
28#define IWL3945_LEDS_H 28#define __iwl_3945_led_h__
29 29
30struct iwl_priv; 30extern const struct iwl_led_ops iwl3945_led_ops;
31 31
32#ifdef CONFIG_IWLWIFI_LEDS 32#endif /* __iwl_3945_led_h__ */
33
34#include "iwl-led.h"
35
36extern int iwl3945_led_register(struct iwl_priv *priv);
37extern void iwl3945_led_unregister(struct iwl_priv *priv);
38extern void iwl3945_led_background(struct iwl_priv *priv);
39
40#else
41static inline int iwl3945_led_register(struct iwl_priv *priv) { return 0; }
42static inline void iwl3945_led_unregister(struct iwl_priv *priv) {}
43static inline void iwl3945_led_background(struct iwl_priv *priv) {}
44
45#endif /* IWLWIFI_LEDS*/
46#endif /* IWL3945_LEDS_H */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index cbb0585083a9..d4b49883b30e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -42,38 +42,6 @@
42 42
43#define RS_NAME "iwl-3945-rs" 43#define RS_NAME "iwl-3945-rs"
44 44
45struct iwl3945_rate_scale_data {
46 u64 data;
47 s32 success_counter;
48 s32 success_ratio;
49 s32 counter;
50 s32 average_tpt;
51 unsigned long stamp;
52};
53
54struct iwl3945_rs_sta {
55 spinlock_t lock;
56 struct iwl_priv *priv;
57 s32 *expected_tpt;
58 unsigned long last_partial_flush;
59 unsigned long last_flush;
60 u32 flush_time;
61 u32 last_tx_packets;
62 u32 tx_packets;
63 u8 tgg;
64 u8 flush_pending;
65 u8 start_rate;
66 u8 ibss_sta_added;
67 struct timer_list rate_scale_flush;
68 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
69#ifdef CONFIG_MAC80211_DEBUGFS
70 struct dentry *rs_sta_dbgfs_stats_table_file;
71#endif
72
73 /* used to be in sta_info */
74 int last_txrate_idx;
75};
76
77static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = { 45static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
78 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 46 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
79}; 47};
@@ -370,6 +338,28 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
370 338
371 IWL_DEBUG_RATE(priv, "enter\n"); 339 IWL_DEBUG_RATE(priv, "enter\n");
372 340
341 spin_lock_init(&rs_sta->lock);
342
343 rs_sta->priv = priv;
344
345 rs_sta->start_rate = IWL_RATE_INVALID;
346
347 /* default to just 802.11b */
348 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
349
350 rs_sta->last_partial_flush = jiffies;
351 rs_sta->last_flush = jiffies;
352 rs_sta->flush_time = IWL_RATE_FLUSH;
353 rs_sta->last_tx_packets = 0;
354 rs_sta->ibss_sta_added = 0;
355
356 init_timer(&rs_sta->rate_scale_flush);
357 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
358 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
359
360 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
361 iwl3945_clear_window(&rs_sta->win[i]);
362
373 /* TODO: what is a good starting rate for STA? About middle? Maybe not 363 /* TODO: what is a good starting rate for STA? About middle? Maybe not
374 * the lowest or the highest rate.. Could consider using RSSI from 364 * the lowest or the highest rate.. Could consider using RSSI from
375 * previous packets? Need to have IEEE 802.1X auth succeed immediately 365 * previous packets? Need to have IEEE 802.1X auth succeed immediately
@@ -409,45 +399,11 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
409{ 399{
410 struct iwl3945_rs_sta *rs_sta; 400 struct iwl3945_rs_sta *rs_sta;
411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 401 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
412 struct iwl_priv *priv = iwl_priv; 402 struct iwl_priv *priv __maybe_unused = iwl_priv;
413 int i;
414
415 /*
416 * XXX: If it's using sta->drv_priv anyway, it might
417 * as well just put all the information there.
418 */
419 403
420 IWL_DEBUG_RATE(priv, "enter\n"); 404 IWL_DEBUG_RATE(priv, "enter\n");
421 405
422 rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp); 406 rs_sta = &psta->rs_sta;
423 if (!rs_sta) {
424 IWL_DEBUG_RATE(priv, "leave: ENOMEM\n");
425 return NULL;
426 }
427
428 psta->rs_sta = rs_sta;
429
430 spin_lock_init(&rs_sta->lock);
431
432 rs_sta->priv = priv;
433
434 rs_sta->start_rate = IWL_RATE_INVALID;
435
436 /* default to just 802.11b */
437 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
438
439 rs_sta->last_partial_flush = jiffies;
440 rs_sta->last_flush = jiffies;
441 rs_sta->flush_time = IWL_RATE_FLUSH;
442 rs_sta->last_tx_packets = 0;
443 rs_sta->ibss_sta_added = 0;
444
445 init_timer(&rs_sta->rate_scale_flush);
446 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
447 rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush;
448
449 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
450 iwl3945_clear_window(&rs_sta->win[i]);
451 407
452 IWL_DEBUG_RATE(priv, "leave\n"); 408 IWL_DEBUG_RATE(priv, "leave\n");
453 409
@@ -458,14 +414,11 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
458 void *priv_sta) 414 void *priv_sta)
459{ 415{
460 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 416 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
461 struct iwl3945_rs_sta *rs_sta = priv_sta; 417 struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
462 struct iwl_priv *priv __maybe_unused = rs_sta->priv; 418 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
463 419
464 psta->rs_sta = NULL;
465
466 IWL_DEBUG_RATE(priv, "enter\n"); 420 IWL_DEBUG_RATE(priv, "enter\n");
467 del_timer_sync(&rs_sta->rate_scale_flush); 421 del_timer_sync(&rs_sta->rate_scale_flush);
468 kfree(rs_sta);
469 IWL_DEBUG_RATE(priv, "leave\n"); 422 IWL_DEBUG_RATE(priv, "leave\n");
470} 423}
471 424
@@ -960,14 +913,15 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
960 913
961 rcu_read_lock(); 914 rcu_read_lock();
962 915
963 sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr); 916 sta = ieee80211_find_sta(priv->vif,
917 priv->stations[sta_id].sta.sta.addr);
964 if (!sta) { 918 if (!sta) {
965 rcu_read_unlock(); 919 rcu_read_unlock();
966 return; 920 return;
967 } 921 }
968 922
969 psta = (void *) sta->drv_priv; 923 psta = (void *) sta->drv_priv;
970 rs_sta = psta->rs_sta; 924 rs_sta = &psta->rs_sta;
971 925
972 spin_lock_irqsave(&rs_sta->lock, flags); 926 spin_lock_irqsave(&rs_sta->lock, flags);
973 927
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index f059b49dc691..7da1dab933d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -47,7 +47,8 @@
47#include "iwl-eeprom.h" 47#include "iwl-eeprom.h"
48#include "iwl-helpers.h" 48#include "iwl-helpers.h"
49#include "iwl-core.h" 49#include "iwl-core.h"
50#include "iwl-agn-rs.h" 50#include "iwl-led.h"
51#include "iwl-3945-led.h"
51 52
52#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 53#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
53 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 54 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -293,7 +294,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
293static void iwl3945_rx_reply_tx(struct iwl_priv *priv, 294static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
294 struct iwl_rx_mem_buffer *rxb) 295 struct iwl_rx_mem_buffer *rxb)
295{ 296{
296 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 297 struct iwl_rx_packet *pkt = rxb_addr(rxb);
297 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 298 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
298 int txq_id = SEQ_TO_QUEUE(sequence); 299 int txq_id = SEQ_TO_QUEUE(sequence);
299 int index = SEQ_TO_INDEX(sequence); 300 int index = SEQ_TO_INDEX(sequence);
@@ -353,16 +354,12 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
353void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 354void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
354 struct iwl_rx_mem_buffer *rxb) 355 struct iwl_rx_mem_buffer *rxb)
355{ 356{
356 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 357 struct iwl_rx_packet *pkt = rxb_addr(rxb);
357 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 358 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
358 (int)sizeof(struct iwl3945_notif_statistics), 359 (int)sizeof(struct iwl3945_notif_statistics),
359 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 360 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
360 361
361 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39)); 362 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
362
363 iwl3945_led_background(priv);
364
365 priv->last_statistics_time = jiffies;
366} 363}
367 364
368/****************************************************************************** 365/******************************************************************************
@@ -545,14 +542,18 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
545 struct iwl_rx_mem_buffer *rxb, 542 struct iwl_rx_mem_buffer *rxb,
546 struct ieee80211_rx_status *stats) 543 struct ieee80211_rx_status *stats)
547{ 544{
548 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 545 struct iwl_rx_packet *pkt = rxb_addr(rxb);
549 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 546 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
550 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 547 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
551 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 548 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
552 short len = le16_to_cpu(rx_hdr->len); 549 u16 len = le16_to_cpu(rx_hdr->len);
550 struct sk_buff *skb;
551 int ret;
552 __le16 fc = hdr->frame_control;
553 553
554 /* We received data from the HW, so stop the watchdog */ 554 /* We received data from the HW, so stop the watchdog */
555 if (unlikely((len + IWL39_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 555 if (unlikely(len + IWL39_RX_FRAME_SIZE >
556 PAGE_SIZE << priv->hw_params.rx_page_order)) {
556 IWL_DEBUG_DROP(priv, "Corruption detected!\n"); 557 IWL_DEBUG_DROP(priv, "Corruption detected!\n");
557 return; 558 return;
558 } 559 }
@@ -564,24 +565,50 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
564 return; 565 return;
565 } 566 }
566 567
567 skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt); 568 skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
568 /* Set the size of the skb to the size of the frame */ 569 if (!skb) {
569 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len)); 570 IWL_ERR(priv, "alloc_skb failed\n");
571 return;
572 }
570 573
571 if (!iwl3945_mod_params.sw_crypto) 574 if (!iwl3945_mod_params.sw_crypto)
572 iwl_set_decrypted_flag(priv, 575 iwl_set_decrypted_flag(priv,
573 (struct ieee80211_hdr *)rxb->skb->data, 576 (struct ieee80211_hdr *)rxb_addr(rxb),
574 le32_to_cpu(rx_end->status), stats); 577 le32_to_cpu(rx_end->status), stats);
575 578
576#ifdef CONFIG_IWLWIFI_LEDS 579 skb_reserve(skb, IWL_LINK_HDR_MAX);
577 if (ieee80211_is_data(hdr->frame_control)) 580 skb_add_rx_frag(skb, 0, rxb->page,
578 priv->rxtxpackets += len; 581 (void *)rx_hdr->payload - (void *)pkt, len);
579#endif 582
580 iwl_update_stats(priv, false, hdr->frame_control, len); 583 /* mac80211 currently doesn't support paged SKB. Convert it to
584 * linear SKB for management frame and data frame requires
585 * software decryption or software defragementation. */
586 if (ieee80211_is_mgmt(fc) ||
587 ieee80211_has_protected(fc) ||
588 ieee80211_has_morefrags(fc) ||
589 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
590 ret = skb_linearize(skb);
591 else
592 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
593 0 : -ENOMEM;
594
595 if (ret) {
596 kfree_skb(skb);
597 goto out;
598 }
581 599
582 memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats)); 600 /*
583 ieee80211_rx_irqsafe(priv->hw, rxb->skb); 601 * XXX: We cannot touch the page and its virtual memory (pkt) after
584 rxb->skb = NULL; 602 * here. It might have already been freed by the above skb change.
603 */
604
605 iwl_update_stats(priv, false, fc, len);
606 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
607
608 ieee80211_rx(priv->hw, skb);
609 out:
610 priv->alloc_rxb_page--;
611 rxb->page = NULL;
585} 612}
586 613
587#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 614#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
@@ -591,7 +618,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
591{ 618{
592 struct ieee80211_hdr *header; 619 struct ieee80211_hdr *header;
593 struct ieee80211_rx_status rx_status; 620 struct ieee80211_rx_status rx_status;
594 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 621 struct iwl_rx_packet *pkt = rxb_addr(rxb);
595 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); 622 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
596 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 623 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
597 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 624 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
@@ -791,29 +818,31 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
791 u8 data_retry_limit; 818 u8 data_retry_limit;
792 __le32 tx_flags; 819 __le32 tx_flags;
793 __le16 fc = hdr->frame_control; 820 __le16 fc = hdr->frame_control;
794 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 821 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
795 822
796 rate = iwl3945_rates[rate_index].plcp; 823 rate = iwl3945_rates[rate_index].plcp;
797 tx_flags = tx->tx_flags; 824 tx_flags = tx_cmd->tx_flags;
798 825
799 /* We need to figure out how to get the sta->supp_rates while 826 /* We need to figure out how to get the sta->supp_rates while
800 * in this running context */ 827 * in this running context */
801 rate_mask = IWL_RATES_MASK; 828 rate_mask = IWL_RATES_MASK;
802 829
830
831 /* Set retry limit on DATA packets and Probe Responses*/
832 if (ieee80211_is_probe_resp(fc))
833 data_retry_limit = 3;
834 else
835 data_retry_limit = IWL_DEFAULT_TX_RETRY;
836 tx_cmd->data_retry_limit = data_retry_limit;
837
803 if (tx_id >= IWL_CMD_QUEUE_NUM) 838 if (tx_id >= IWL_CMD_QUEUE_NUM)
804 rts_retry_limit = 3; 839 rts_retry_limit = 3;
805 else 840 else
806 rts_retry_limit = 7; 841 rts_retry_limit = 7;
807 842
808 if (ieee80211_is_probe_resp(fc)) { 843 if (data_retry_limit < rts_retry_limit)
809 data_retry_limit = 3; 844 rts_retry_limit = data_retry_limit;
810 if (data_retry_limit < rts_retry_limit) 845 tx_cmd->rts_retry_limit = rts_retry_limit;
811 rts_retry_limit = data_retry_limit;
812 } else
813 data_retry_limit = IWL_DEFAULT_TX_RETRY;
814
815 if (priv->data_retry_limit != -1)
816 data_retry_limit = priv->data_retry_limit;
817 846
818 if (ieee80211_is_mgmt(fc)) { 847 if (ieee80211_is_mgmt(fc)) {
819 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { 848 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
@@ -831,22 +860,20 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
831 } 860 }
832 } 861 }
833 862
834 tx->rts_retry_limit = rts_retry_limit; 863 tx_cmd->rate = rate;
835 tx->data_retry_limit = data_retry_limit; 864 tx_cmd->tx_flags = tx_flags;
836 tx->rate = rate;
837 tx->tx_flags = tx_flags;
838 865
839 /* OFDM */ 866 /* OFDM */
840 tx->supp_rates[0] = 867 tx_cmd->supp_rates[0] =
841 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; 868 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
842 869
843 /* CCK */ 870 /* CCK */
844 tx->supp_rates[1] = (rate_mask & 0xF); 871 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
845 872
846 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " 873 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
847 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, 874 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
848 tx->rate, le32_to_cpu(tx->tx_flags), 875 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
849 tx->supp_rates[1], tx->supp_rates[0]); 876 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
850} 877}
851 878
852u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags) 879u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
@@ -962,6 +989,11 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
962 989
963 iwl3945_hw_txq_ctx_free(priv); 990 iwl3945_hw_txq_ctx_free(priv);
964 991
992 /* allocate tx queue structure */
993 rc = iwl_alloc_txq_mem(priv);
994 if (rc)
995 return rc;
996
965 /* Tx CMD queue */ 997 /* Tx CMD queue */
966 rc = iwl3945_tx_reset(priv); 998 rc = iwl3945_tx_reset(priv);
967 if (rc) 999 if (rc)
@@ -986,41 +1018,25 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
986 return rc; 1018 return rc;
987} 1019}
988 1020
1021
1022/*
1023 * Start up 3945's basic functionality after it has been reset
1024 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1025 * NOTE: This does not load uCode nor start the embedded processor
1026 */
989static int iwl3945_apm_init(struct iwl_priv *priv) 1027static int iwl3945_apm_init(struct iwl_priv *priv)
990{ 1028{
991 int ret; 1029 int ret = iwl_apm_init(priv);
992
993 iwl_power_initialize(priv);
994
995 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
996 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
997 1030
998 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */ 1031 /* Clear APMG (NIC's internal power management) interrupts */
999 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 1032 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
1000 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 1033 iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
1001
1002 /* set "initialization complete" bit to move adapter
1003 * D0U* --> D0A* state */
1004 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1005
1006 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
1007 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1008 if (ret < 0) {
1009 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1010 goto out;
1011 }
1012
1013 /* enable DMA */
1014 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
1015 APMG_CLK_VAL_BSM_CLK_RQT);
1016
1017 udelay(20);
1018 1034
1019 /* disable L1-Active */ 1035 /* Reset radio chip */
1020 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 1036 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
1021 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1037 udelay(5);
1038 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
1022 1039
1023out:
1024 return ret; 1040 return ret;
1025} 1041}
1026 1042
@@ -1145,12 +1161,16 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1145 int txq_id; 1161 int txq_id;
1146 1162
1147 /* Tx queues */ 1163 /* Tx queues */
1148 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 1164 if (priv->txq)
1149 if (txq_id == IWL_CMD_QUEUE_NUM) 1165 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1150 iwl_cmd_queue_free(priv); 1166 txq_id++)
1151 else 1167 if (txq_id == IWL_CMD_QUEUE_NUM)
1152 iwl_tx_queue_free(priv, txq_id); 1168 iwl_cmd_queue_free(priv);
1169 else
1170 iwl_tx_queue_free(priv, txq_id);
1153 1171
1172 /* free tx queue structure */
1173 iwl_free_txq_mem(priv);
1154} 1174}
1155 1175
1156void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1176void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1159,6 +1179,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1159 1179
1160 /* stop SCD */ 1180 /* stop SCD */
1161 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1181 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
1182 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1162 1183
1163 /* reset TFD queues */ 1184 /* reset TFD queues */
1164 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 1185 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
@@ -1171,85 +1192,6 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1171 iwl3945_hw_txq_ctx_free(priv); 1192 iwl3945_hw_txq_ctx_free(priv);
1172} 1193}
1173 1194
1174static int iwl3945_apm_stop_master(struct iwl_priv *priv)
1175{
1176 int ret = 0;
1177 unsigned long flags;
1178
1179 spin_lock_irqsave(&priv->lock, flags);
1180
1181 /* set stop master bit */
1182 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1183
1184 iwl_poll_direct_bit(priv, CSR_RESET,
1185 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1186
1187 if (ret < 0)
1188 goto out;
1189
1190out:
1191 spin_unlock_irqrestore(&priv->lock, flags);
1192 IWL_DEBUG_INFO(priv, "stop master\n");
1193
1194 return ret;
1195}
1196
1197static void iwl3945_apm_stop(struct iwl_priv *priv)
1198{
1199 unsigned long flags;
1200
1201 iwl3945_apm_stop_master(priv);
1202
1203 spin_lock_irqsave(&priv->lock, flags);
1204
1205 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1206
1207 udelay(10);
1208 /* clear "init complete" move adapter D0A* --> D0U state */
1209 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1210 spin_unlock_irqrestore(&priv->lock, flags);
1211}
1212
1213static int iwl3945_apm_reset(struct iwl_priv *priv)
1214{
1215 iwl3945_apm_stop_master(priv);
1216
1217
1218 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1219 udelay(10);
1220
1221 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1222
1223 iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
1224 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1225
1226 iwl_write_prph(priv, APMG_CLK_CTRL_REG,
1227 APMG_CLK_VAL_BSM_CLK_RQT);
1228
1229 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
1230 iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
1231 0xFFFFFFFF);
1232
1233 /* enable DMA */
1234 iwl_write_prph(priv, APMG_CLK_EN_REG,
1235 APMG_CLK_VAL_DMA_CLK_RQT |
1236 APMG_CLK_VAL_BSM_CLK_RQT);
1237 udelay(10);
1238
1239 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
1240 APMG_PS_CTRL_VAL_RESET_REQ);
1241 udelay(5);
1242 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
1243 APMG_PS_CTRL_VAL_RESET_REQ);
1244
1245 /* Clear the 'host command active' bit... */
1246 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1247
1248 wake_up_interruptible(&priv->wait_command_queue);
1249
1250 return 0;
1251}
1252
1253/** 1195/**
1254 * iwl3945_hw_reg_adjust_power_by_temp 1196 * iwl3945_hw_reg_adjust_power_by_temp
1255 * return index delta into power gain settings table 1197 * return index delta into power gain settings table
@@ -1858,7 +1800,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1858static int iwl3945_send_rxon_assoc(struct iwl_priv *priv) 1800static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1859{ 1801{
1860 int rc = 0; 1802 int rc = 0;
1861 struct iwl_rx_packet *res = NULL; 1803 struct iwl_rx_packet *pkt;
1862 struct iwl3945_rxon_assoc_cmd rxon_assoc; 1804 struct iwl3945_rxon_assoc_cmd rxon_assoc;
1863 struct iwl_host_cmd cmd = { 1805 struct iwl_host_cmd cmd = {
1864 .id = REPLY_RXON_ASSOC, 1806 .id = REPLY_RXON_ASSOC,
@@ -1887,14 +1829,14 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1887 if (rc) 1829 if (rc)
1888 return rc; 1830 return rc;
1889 1831
1890 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 1832 pkt = (struct iwl_rx_packet *)cmd.reply_page;
1891 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 1833 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
1892 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n"); 1834 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1893 rc = -EIO; 1835 rc = -EIO;
1894 } 1836 }
1895 1837
1896 priv->alloc_rxb_skb--; 1838 priv->alloc_rxb_page--;
1897 dev_kfree_skb_any(cmd.reply_skb); 1839 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
1898 1840
1899 return rc; 1841 return rc;
1900} 1842}
@@ -2042,12 +1984,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
2042 return 0; 1984 return 0;
2043} 1985}
2044 1986
2045/* will add 3945 channel switch cmd handling later */
2046int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2047{
2048 return 0;
2049}
2050
2051/** 1987/**
2052 * iwl3945_reg_txpower_periodic - called when time to check our temperature. 1988 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
2053 * 1989 *
@@ -2557,11 +2493,10 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2557 } 2493 }
2558 2494
2559 /* Assign number of Usable TX queues */ 2495 /* Assign number of Usable TX queues */
2560 priv->hw_params.max_txq_num = IWL39_NUM_QUEUES; 2496 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
2561 2497
2562 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); 2498 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2563 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K; 2499 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2564 priv->hw_params.max_pkt_size = 2342;
2565 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 2500 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2566 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 2501 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2567 priv->hw_params.max_stations = IWL3945_STATION_COUNT; 2502 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
@@ -2844,8 +2779,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2844 .dump_nic_error_log = iwl3945_dump_nic_error_log, 2779 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2845 .apm_ops = { 2780 .apm_ops = {
2846 .init = iwl3945_apm_init, 2781 .init = iwl3945_apm_init,
2847 .reset = iwl3945_apm_reset, 2782 .stop = iwl_apm_stop,
2848 .stop = iwl3945_apm_stop,
2849 .config = iwl3945_nic_config, 2783 .config = iwl3945_nic_config,
2850 .set_pwr_src = iwl3945_set_pwr_src, 2784 .set_pwr_src = iwl3945_set_pwr_src,
2851 }, 2785 },
@@ -2874,6 +2808,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2874static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2808static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2875 .get_hcmd_size = iwl3945_get_hcmd_size, 2809 .get_hcmd_size = iwl3945_get_hcmd_size,
2876 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2810 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2811 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2877}; 2812};
2878 2813
2879static struct iwl_ops iwl3945_ops = { 2814static struct iwl_ops iwl3945_ops = {
@@ -2881,6 +2816,7 @@ static struct iwl_ops iwl3945_ops = {
2881 .lib = &iwl3945_lib, 2816 .lib = &iwl3945_lib,
2882 .hcmd = &iwl3945_hcmd, 2817 .hcmd = &iwl3945_hcmd,
2883 .utils = &iwl3945_hcmd_utils, 2818 .utils = &iwl3945_hcmd_utils,
2819 .led = &iwl3945_led_ops,
2884}; 2820};
2885 2821
2886static struct iwl_cfg iwl3945_bg_cfg = { 2822static struct iwl_cfg iwl3945_bg_cfg = {
@@ -2892,9 +2828,14 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2892 .eeprom_size = IWL3945_EEPROM_IMG_SIZE, 2828 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2893 .eeprom_ver = EEPROM_3945_EEPROM_VERSION, 2829 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2894 .ops = &iwl3945_ops, 2830 .ops = &iwl3945_ops,
2831 .num_of_queues = IWL39_NUM_QUEUES,
2895 .mod_params = &iwl3945_mod_params, 2832 .mod_params = &iwl3945_mod_params,
2833 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2834 .set_l0s = false,
2835 .use_bsm = true,
2896 .use_isr_legacy = true, 2836 .use_isr_legacy = true,
2897 .ht_greenfield_support = false, 2837 .ht_greenfield_support = false,
2838 .led_compensation = 64,
2898}; 2839};
2899 2840
2900static struct iwl_cfg iwl3945_abg_cfg = { 2841static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2906,9 +2847,11 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2906 .eeprom_size = IWL3945_EEPROM_IMG_SIZE, 2847 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2907 .eeprom_ver = EEPROM_3945_EEPROM_VERSION, 2848 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2908 .ops = &iwl3945_ops, 2849 .ops = &iwl3945_ops,
2850 .num_of_queues = IWL39_NUM_QUEUES,
2909 .mod_params = &iwl3945_mod_params, 2851 .mod_params = &iwl3945_mod_params,
2910 .use_isr_legacy = true, 2852 .use_isr_legacy = true,
2911 .ht_greenfield_support = false, 2853 .ht_greenfield_support = false,
2854 .led_compensation = 64,
2912}; 2855};
2913 2856
2914struct pci_device_id iwl3945_hw_card_ids[] = { 2857struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 21679bf3a1aa..ecc23ec1f6a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -46,7 +46,7 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
46#include "iwl-debug.h" 46#include "iwl-debug.h"
47#include "iwl-power.h" 47#include "iwl-power.h"
48#include "iwl-dev.h" 48#include "iwl-dev.h"
49#include "iwl-3945-led.h" 49#include "iwl-led.h"
50 50
51/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
52#define IWL3945_UCODE_API_MAX 2 52#define IWL3945_UCODE_API_MAX 2
@@ -74,8 +74,41 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
74/* Module parameters accessible from iwl-*.c */ 74/* Module parameters accessible from iwl-*.c */
75extern struct iwl_mod_params iwl3945_mod_params; 75extern struct iwl_mod_params iwl3945_mod_params;
76 76
77struct iwl3945_rate_scale_data {
78 u64 data;
79 s32 success_counter;
80 s32 success_ratio;
81 s32 counter;
82 s32 average_tpt;
83 unsigned long stamp;
84};
85
86struct iwl3945_rs_sta {
87 spinlock_t lock;
88 struct iwl_priv *priv;
89 s32 *expected_tpt;
90 unsigned long last_partial_flush;
91 unsigned long last_flush;
92 u32 flush_time;
93 u32 last_tx_packets;
94 u32 tx_packets;
95 u8 tgg;
96 u8 flush_pending;
97 u8 start_rate;
98 u8 ibss_sta_added;
99 struct timer_list rate_scale_flush;
100 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
101#ifdef CONFIG_MAC80211_DEBUGFS
102 struct dentry *rs_sta_dbgfs_stats_table_file;
103#endif
104
105 /* used to be in sta_info */
106 int last_txrate_idx;
107};
108
109
77struct iwl3945_sta_priv { 110struct iwl3945_sta_priv {
78 struct iwl3945_rs_sta *rs_sta; 111 struct iwl3945_rs_sta rs_sta;
79}; 112};
80 113
81enum iwl3945_antenna { 114enum iwl3945_antenna {
@@ -130,12 +163,6 @@ struct iwl3945_frame {
130#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 163#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
131#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 164#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
132 165
133/*
134 * RX related structures and functions
135 */
136#define RX_FREE_BUFFERS 64
137#define RX_LOW_WATERMARK 8
138
139#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 166#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
140#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 167#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
141#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 168#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
@@ -194,22 +221,13 @@ struct iwl3945_ibss_seq {
194 * for use by iwl-*.c 221 * for use by iwl-*.c
195 * 222 *
196 *****************************************************************************/ 223 *****************************************************************************/
197extern int iwl3945_power_init_handle(struct iwl_priv *priv);
198extern int iwl3945_eeprom_init(struct iwl_priv *priv);
199extern int iwl3945_calc_db_from_ratio(int sig_ratio); 224extern int iwl3945_calc_db_from_ratio(int sig_ratio);
200extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm); 225extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
201extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
202 struct iwl_tx_queue *txq, int count, u32 id);
203extern void iwl3945_rx_replenish(void *data); 226extern void iwl3945_rx_replenish(void *data);
204extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 227extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
205extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
206extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
207 const void *data);
208extern int __must_check iwl3945_send_cmd(struct iwl_priv *priv,
209 struct iwl_host_cmd *cmd);
210extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, 228extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
211 struct ieee80211_hdr *hdr,int left); 229 struct ieee80211_hdr *hdr,int left);
212extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv); 230extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
213extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 231extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
214 232
215/* 233/*
@@ -280,8 +298,6 @@ extern void iwl3945_config_ap(struct iwl_priv *priv);
280 */ 298 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); 299extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282 300
283extern int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel);
284
285/* 301/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 302 * Forward declare iwl-3945.c functions for iwl-base.c
287 */ 303 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index b34322a32458..c606366b582c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -76,12 +76,9 @@
76 76
77/* 77/*
78 * uCode queue management definitions ... 78 * uCode queue management definitions ...
79 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
80 * The first queue used for block-ack aggregation is #7 (4965 only). 79 * The first queue used for block-ack aggregation is #7 (4965 only).
81 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. 80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
82 */ 81 */
83#define IWL_CMD_QUEUE_NUM 4
84#define IWL_CMD_FIFO_NUM 4
85#define IWL49_FIRST_AMPDU_QUEUE 7 82#define IWL49_FIRST_AMPDU_QUEUE 7
86 83
87/* Time constants */ 84/* Time constants */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 6f703a041847..386513b601f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -45,6 +45,7 @@
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-calib.h" 46#include "iwl-calib.h"
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h"
48 49
49static int iwl4965_send_tx_power(struct iwl_priv *priv); 50static int iwl4965_send_tx_power(struct iwl_priv *priv);
50static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 51static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -62,8 +63,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
62 63
63/* module parameters */ 64/* module parameters */
64static struct iwl_mod_params iwl4965_mod_params = { 65static struct iwl_mod_params iwl4965_mod_params = {
65 .num_of_queues = IWL49_NUM_QUEUES,
66 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
67 .amsdu_size_8K = 1, 66 .amsdu_size_8K = 1,
68 .restart_fw = 1, 67 .restart_fw = 1,
69 /* the rest are 0 by default */ 68 /* the rest are 0 by default */
@@ -319,63 +318,13 @@ static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
319 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask); 318 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
320} 319}
321 320
322static int iwl4965_apm_init(struct iwl_priv *priv)
323{
324 int ret = 0;
325
326 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
327 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
328
329 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
330 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
331 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
332
333 /* set "initialization complete" bit to move adapter
334 * D0U* --> D0A* state */
335 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
336
337 /* wait for clock stabilization */
338 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
339 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
340 if (ret < 0) {
341 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
342 goto out;
343 }
344
345 /* enable DMA */
346 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
347 APMG_CLK_VAL_BSM_CLK_RQT);
348
349 udelay(20);
350
351 /* disable L1-Active */
352 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
353 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
354
355out:
356 return ret;
357}
358
359
360static void iwl4965_nic_config(struct iwl_priv *priv) 321static void iwl4965_nic_config(struct iwl_priv *priv)
361{ 322{
362 unsigned long flags; 323 unsigned long flags;
363 u16 radio_cfg; 324 u16 radio_cfg;
364 u16 lctl;
365 325
366 spin_lock_irqsave(&priv->lock, flags); 326 spin_lock_irqsave(&priv->lock, flags);
367 327
368 lctl = iwl_pcie_link_ctl(priv);
369
370 /* HW bug W/A - negligible power consumption */
371 /* L1-ASPM is enabled by BIOS */
372 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
373 /* L1-ASPM enabled: disable L0S */
374 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
375 else
376 /* L1-ASPM disabled: enable L0S */
377 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
378
379 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 328 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
380 329
381 /* write radio config values to register */ 330 /* write radio config values to register */
@@ -396,79 +345,6 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
396 spin_unlock_irqrestore(&priv->lock, flags); 345 spin_unlock_irqrestore(&priv->lock, flags);
397} 346}
398 347
399static int iwl4965_apm_stop_master(struct iwl_priv *priv)
400{
401 unsigned long flags;
402
403 spin_lock_irqsave(&priv->lock, flags);
404
405 /* set stop master bit */
406 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
407
408 iwl_poll_direct_bit(priv, CSR_RESET,
409 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
410
411 spin_unlock_irqrestore(&priv->lock, flags);
412 IWL_DEBUG_INFO(priv, "stop master\n");
413
414 return 0;
415}
416
417static void iwl4965_apm_stop(struct iwl_priv *priv)
418{
419 unsigned long flags;
420
421 iwl4965_apm_stop_master(priv);
422
423 spin_lock_irqsave(&priv->lock, flags);
424
425 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
426
427 udelay(10);
428 /* clear "init complete" move adapter D0A* --> D0U state */
429 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
430 spin_unlock_irqrestore(&priv->lock, flags);
431}
432
433static int iwl4965_apm_reset(struct iwl_priv *priv)
434{
435 int ret = 0;
436
437 iwl4965_apm_stop_master(priv);
438
439
440 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
441
442 udelay(10);
443
444 /* FIXME: put here L1A -L0S w/a */
445
446 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
447
448 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
449 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
450 if (ret < 0)
451 goto out;
452
453 udelay(10);
454
455 /* Enable DMA and BSM Clock */
456 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
457 APMG_CLK_VAL_BSM_CLK_RQT);
458
459 udelay(10);
460
461 /* disable L1A */
462 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
463 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
464
465 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
466 wake_up_interruptible(&priv->wait_command_queue);
467
468out:
469 return ret;
470}
471
472/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 348/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
473 * Called after every association, but this runs only once! 349 * Called after every association, but this runs only once!
474 * ... once chain noise is calibrated the first time, it's good forever. */ 350 * ... once chain noise is calibrated the first time, it's good forever. */
@@ -496,14 +372,15 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
496static void iwl4965_gain_computation(struct iwl_priv *priv, 372static void iwl4965_gain_computation(struct iwl_priv *priv,
497 u32 *average_noise, 373 u32 *average_noise,
498 u16 min_average_noise_antenna_i, 374 u16 min_average_noise_antenna_i,
499 u32 min_average_noise) 375 u32 min_average_noise,
376 u8 default_chain)
500{ 377{
501 int i, ret; 378 int i, ret;
502 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 379 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
503 380
504 data->delta_gain_code[min_average_noise_antenna_i] = 0; 381 data->delta_gain_code[min_average_noise_antenna_i] = 0;
505 382
506 for (i = 0; i < NUM_RX_CHAINS; i++) { 383 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
507 s32 delta_g = 0; 384 s32 delta_g = 0;
508 385
509 if (!(data->disconn_array[i]) && 386 if (!(data->disconn_array[i]) &&
@@ -557,18 +434,6 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
557 data->beacon_count = 0; 434 data->beacon_count = 0;
558} 435}
559 436
560static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
561 __le32 *tx_flags)
562{
563 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
564 *tx_flags |= TX_CMD_FLG_RTS_MSK;
565 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
566 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
567 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
568 *tx_flags |= TX_CMD_FLG_CTS_MSK;
569 }
570}
571
572static void iwl4965_bg_txpower_work(struct work_struct *work) 437static void iwl4965_bg_txpower_work(struct work_struct *work)
573{ 438{
574 struct iwl_priv *priv = container_of(work, struct iwl_priv, 439 struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -663,7 +528,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
663 iwl_write_targ_mem(priv, a, 0); 528 iwl_write_targ_mem(priv, a, 0);
664 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) 529 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
665 iwl_write_targ_mem(priv, a, 0); 530 iwl_write_targ_mem(priv, a, 0);
666 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 531 for (; a < priv->scd_base_addr +
532 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
667 iwl_write_targ_mem(priv, a, 0); 533 iwl_write_targ_mem(priv, a, 0);
668 534
669 /* Tel 4965 where to find Tx byte count tables */ 535 /* Tel 4965 where to find Tx byte count tables */
@@ -748,6 +614,10 @@ static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
748 614
749 .nrg_th_cck = 100, 615 .nrg_th_cck = 100,
750 .nrg_th_ofdm = 100, 616 .nrg_th_ofdm = 100,
617
618 .barker_corr_th_min = 190,
619 .barker_corr_th_min_mrc = 390,
620 .nrg_th_cca = 62,
751}; 621};
752 622
753static void iwl4965_set_ct_threshold(struct iwl_priv *priv) 623static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
@@ -764,19 +634,16 @@ static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
764 */ 634 */
765static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 635static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
766{ 636{
637 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
638 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
639 priv->cfg->num_of_queues =
640 priv->cfg->mod_params->num_of_queues;
767 641
768 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) || 642 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
769 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
770 IWL_ERR(priv,
771 "invalid queues_num, should be between %d and %d\n",
772 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
773 return -EINVAL;
774 }
775
776 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
777 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; 643 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
778 priv->hw_params.scd_bc_tbls_size = 644 priv->hw_params.scd_bc_tbls_size =
779 IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl); 645 priv->cfg->num_of_queues *
646 sizeof(struct iwl4965_scd_bc_tbl);
780 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 647 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
781 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 648 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
782 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 649 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
@@ -787,10 +654,10 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
787 654
788 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 655 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
789 656
790 priv->hw_params.tx_chains_num = 2; 657 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
791 priv->hw_params.rx_chains_num = 2; 658 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
792 priv->hw_params.valid_tx_ant = ANT_A | ANT_B; 659 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
793 priv->hw_params.valid_rx_ant = ANT_A | ANT_B; 660 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
794 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 661 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
795 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); 662 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
796 663
@@ -1567,14 +1434,13 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1567 return ret; 1434 return ret;
1568} 1435}
1569 1436
1570#ifdef IEEE80211_CONF_CHANNEL_SWITCH
1571static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) 1437static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1572{ 1438{
1573 int rc; 1439 int rc;
1574 u8 band = 0; 1440 u8 band = 0;
1575 bool is_ht40 = false; 1441 bool is_ht40 = false;
1576 u8 ctrl_chan_high = 0; 1442 u8 ctrl_chan_high = 0;
1577 struct iwl4965_channel_switch_cmd cmd = { 0 }; 1443 struct iwl4965_channel_switch_cmd cmd;
1578 const struct iwl_channel_info *ch_info; 1444 const struct iwl_channel_info *ch_info;
1579 1445
1580 band = priv->band == IEEE80211_BAND_2GHZ; 1446 band = priv->band == IEEE80211_BAND_2GHZ;
@@ -1584,19 +1450,22 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1584 is_ht40 = is_ht40_channel(priv->staging_rxon.flags); 1450 is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
1585 1451
1586 if (is_ht40 && 1452 if (is_ht40 &&
1587 (priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK)) 1453 (priv->staging_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
1588 ctrl_chan_high = 1; 1454 ctrl_chan_high = 1;
1589 1455
1590 cmd.band = band; 1456 cmd.band = band;
1591 cmd.expect_beacon = 0; 1457 cmd.expect_beacon = 0;
1592 cmd.channel = cpu_to_le16(channel); 1458 cmd.channel = cpu_to_le16(channel);
1593 cmd.rxon_flags = priv->active_rxon.flags; 1459 cmd.rxon_flags = priv->staging_rxon.flags;
1594 cmd.rxon_filter_flags = priv->active_rxon.filter_flags; 1460 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
1595 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 1461 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1596 if (ch_info) 1462 if (ch_info)
1597 cmd.expect_beacon = is_channel_radar(ch_info); 1463 cmd.expect_beacon = is_channel_radar(ch_info);
1598 else 1464 else {
1599 cmd.expect_beacon = 1; 1465 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1466 priv->active_rxon.channel, channel);
1467 return -EFAULT;
1468 }
1600 1469
1601 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40, 1470 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
1602 ctrl_chan_high, &cmd.tx_power); 1471 ctrl_chan_high, &cmd.tx_power);
@@ -1605,10 +1474,11 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1605 return rc; 1474 return rc;
1606 } 1475 }
1607 1476
1608 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1477 priv->switch_rxon.channel = cpu_to_le16(channel);
1609 return rc; 1478 priv->switch_rxon.switch_in_progress = true;
1479
1480 return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1610} 1481}
1611#endif
1612 1482
1613/** 1483/**
1614 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1484 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
@@ -1805,11 +1675,13 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1805 u16 ssn_idx, u8 tx_fifo) 1675 u16 ssn_idx, u8 tx_fifo)
1806{ 1676{
1807 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1677 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1808 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1678 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1679 <= txq_id)) {
1809 IWL_WARN(priv, 1680 IWL_WARN(priv,
1810 "queue number out of range: %d, must be %d to %d\n", 1681 "queue number out of range: %d, must be %d to %d\n",
1811 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1682 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1812 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1683 IWL49_FIRST_AMPDU_QUEUE +
1684 priv->cfg->num_of_ampdu_queues - 1);
1813 return -EINVAL; 1685 return -EINVAL;
1814 } 1686 }
1815 1687
@@ -1870,11 +1742,13 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1870 u16 ra_tid; 1742 u16 ra_tid;
1871 1743
1872 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1744 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1873 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1745 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1746 <= txq_id)) {
1874 IWL_WARN(priv, 1747 IWL_WARN(priv,
1875 "queue number out of range: %d, must be %d to %d\n", 1748 "queue number out of range: %d, must be %d to %d\n",
1876 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1749 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1877 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1750 IWL49_FIRST_AMPDU_QUEUE +
1751 priv->cfg->num_of_ampdu_queues - 1);
1878 return -EINVAL; 1752 return -EINVAL;
1879 } 1753 }
1880 1754
@@ -1944,8 +1818,9 @@ static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
1944 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid; 1818 addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
1945 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid; 1819 addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
1946 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn; 1820 addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
1821 addsta->sleep_tx_count = cmd->sleep_tx_count;
1947 addsta->reserved1 = cpu_to_le16(0); 1822 addsta->reserved1 = cpu_to_le16(0);
1948 addsta->reserved2 = cpu_to_le32(0); 1823 addsta->reserved2 = cpu_to_le16(0);
1949 1824
1950 return (u16)sizeof(struct iwl4965_addsta_cmd); 1825 return (u16)sizeof(struct iwl4965_addsta_cmd);
1951} 1826}
@@ -1991,8 +1866,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1991 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 1866 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
1992 info->status.rates[0].count = tx_resp->failure_frame + 1; 1867 info->status.rates[0].count = tx_resp->failure_frame + 1;
1993 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1868 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1994 info->flags |= iwl_is_tx_success(status) ? 1869 info->flags |= iwl_tx_status_to_mac80211(status);
1995 IEEE80211_TX_STAT_ACK : 0;
1996 iwl_hwrate_to_tx_control(priv, rate_n_flags, info); 1870 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
1997 /* FIXME: code repetition end */ 1871 /* FIXME: code repetition end */
1998 1872
@@ -2078,7 +1952,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2078static void iwl4965_rx_reply_tx(struct iwl_priv *priv, 1952static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2079 struct iwl_rx_mem_buffer *rxb) 1953 struct iwl_rx_mem_buffer *rxb)
2080{ 1954{
2081 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1955 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2082 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1956 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2083 int txq_id = SEQ_TO_QUEUE(sequence); 1957 int txq_id = SEQ_TO_QUEUE(sequence);
2084 int index = SEQ_TO_INDEX(sequence); 1958 int index = SEQ_TO_INDEX(sequence);
@@ -2147,8 +2021,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2147 } 2021 }
2148 } else { 2022 } else {
2149 info->status.rates[0].count = tx_resp->failure_frame + 1; 2023 info->status.rates[0].count = tx_resp->failure_frame + 1;
2150 info->flags |= iwl_is_tx_success(status) ? 2024 info->flags |= iwl_tx_status_to_mac80211(status);
2151 IEEE80211_TX_STAT_ACK : 0;
2152 iwl_hwrate_to_tx_control(priv, 2025 iwl_hwrate_to_tx_control(priv,
2153 le32_to_cpu(tx_resp->rate_n_flags), 2026 le32_to_cpu(tx_resp->rate_n_flags),
2154 info); 2027 info);
@@ -2279,7 +2152,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2279 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2152 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2280 .chain_noise_reset = iwl4965_chain_noise_reset, 2153 .chain_noise_reset = iwl4965_chain_noise_reset,
2281 .gain_computation = iwl4965_gain_computation, 2154 .gain_computation = iwl4965_gain_computation,
2282 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag, 2155 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2283 .calc_rssi = iwl4965_calc_rssi, 2156 .calc_rssi = iwl4965_calc_rssi,
2284}; 2157};
2285 2158
@@ -2301,10 +2174,10 @@ static struct iwl_lib_ops iwl4965_lib = {
2301 .load_ucode = iwl4965_load_bsm, 2174 .load_ucode = iwl4965_load_bsm,
2302 .dump_nic_event_log = iwl_dump_nic_event_log, 2175 .dump_nic_event_log = iwl_dump_nic_event_log,
2303 .dump_nic_error_log = iwl_dump_nic_error_log, 2176 .dump_nic_error_log = iwl_dump_nic_error_log,
2177 .set_channel_switch = iwl4965_hw_channel_switch,
2304 .apm_ops = { 2178 .apm_ops = {
2305 .init = iwl4965_apm_init, 2179 .init = iwl_apm_init,
2306 .reset = iwl4965_apm_reset, 2180 .stop = iwl_apm_stop,
2307 .stop = iwl4965_apm_stop,
2308 .config = iwl4965_nic_config, 2181 .config = iwl4965_nic_config,
2309 .set_pwr_src = iwl_set_pwr_src, 2182 .set_pwr_src = iwl_set_pwr_src,
2310 }, 2183 },
@@ -2340,6 +2213,7 @@ static struct iwl_ops iwl4965_ops = {
2340 .lib = &iwl4965_lib, 2213 .lib = &iwl4965_lib,
2341 .hcmd = &iwl4965_hcmd, 2214 .hcmd = &iwl4965_hcmd,
2342 .utils = &iwl4965_hcmd_utils, 2215 .utils = &iwl4965_hcmd_utils,
2216 .led = &iwlagn_led_ops,
2343}; 2217};
2344 2218
2345struct iwl_cfg iwl4965_agn_cfg = { 2219struct iwl_cfg iwl4965_agn_cfg = {
@@ -2352,30 +2226,41 @@ struct iwl_cfg iwl4965_agn_cfg = {
2352 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2226 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2353 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, 2227 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2354 .ops = &iwl4965_ops, 2228 .ops = &iwl4965_ops,
2229 .num_of_queues = IWL49_NUM_QUEUES,
2230 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2355 .mod_params = &iwl4965_mod_params, 2231 .mod_params = &iwl4965_mod_params,
2232 .valid_tx_ant = ANT_AB,
2233 .valid_rx_ant = ANT_ABC,
2234 .pll_cfg_val = 0,
2235 .set_l0s = true,
2236 .use_bsm = true,
2356 .use_isr_legacy = true, 2237 .use_isr_legacy = true,
2357 .ht_greenfield_support = false, 2238 .ht_greenfield_support = false,
2358 .broken_powersave = true, 2239 .broken_powersave = true,
2240 .led_compensation = 61,
2241 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2242 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
2359}; 2243};
2360 2244
2361/* Module firmware */ 2245/* Module firmware */
2362MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); 2246MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2363 2247
2364module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444); 2248module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
2365MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 2249MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2366module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444); 2250module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
2367MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 2251MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2368module_param_named( 2252module_param_named(
2369 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444); 2253 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
2370MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 2254MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2371 2255
2372module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444); 2256module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
2373MODULE_PARM_DESC(queues_num, "number of hw queues."); 2257MODULE_PARM_DESC(queues_num, "number of hw queues.");
2374/* 11n */ 2258/* 11n */
2375module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444); 2259module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
2376MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); 2260MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
2377module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444); 2261module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
2262 int, S_IRUGO);
2378MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 2263MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
2379 2264
2380module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444); 2265module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
2381MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error"); 2266MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 6e6f516ba404..e2f8615c8c9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -43,6 +43,7 @@
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-agn-led.h"
46#include "iwl-5000-hw.h" 47#include "iwl-5000-hw.h"
47#include "iwl-6000-hw.h" 48#include "iwl-6000-hw.h"
48 49
@@ -72,157 +73,18 @@ static const u16 iwl5000_default_queue_to_tx_fifo[] = {
72 IWL_TX_FIFO_HCCA_2 73 IWL_TX_FIFO_HCCA_2
73}; 74};
74 75
75/* FIXME: same implementation as 4965 */ 76/* NIC configuration for 5000 series */
76static int iwl5000_apm_stop_master(struct iwl_priv *priv)
77{
78 unsigned long flags;
79
80 spin_lock_irqsave(&priv->lock, flags);
81
82 /* set stop master bit */
83 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
84
85 iwl_poll_direct_bit(priv, CSR_RESET,
86 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
87
88 spin_unlock_irqrestore(&priv->lock, flags);
89 IWL_DEBUG_INFO(priv, "stop master\n");
90
91 return 0;
92}
93
94
95int iwl5000_apm_init(struct iwl_priv *priv)
96{
97 int ret = 0;
98
99 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
100 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
101
102 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
103 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
104 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
105
106 /* Set FH wait threshold to maximum (HW error during stress W/A) */
107 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
108
109 /* enable HAP INTA to move device L1a -> L0s */
110 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
111 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
112
113 if (priv->cfg->need_pll_cfg)
114 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
115
116 /* set "initialization complete" bit to move adapter
117 * D0U* --> D0A* state */
118 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
119
120 /* wait for clock stabilization */
121 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
122 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
123 if (ret < 0) {
124 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
125 return ret;
126 }
127
128 /* enable DMA */
129 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
130
131 udelay(20);
132
133 /* disable L1-Active */
134 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
135 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
136
137 return ret;
138}
139
140/* FIXME: this is identical to 4965 */
141void iwl5000_apm_stop(struct iwl_priv *priv)
142{
143 unsigned long flags;
144
145 iwl5000_apm_stop_master(priv);
146
147 spin_lock_irqsave(&priv->lock, flags);
148
149 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
150
151 udelay(10);
152
153 /* clear "init complete" move adapter D0A* --> D0U state */
154 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
155
156 spin_unlock_irqrestore(&priv->lock, flags);
157}
158
159
160int iwl5000_apm_reset(struct iwl_priv *priv)
161{
162 int ret = 0;
163
164 iwl5000_apm_stop_master(priv);
165
166 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
167
168 udelay(10);
169
170
171 /* FIXME: put here L1A -L0S w/a */
172
173 if (priv->cfg->need_pll_cfg)
174 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
175
176 /* set "initialization complete" bit to move adapter
177 * D0U* --> D0A* state */
178 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
179
180 /* wait for clock stabilization */
181 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
182 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
183 if (ret < 0) {
184 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
185 goto out;
186 }
187
188 /* enable DMA */
189 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
190
191 udelay(20);
192
193 /* disable L1-Active */
194 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
195 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
196out:
197
198 return ret;
199}
200
201
202/* NIC configuration for 5000 series and up */
203void iwl5000_nic_config(struct iwl_priv *priv) 77void iwl5000_nic_config(struct iwl_priv *priv)
204{ 78{
205 unsigned long flags; 79 unsigned long flags;
206 u16 radio_cfg; 80 u16 radio_cfg;
207 u16 lctl;
208 81
209 spin_lock_irqsave(&priv->lock, flags); 82 spin_lock_irqsave(&priv->lock, flags);
210 83
211 lctl = iwl_pcie_link_ctl(priv);
212
213 /* HW bug W/A */
214 /* L1-ASPM is enabled by BIOS */
215 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
216 /* L1-APSM enabled: disable L0S */
217 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
218 else
219 /* L1-ASPM disabled: enable L0S */
220 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
221
222 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 84 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
223 85
224 /* write radio config values to register */ 86 /* write radio config values to register */
225 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX) 87 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_RF_CONFIG_TYPE_MAX)
226 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 88 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
227 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 89 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
228 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 90 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
@@ -302,19 +164,22 @@ u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
302static void iwl5000_gain_computation(struct iwl_priv *priv, 164static void iwl5000_gain_computation(struct iwl_priv *priv,
303 u32 average_noise[NUM_RX_CHAINS], 165 u32 average_noise[NUM_RX_CHAINS],
304 u16 min_average_noise_antenna_i, 166 u16 min_average_noise_antenna_i,
305 u32 min_average_noise) 167 u32 min_average_noise,
168 u8 default_chain)
306{ 169{
307 int i; 170 int i;
308 s32 delta_g; 171 s32 delta_g;
309 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 172 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
310 173
311 /* Find Gain Code for the antennas B and C */ 174 /*
312 for (i = 1; i < NUM_RX_CHAINS; i++) { 175 * Find Gain Code for the chains based on "default chain"
176 */
177 for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
313 if ((data->disconn_array[i])) { 178 if ((data->disconn_array[i])) {
314 data->delta_gain_code[i] = 0; 179 data->delta_gain_code[i] = 0;
315 continue; 180 continue;
316 } 181 }
317 delta_g = (1000 * ((s32)average_noise[0] - 182 delta_g = (1000 * ((s32)average_noise[default_chain] -
318 (s32)average_noise[i])) / 1500; 183 (s32)average_noise[i])) / 1500;
319 /* bound gain by 2 bits value max, 3rd bit is sign */ 184 /* bound gain by 2 bits value max, 3rd bit is sign */
320 data->delta_gain_code[i] = 185 data->delta_gain_code[i] =
@@ -407,6 +272,10 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
407 .auto_corr_max_cck_mrc = 400, 272 .auto_corr_max_cck_mrc = 400,
408 .nrg_th_cck = 95, 273 .nrg_th_cck = 95,
409 .nrg_th_ofdm = 95, 274 .nrg_th_ofdm = 95,
275
276 .barker_corr_th_min = 190,
277 .barker_corr_th_min_mrc = 390,
278 .nrg_th_cca = 62,
410}; 279};
411 280
412static struct iwl_sensitivity_ranges iwl5150_sensitivity = { 281static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
@@ -429,6 +298,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
429 .auto_corr_max_cck_mrc = 400, 298 .auto_corr_max_cck_mrc = 400,
430 .nrg_th_cck = 95, 299 .nrg_th_cck = 95,
431 .nrg_th_ofdm = 95, 300 .nrg_th_ofdm = 95,
301
302 .barker_corr_th_min = 190,
303 .barker_corr_th_min_mrc = 390,
304 .nrg_th_cca = 62,
432}; 305};
433 306
434const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, 307const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -493,7 +366,7 @@ static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
493static void iwl5000_rx_calib_result(struct iwl_priv *priv, 366static void iwl5000_rx_calib_result(struct iwl_priv *priv,
494 struct iwl_rx_mem_buffer *rxb) 367 struct iwl_rx_mem_buffer *rxb)
495{ 368{
496 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 369 struct iwl_rx_packet *pkt = rxb_addr(rxb);
497 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; 370 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
498 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 371 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
499 int index; 372 int index;
@@ -719,16 +592,6 @@ static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
719 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 592 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
720} 593}
721 594
722static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
723{
724 struct iwl_wimax_coex_cmd coex_cmd;
725
726 memset(&coex_cmd, 0, sizeof(coex_cmd));
727
728 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
729 sizeof(coex_cmd), &coex_cmd);
730}
731
732int iwl5000_alive_notify(struct iwl_priv *priv) 595int iwl5000_alive_notify(struct iwl_priv *priv)
733{ 596{
734 u32 a; 597 u32 a;
@@ -746,7 +609,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
746 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET; 609 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
747 a += 4) 610 a += 4)
748 iwl_write_targ_mem(priv, a, 0); 611 iwl_write_targ_mem(priv, a, 0);
749 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 612 for (; a < priv->scd_base_addr +
613 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
750 iwl_write_targ_mem(priv, a, 0); 614 iwl_write_targ_mem(priv, a, 0);
751 615
752 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, 616 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
@@ -798,9 +662,13 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
798 iwl_txq_ctx_activate(priv, i); 662 iwl_txq_ctx_activate(priv, i);
799 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 663 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
800 } 664 }
801 /* TODO - need to initialize those FIFOs inside the loop above, 665
802 * not only mark them as active */ 666 /*
803 iwl_txq_ctx_activate(priv, 4); 667 * TODO - need to initialize these queues and map them to FIFOs
668 * in the loop above, not only mark them as active. We do this
669 * because we want the first aggregation queue to be queue #10,
670 * but do not use 8 or 9 otherwise yet.
671 */
804 iwl_txq_ctx_activate(priv, 7); 672 iwl_txq_ctx_activate(priv, 7);
805 iwl_txq_ctx_activate(priv, 8); 673 iwl_txq_ctx_activate(priv, 8);
806 iwl_txq_ctx_activate(priv, 9); 674 iwl_txq_ctx_activate(priv, 9);
@@ -808,7 +676,7 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
808 spin_unlock_irqrestore(&priv->lock, flags); 676 spin_unlock_irqrestore(&priv->lock, flags);
809 677
810 678
811 iwl5000_send_wimax_coex(priv); 679 iwl_send_wimax_coex(priv);
812 680
813 iwl5000_set_Xtal_calib(priv); 681 iwl5000_set_Xtal_calib(priv);
814 iwl_send_calib_results(priv); 682 iwl_send_calib_results(priv);
@@ -818,32 +686,22 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
818 686
819int iwl5000_hw_set_hw_params(struct iwl_priv *priv) 687int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
820{ 688{
821 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) || 689 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
822 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 690 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
823 IWL_ERR(priv, 691 priv->cfg->num_of_queues =
824 "invalid queues_num, should be between %d and %d\n", 692 priv->cfg->mod_params->num_of_queues;
825 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
826 return -EINVAL;
827 }
828 693
829 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 694 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
830 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 695 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
831 priv->hw_params.scd_bc_tbls_size = 696 priv->hw_params.scd_bc_tbls_size =
832 IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl); 697 priv->cfg->num_of_queues *
698 sizeof(struct iwl5000_scd_bc_tbl);
833 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 699 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
834 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 700 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
835 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 701 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
836 702
837 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 703 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
838 case CSR_HW_REV_TYPE_6x00: 704 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
839 case CSR_HW_REV_TYPE_6x50:
840 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
841 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
842 break;
843 default:
844 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
845 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
846 }
847 705
848 priv->hw_params.max_bsm_size = 0; 706 priv->hw_params.max_bsm_size = 0;
849 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 707 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -989,11 +847,13 @@ int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
989 u16 ra_tid; 847 u16 ra_tid;
990 848
991 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 849 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
992 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 850 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
851 <= txq_id)) {
993 IWL_WARN(priv, 852 IWL_WARN(priv,
994 "queue number out of range: %d, must be %d to %d\n", 853 "queue number out of range: %d, must be %d to %d\n",
995 txq_id, IWL50_FIRST_AMPDU_QUEUE, 854 txq_id, IWL50_FIRST_AMPDU_QUEUE,
996 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 855 IWL50_FIRST_AMPDU_QUEUE +
856 priv->cfg->num_of_ampdu_queues - 1);
997 return -EINVAL; 857 return -EINVAL;
998 } 858 }
999 859
@@ -1047,11 +907,13 @@ int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1047 u16 ssn_idx, u8 tx_fifo) 907 u16 ssn_idx, u8 tx_fifo)
1048{ 908{
1049 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 909 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1050 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 910 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
911 <= txq_id)) {
1051 IWL_ERR(priv, 912 IWL_ERR(priv,
1052 "queue number out of range: %d, must be %d to %d\n", 913 "queue number out of range: %d, must be %d to %d\n",
1053 txq_id, IWL50_FIRST_AMPDU_QUEUE, 914 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1054 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 915 IWL50_FIRST_AMPDU_QUEUE +
916 priv->cfg->num_of_ampdu_queues - 1);
1055 return -EINVAL; 917 return -EINVAL;
1056 } 918 }
1057 919
@@ -1132,8 +994,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1132 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]); 994 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
1133 info->status.rates[0].count = tx_resp->failure_frame + 1; 995 info->status.rates[0].count = tx_resp->failure_frame + 1;
1134 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 996 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1135 info->flags |= iwl_is_tx_success(status) ? 997 info->flags |= iwl_tx_status_to_mac80211(status);
1136 IEEE80211_TX_STAT_ACK : 0;
1137 iwl_hwrate_to_tx_control(priv, rate_n_flags, info); 998 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
1138 999
1139 /* FIXME: code repetition end */ 1000 /* FIXME: code repetition end */
@@ -1218,7 +1079,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1218static void iwl5000_rx_reply_tx(struct iwl_priv *priv, 1079static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1219 struct iwl_rx_mem_buffer *rxb) 1080 struct iwl_rx_mem_buffer *rxb)
1220{ 1081{
1221 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1082 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1222 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1083 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1223 int txq_id = SEQ_TO_QUEUE(sequence); 1084 int txq_id = SEQ_TO_QUEUE(sequence);
1224 int index = SEQ_TO_INDEX(sequence); 1085 int index = SEQ_TO_INDEX(sequence);
@@ -1278,8 +1139,7 @@ static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1278 BUG_ON(txq_id != txq->swq_id); 1139 BUG_ON(txq_id != txq->swq_id);
1279 1140
1280 info->status.rates[0].count = tx_resp->failure_frame + 1; 1141 info->status.rates[0].count = tx_resp->failure_frame + 1;
1281 info->flags |= iwl_is_tx_success(status) ? 1142 info->flags |= iwl_tx_status_to_mac80211(status);
1282 IEEE80211_TX_STAT_ACK : 0;
1283 iwl_hwrate_to_tx_control(priv, 1143 iwl_hwrate_to_tx_control(priv,
1284 le32_to_cpu(tx_resp->rate_n_flags), 1144 le32_to_cpu(tx_resp->rate_n_flags),
1285 info); 1145 info);
@@ -1389,6 +1249,22 @@ int iwl5000_send_tx_power(struct iwl_priv *priv)
1389 1249
1390 /* half dBm need to multiply */ 1250 /* half dBm need to multiply */
1391 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 1251 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
1252
1253 if (priv->tx_power_lmt_in_half_dbm &&
1254 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
1255 /*
1256 * For the newer devices which using enhanced/extend tx power
1257 * table in EEPROM, the format is in half dBm. driver need to
1258 * convert to dBm format before report to mac80211.
1259 * By doing so, there is a possibility of 1/2 dBm resolution
1260 * lost. driver will perform "round-up" operation before
1261 * reporting, but it will cause 1/2 dBm tx power over the
1262 * regulatory limit. Perform the checking here, if the
1263 * "tx_power_user_lmt" is higher than EEPROM value (in
1264 * half-dBm format), lower the tx power based on EEPROM
1265 */
1266 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
1267 }
1392 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED; 1268 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
1393 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO; 1269 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
1394 1270
@@ -1459,6 +1335,24 @@ int iwl5000_calc_rssi(struct iwl_priv *priv,
1459 return max_rssi - agc - IWL49_RSSI_OFFSET; 1335 return max_rssi - agc - IWL49_RSSI_OFFSET;
1460} 1336}
1461 1337
1338static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
1339{
1340 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
1341 .valid = cpu_to_le32(valid_tx_ant),
1342 };
1343
1344 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
1345 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
1346 return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
1347 sizeof(struct iwl_tx_ant_config_cmd),
1348 &tx_ant_cmd);
1349 } else {
1350 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
1351 return -EOPNOTSUPP;
1352 }
1353}
1354
1355
1462#define IWL5000_UCODE_GET(item) \ 1356#define IWL5000_UCODE_GET(item) \
1463static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\ 1357static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
1464 u32 api_ver) \ 1358 u32 api_ver) \
@@ -1497,10 +1391,43 @@ IWL5000_UCODE_GET(init_size);
1497IWL5000_UCODE_GET(init_data_size); 1391IWL5000_UCODE_GET(init_data_size);
1498IWL5000_UCODE_GET(boot_size); 1392IWL5000_UCODE_GET(boot_size);
1499 1393
1394static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1395{
1396 struct iwl5000_channel_switch_cmd cmd;
1397 const struct iwl_channel_info *ch_info;
1398 struct iwl_host_cmd hcmd = {
1399 .id = REPLY_CHANNEL_SWITCH,
1400 .len = sizeof(cmd),
1401 .flags = CMD_SIZE_HUGE,
1402 .data = &cmd,
1403 };
1404
1405 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
1406 priv->active_rxon.channel, channel);
1407 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
1408 cmd.channel = cpu_to_le16(channel);
1409 cmd.rxon_flags = priv->staging_rxon.flags;
1410 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
1411 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1412 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1413 if (ch_info)
1414 cmd.expect_beacon = is_channel_radar(ch_info);
1415 else {
1416 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1417 priv->active_rxon.channel, channel);
1418 return -EFAULT;
1419 }
1420 priv->switch_rxon.channel = cpu_to_le16(channel);
1421 priv->switch_rxon.switch_in_progress = true;
1422
1423 return iwl_send_cmd_sync(priv, &hcmd);
1424}
1425
1500struct iwl_hcmd_ops iwl5000_hcmd = { 1426struct iwl_hcmd_ops iwl5000_hcmd = {
1501 .rxon_assoc = iwl5000_send_rxon_assoc, 1427 .rxon_assoc = iwl5000_send_rxon_assoc,
1502 .commit_rxon = iwl_commit_rxon, 1428 .commit_rxon = iwl_commit_rxon,
1503 .set_rxon_chain = iwl_set_rxon_chain, 1429 .set_rxon_chain = iwl_set_rxon_chain,
1430 .set_tx_ant = iwl5000_send_tx_ant_config,
1504}; 1431};
1505 1432
1506struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = { 1433struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
@@ -1543,10 +1470,10 @@ struct iwl_lib_ops iwl5000_lib = {
1543 .alive_notify = iwl5000_alive_notify, 1470 .alive_notify = iwl5000_alive_notify,
1544 .send_tx_power = iwl5000_send_tx_power, 1471 .send_tx_power = iwl5000_send_tx_power,
1545 .update_chain_flags = iwl_update_chain_flags, 1472 .update_chain_flags = iwl_update_chain_flags,
1473 .set_channel_switch = iwl5000_hw_channel_switch,
1546 .apm_ops = { 1474 .apm_ops = {
1547 .init = iwl5000_apm_init, 1475 .init = iwl_apm_init,
1548 .reset = iwl5000_apm_reset, 1476 .stop = iwl_apm_stop,
1549 .stop = iwl5000_apm_stop,
1550 .config = iwl5000_nic_config, 1477 .config = iwl5000_nic_config,
1551 .set_pwr_src = iwl_set_pwr_src, 1478 .set_pwr_src = iwl_set_pwr_src,
1552 }, 1479 },
@@ -1595,10 +1522,10 @@ static struct iwl_lib_ops iwl5150_lib = {
1595 .alive_notify = iwl5000_alive_notify, 1522 .alive_notify = iwl5000_alive_notify,
1596 .send_tx_power = iwl5000_send_tx_power, 1523 .send_tx_power = iwl5000_send_tx_power,
1597 .update_chain_flags = iwl_update_chain_flags, 1524 .update_chain_flags = iwl_update_chain_flags,
1525 .set_channel_switch = iwl5000_hw_channel_switch,
1598 .apm_ops = { 1526 .apm_ops = {
1599 .init = iwl5000_apm_init, 1527 .init = iwl_apm_init,
1600 .reset = iwl5000_apm_reset, 1528 .stop = iwl_apm_stop,
1601 .stop = iwl5000_apm_stop,
1602 .config = iwl5000_nic_config, 1529 .config = iwl5000_nic_config,
1603 .set_pwr_src = iwl_set_pwr_src, 1530 .set_pwr_src = iwl_set_pwr_src,
1604 }, 1531 },
@@ -1627,11 +1554,12 @@ static struct iwl_lib_ops iwl5150_lib = {
1627 }, 1554 },
1628}; 1555};
1629 1556
1630struct iwl_ops iwl5000_ops = { 1557static struct iwl_ops iwl5000_ops = {
1631 .ucode = &iwl5000_ucode, 1558 .ucode = &iwl5000_ucode,
1632 .lib = &iwl5000_lib, 1559 .lib = &iwl5000_lib,
1633 .hcmd = &iwl5000_hcmd, 1560 .hcmd = &iwl5000_hcmd,
1634 .utils = &iwl5000_hcmd_utils, 1561 .utils = &iwl5000_hcmd_utils,
1562 .led = &iwlagn_led_ops,
1635}; 1563};
1636 1564
1637static struct iwl_ops iwl5150_ops = { 1565static struct iwl_ops iwl5150_ops = {
@@ -1639,11 +1567,10 @@ static struct iwl_ops iwl5150_ops = {
1639 .lib = &iwl5150_lib, 1567 .lib = &iwl5150_lib,
1640 .hcmd = &iwl5000_hcmd, 1568 .hcmd = &iwl5000_hcmd,
1641 .utils = &iwl5000_hcmd_utils, 1569 .utils = &iwl5000_hcmd_utils,
1570 .led = &iwlagn_led_ops,
1642}; 1571};
1643 1572
1644struct iwl_mod_params iwl50_mod_params = { 1573struct iwl_mod_params iwl50_mod_params = {
1645 .num_of_queues = IWL50_NUM_QUEUES,
1646 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1647 .amsdu_size_8K = 1, 1574 .amsdu_size_8K = 1,
1648 .restart_fw = 1, 1575 .restart_fw = 1,
1649 /* the rest are 0 by default */ 1576 /* the rest are 0 by default */
@@ -1660,28 +1587,41 @@ struct iwl_cfg iwl5300_agn_cfg = {
1660 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1587 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1661 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1588 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1662 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1589 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1590 .num_of_queues = IWL50_NUM_QUEUES,
1591 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1663 .mod_params = &iwl50_mod_params, 1592 .mod_params = &iwl50_mod_params,
1664 .valid_tx_ant = ANT_ABC, 1593 .valid_tx_ant = ANT_ABC,
1665 .valid_rx_ant = ANT_ABC, 1594 .valid_rx_ant = ANT_ABC,
1666 .need_pll_cfg = true, 1595 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1596 .set_l0s = true,
1597 .use_bsm = false,
1667 .ht_greenfield_support = true, 1598 .ht_greenfield_support = true,
1599 .led_compensation = 51,
1600 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1601 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1668}; 1602};
1669 1603
1670struct iwl_cfg iwl5100_bg_cfg = { 1604struct iwl_cfg iwl5100_bgn_cfg = {
1671 .name = "5100BG", 1605 .name = "5100BGN",
1672 .fw_name_pre = IWL5000_FW_PRE, 1606 .fw_name_pre = IWL5000_FW_PRE,
1673 .ucode_api_max = IWL5000_UCODE_API_MAX, 1607 .ucode_api_max = IWL5000_UCODE_API_MAX,
1674 .ucode_api_min = IWL5000_UCODE_API_MIN, 1608 .ucode_api_min = IWL5000_UCODE_API_MIN,
1675 .sku = IWL_SKU_G, 1609 .sku = IWL_SKU_G|IWL_SKU_N,
1676 .ops = &iwl5000_ops, 1610 .ops = &iwl5000_ops,
1677 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1611 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1678 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1612 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1679 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1613 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1614 .num_of_queues = IWL50_NUM_QUEUES,
1615 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1680 .mod_params = &iwl50_mod_params, 1616 .mod_params = &iwl50_mod_params,
1681 .valid_tx_ant = ANT_B, 1617 .valid_tx_ant = ANT_B,
1682 .valid_rx_ant = ANT_AB, 1618 .valid_rx_ant = ANT_AB,
1683 .need_pll_cfg = true, 1619 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1620 .set_l0s = true,
1621 .use_bsm = false,
1684 .ht_greenfield_support = true, 1622 .ht_greenfield_support = true,
1623 .led_compensation = 51,
1624 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1685}; 1625};
1686 1626
1687struct iwl_cfg iwl5100_abg_cfg = { 1627struct iwl_cfg iwl5100_abg_cfg = {
@@ -1694,11 +1634,16 @@ struct iwl_cfg iwl5100_abg_cfg = {
1694 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1634 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1695 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1635 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1696 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1636 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1637 .num_of_queues = IWL50_NUM_QUEUES,
1638 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1697 .mod_params = &iwl50_mod_params, 1639 .mod_params = &iwl50_mod_params,
1698 .valid_tx_ant = ANT_B, 1640 .valid_tx_ant = ANT_B,
1699 .valid_rx_ant = ANT_AB, 1641 .valid_rx_ant = ANT_AB,
1700 .need_pll_cfg = true, 1642 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1701 .ht_greenfield_support = true, 1643 .set_l0s = true,
1644 .use_bsm = false,
1645 .led_compensation = 51,
1646 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1702}; 1647};
1703 1648
1704struct iwl_cfg iwl5100_agn_cfg = { 1649struct iwl_cfg iwl5100_agn_cfg = {
@@ -1711,11 +1656,18 @@ struct iwl_cfg iwl5100_agn_cfg = {
1711 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1656 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1712 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1657 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1713 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1658 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1659 .num_of_queues = IWL50_NUM_QUEUES,
1660 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1714 .mod_params = &iwl50_mod_params, 1661 .mod_params = &iwl50_mod_params,
1715 .valid_tx_ant = ANT_B, 1662 .valid_tx_ant = ANT_B,
1716 .valid_rx_ant = ANT_AB, 1663 .valid_rx_ant = ANT_AB,
1717 .need_pll_cfg = true, 1664 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1665 .set_l0s = true,
1666 .use_bsm = false,
1718 .ht_greenfield_support = true, 1667 .ht_greenfield_support = true,
1668 .led_compensation = 51,
1669 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1670 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1719}; 1671};
1720 1672
1721struct iwl_cfg iwl5350_agn_cfg = { 1673struct iwl_cfg iwl5350_agn_cfg = {
@@ -1728,11 +1680,18 @@ struct iwl_cfg iwl5350_agn_cfg = {
1728 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1680 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1729 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1681 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1730 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1682 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1683 .num_of_queues = IWL50_NUM_QUEUES,
1684 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1731 .mod_params = &iwl50_mod_params, 1685 .mod_params = &iwl50_mod_params,
1732 .valid_tx_ant = ANT_ABC, 1686 .valid_tx_ant = ANT_ABC,
1733 .valid_rx_ant = ANT_ABC, 1687 .valid_rx_ant = ANT_ABC,
1734 .need_pll_cfg = true, 1688 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1689 .set_l0s = true,
1690 .use_bsm = false,
1735 .ht_greenfield_support = true, 1691 .ht_greenfield_support = true,
1692 .led_compensation = 51,
1693 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1694 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1736}; 1695};
1737 1696
1738struct iwl_cfg iwl5150_agn_cfg = { 1697struct iwl_cfg iwl5150_agn_cfg = {
@@ -1745,24 +1704,54 @@ struct iwl_cfg iwl5150_agn_cfg = {
1745 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1704 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1746 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1705 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1747 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1706 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1707 .num_of_queues = IWL50_NUM_QUEUES,
1708 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1748 .mod_params = &iwl50_mod_params, 1709 .mod_params = &iwl50_mod_params,
1749 .valid_tx_ant = ANT_A, 1710 .valid_tx_ant = ANT_A,
1750 .valid_rx_ant = ANT_AB, 1711 .valid_rx_ant = ANT_AB,
1751 .need_pll_cfg = true, 1712 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1713 .set_l0s = true,
1714 .use_bsm = false,
1752 .ht_greenfield_support = true, 1715 .ht_greenfield_support = true,
1716 .led_compensation = 51,
1717 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1718 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
1719};
1720
1721struct iwl_cfg iwl5150_abg_cfg = {
1722 .name = "5150ABG",
1723 .fw_name_pre = IWL5150_FW_PRE,
1724 .ucode_api_max = IWL5150_UCODE_API_MAX,
1725 .ucode_api_min = IWL5150_UCODE_API_MIN,
1726 .sku = IWL_SKU_A|IWL_SKU_G,
1727 .ops = &iwl5150_ops,
1728 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1729 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1730 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1731 .num_of_queues = IWL50_NUM_QUEUES,
1732 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1733 .mod_params = &iwl50_mod_params,
1734 .valid_tx_ant = ANT_A,
1735 .valid_rx_ant = ANT_AB,
1736 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1737 .set_l0s = true,
1738 .use_bsm = false,
1739 .led_compensation = 51,
1740 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1753}; 1741};
1754 1742
1755MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 1743MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
1756MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); 1744MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
1757 1745
1758module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444); 1746module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
1759MODULE_PARM_DESC(swcrypto50, 1747MODULE_PARM_DESC(swcrypto50,
1760 "using software crypto engine (default 0 [hardware])\n"); 1748 "using software crypto engine (default 0 [hardware])\n");
1761module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444); 1749module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
1762MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series"); 1750MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1763module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, 0444); 1751module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
1764MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality"); 1752MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
1765module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444); 1753module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
1754 int, S_IRUGO);
1766MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series"); 1755MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
1767module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444); 1756module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
1768MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error"); 1757MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 1473452ba22f..74e571049273 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -44,14 +44,16 @@
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 46#include "iwl-5000-hw.h"
47#include "iwl-6000-hw.h"
48#include "iwl-agn-led.h"
47 49
48/* Highest firmware API version supported */ 50/* Highest firmware API version supported */
49#define IWL6000_UCODE_API_MAX 4 51#define IWL6000_UCODE_API_MAX 4
50#define IWL6050_UCODE_API_MAX 4 52#define IWL6050_UCODE_API_MAX 4
51 53
52/* Lowest firmware API version supported */ 54/* Lowest firmware API version supported */
53#define IWL6000_UCODE_API_MIN 1 55#define IWL6000_UCODE_API_MIN 4
54#define IWL6050_UCODE_API_MIN 1 56#define IWL6050_UCODE_API_MIN 4
55 57
56#define IWL6000_FW_PRE "iwlwifi-6000-" 58#define IWL6000_FW_PRE "iwlwifi-6000-"
57#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 59#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -71,14 +73,24 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
71/* NIC configuration for 6000 series */ 73/* NIC configuration for 6000 series */
72static void iwl6000_nic_config(struct iwl_priv *priv) 74static void iwl6000_nic_config(struct iwl_priv *priv)
73{ 75{
74 iwl5000_nic_config(priv); 76 u16 radio_cfg;
77
78 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
79
80 /* write radio config values to register */
81 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
82 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
83 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
84 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
85 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
86
87 /* set CSR_HW_CONFIG_REG for uCode use */
88 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
89 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
90 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
75 91
76 /* no locking required for register write */ 92 /* no locking required for register write */
77 if (priv->cfg->pa_type == IWL_PA_HYBRID) { 93 if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
78 /* 2x2 hybrid phy type */
79 iwl_write32(priv, CSR_GP_DRIVER_REG,
80 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB);
81 } else if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
82 /* 2x2 IPA phy type */ 94 /* 2x2 IPA phy type */
83 iwl_write32(priv, CSR_GP_DRIVER_REG, 95 iwl_write32(priv, CSR_GP_DRIVER_REG,
84 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA); 96 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
@@ -86,8 +98,109 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
86 /* else do nothing, uCode configured */ 98 /* else do nothing, uCode configured */
87} 99}
88 100
101static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
102 .min_nrg_cck = 97,
103 .max_nrg_cck = 0, /* not used, set to 0 */
104 .auto_corr_min_ofdm = 80,
105 .auto_corr_min_ofdm_mrc = 128,
106 .auto_corr_min_ofdm_x1 = 105,
107 .auto_corr_min_ofdm_mrc_x1 = 192,
108
109 .auto_corr_max_ofdm = 145,
110 .auto_corr_max_ofdm_mrc = 232,
111 .auto_corr_max_ofdm_x1 = 145,
112 .auto_corr_max_ofdm_mrc_x1 = 232,
113
114 .auto_corr_min_cck = 125,
115 .auto_corr_max_cck = 175,
116 .auto_corr_min_cck_mrc = 160,
117 .auto_corr_max_cck_mrc = 310,
118 .nrg_th_cck = 97,
119 .nrg_th_ofdm = 100,
120
121 .barker_corr_th_min = 190,
122 .barker_corr_th_min_mrc = 390,
123 .nrg_th_cca = 62,
124};
125
126static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
127{
128 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
129 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
130 priv->cfg->num_of_queues =
131 priv->cfg->mod_params->num_of_queues;
132
133 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
134 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
135 priv->hw_params.scd_bc_tbls_size =
136 priv->cfg->num_of_queues *
137 sizeof(struct iwl5000_scd_bc_tbl);
138 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
139 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
140 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
141
142 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
143 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
144
145 priv->hw_params.max_bsm_size = 0;
146 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
147 BIT(IEEE80211_BAND_5GHZ);
148 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
149
150 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
151 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
152 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
153 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
154
155 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
156 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
157
158 /* Set initial sensitivity parameters */
159 /* Set initial calibration set */
160 priv->hw_params.sens = &iwl6000_sensitivity;
161 priv->hw_params.calib_init_cfg =
162 BIT(IWL_CALIB_XTAL) |
163 BIT(IWL_CALIB_LO) |
164 BIT(IWL_CALIB_TX_IQ) |
165 BIT(IWL_CALIB_BASE_BAND);
166 return 0;
167}
168
169static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
170{
171 struct iwl6000_channel_switch_cmd cmd;
172 const struct iwl_channel_info *ch_info;
173 struct iwl_host_cmd hcmd = {
174 .id = REPLY_CHANNEL_SWITCH,
175 .len = sizeof(cmd),
176 .flags = CMD_SIZE_HUGE,
177 .data = &cmd,
178 };
179
180 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
181 priv->active_rxon.channel, channel);
182
183 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
184 cmd.channel = cpu_to_le16(channel);
185 cmd.rxon_flags = priv->staging_rxon.flags;
186 cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
187 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
188 ch_info = iwl_get_channel_info(priv, priv->band, channel);
189 if (ch_info)
190 cmd.expect_beacon = is_channel_radar(ch_info);
191 else {
192 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
193 priv->active_rxon.channel, channel);
194 return -EFAULT;
195 }
196 priv->switch_rxon.channel = cpu_to_le16(channel);
197 priv->switch_rxon.switch_in_progress = true;
198
199 return iwl_send_cmd_sync(priv, &hcmd);
200}
201
89static struct iwl_lib_ops iwl6000_lib = { 202static struct iwl_lib_ops iwl6000_lib = {
90 .set_hw_params = iwl5000_hw_set_hw_params, 203 .set_hw_params = iwl6000_hw_set_hw_params,
91 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 204 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
92 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 205 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
93 .txq_set_sched = iwl5000_txq_set_sched, 206 .txq_set_sched = iwl5000_txq_set_sched,
@@ -106,10 +219,10 @@ static struct iwl_lib_ops iwl6000_lib = {
106 .alive_notify = iwl5000_alive_notify, 219 .alive_notify = iwl5000_alive_notify,
107 .send_tx_power = iwl5000_send_tx_power, 220 .send_tx_power = iwl5000_send_tx_power,
108 .update_chain_flags = iwl_update_chain_flags, 221 .update_chain_flags = iwl_update_chain_flags,
222 .set_channel_switch = iwl6000_hw_channel_switch,
109 .apm_ops = { 223 .apm_ops = {
110 .init = iwl5000_apm_init, 224 .init = iwl_apm_init,
111 .reset = iwl5000_apm_reset, 225 .stop = iwl_apm_stop,
112 .stop = iwl5000_apm_stop,
113 .config = iwl6000_nic_config, 226 .config = iwl6000_nic_config,
114 .set_pwr_src = iwl_set_pwr_src, 227 .set_pwr_src = iwl_set_pwr_src,
115 }, 228 },
@@ -139,25 +252,33 @@ static struct iwl_lib_ops iwl6000_lib = {
139 }, 252 },
140}; 253};
141 254
142static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = { 255static struct iwl_ops iwl6000_ops = {
256 .ucode = &iwl5000_ucode,
257 .lib = &iwl6000_lib,
258 .hcmd = &iwl5000_hcmd,
259 .utils = &iwl5000_hcmd_utils,
260 .led = &iwlagn_led_ops,
261};
262
263static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
143 .get_hcmd_size = iwl5000_get_hcmd_size, 264 .get_hcmd_size = iwl5000_get_hcmd_size,
144 .build_addsta_hcmd = iwl5000_build_addsta_hcmd, 265 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
145 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag, 266 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
146 .calc_rssi = iwl5000_calc_rssi, 267 .calc_rssi = iwl5000_calc_rssi,
147}; 268};
148 269
149static struct iwl_ops iwl6000_ops = { 270static struct iwl_ops iwl6050_ops = {
150 .ucode = &iwl5000_ucode, 271 .ucode = &iwl5000_ucode,
151 .lib = &iwl6000_lib, 272 .lib = &iwl6000_lib,
152 .hcmd = &iwl5000_hcmd, 273 .hcmd = &iwl5000_hcmd,
153 .utils = &iwl6000_hcmd_utils, 274 .utils = &iwl6050_hcmd_utils,
275 .led = &iwlagn_led_ops,
154}; 276};
155 277
156
157/* 278/*
158 * "h": Hybrid configuration, use both internal and external Power Amplifier 279 * "i": Internal configuration, use internal Power Amplifier
159 */ 280 */
160struct iwl_cfg iwl6000h_2agn_cfg = { 281struct iwl_cfg iwl6000i_2agn_cfg = {
161 .name = "6000 Series 2x2 AGN", 282 .name = "6000 Series 2x2 AGN",
162 .fw_name_pre = IWL6000_FW_PRE, 283 .fw_name_pre = IWL6000_FW_PRE,
163 .ucode_api_max = IWL6000_UCODE_API_MAX, 284 .ucode_api_max = IWL6000_UCODE_API_MAX,
@@ -165,41 +286,85 @@ struct iwl_cfg iwl6000h_2agn_cfg = {
165 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 286 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
166 .ops = &iwl6000_ops, 287 .ops = &iwl6000_ops,
167 .eeprom_size = OTP_LOW_IMAGE_SIZE, 288 .eeprom_size = OTP_LOW_IMAGE_SIZE,
168 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 289 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
169 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 290 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
291 .num_of_queues = IWL50_NUM_QUEUES,
292 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
170 .mod_params = &iwl50_mod_params, 293 .mod_params = &iwl50_mod_params,
171 .valid_tx_ant = ANT_AB, 294 .valid_tx_ant = ANT_BC,
172 .valid_rx_ant = ANT_AB, 295 .valid_rx_ant = ANT_BC,
173 .need_pll_cfg = false, 296 .pll_cfg_val = 0,
174 .pa_type = IWL_PA_HYBRID, 297 .set_l0s = true,
298 .use_bsm = false,
299 .pa_type = IWL_PA_INTERNAL,
175 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 300 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
176 .shadow_ram_support = true, 301 .shadow_ram_support = true,
177 .ht_greenfield_support = true, 302 .ht_greenfield_support = true,
303 .led_compensation = 51,
178 .use_rts_for_ht = true, /* use rts/cts protection */ 304 .use_rts_for_ht = true, /* use rts/cts protection */
305 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
306 .supports_idle = true,
307 .adv_thermal_throttle = true,
308 .support_ct_kill_exit = true,
309 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
179}; 310};
180 311
181/* 312struct iwl_cfg iwl6000i_2abg_cfg = {
182 * "i": Internal configuration, use internal Power Amplifier 313 .name = "6000 Series 2x2 ABG",
183 */
184struct iwl_cfg iwl6000i_2agn_cfg = {
185 .name = "6000 Series 2x2 AGN",
186 .fw_name_pre = IWL6000_FW_PRE, 314 .fw_name_pre = IWL6000_FW_PRE,
187 .ucode_api_max = IWL6000_UCODE_API_MAX, 315 .ucode_api_max = IWL6000_UCODE_API_MAX,
188 .ucode_api_min = IWL6000_UCODE_API_MIN, 316 .ucode_api_min = IWL6000_UCODE_API_MIN,
189 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 317 .sku = IWL_SKU_A|IWL_SKU_G,
190 .ops = &iwl6000_ops, 318 .ops = &iwl6000_ops,
191 .eeprom_size = OTP_LOW_IMAGE_SIZE, 319 .eeprom_size = OTP_LOW_IMAGE_SIZE,
192 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 320 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
193 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 321 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
322 .num_of_queues = IWL50_NUM_QUEUES,
323 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
194 .mod_params = &iwl50_mod_params, 324 .mod_params = &iwl50_mod_params,
195 .valid_tx_ant = ANT_BC, 325 .valid_tx_ant = ANT_BC,
196 .valid_rx_ant = ANT_BC, 326 .valid_rx_ant = ANT_BC,
197 .need_pll_cfg = false, 327 .pll_cfg_val = 0,
328 .set_l0s = true,
329 .use_bsm = false,
198 .pa_type = IWL_PA_INTERNAL, 330 .pa_type = IWL_PA_INTERNAL,
199 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 331 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
200 .shadow_ram_support = true, 332 .shadow_ram_support = true,
201 .ht_greenfield_support = true, 333 .ht_greenfield_support = true,
202 .use_rts_for_ht = true, /* use rts/cts protection */ 334 .led_compensation = 51,
335 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
336 .supports_idle = true,
337 .adv_thermal_throttle = true,
338 .support_ct_kill_exit = true,
339};
340
341struct iwl_cfg iwl6000i_2bg_cfg = {
342 .name = "6000 Series 2x2 BG",
343 .fw_name_pre = IWL6000_FW_PRE,
344 .ucode_api_max = IWL6000_UCODE_API_MAX,
345 .ucode_api_min = IWL6000_UCODE_API_MIN,
346 .sku = IWL_SKU_G,
347 .ops = &iwl6000_ops,
348 .eeprom_size = OTP_LOW_IMAGE_SIZE,
349 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
350 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
351 .num_of_queues = IWL50_NUM_QUEUES,
352 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
353 .mod_params = &iwl50_mod_params,
354 .valid_tx_ant = ANT_BC,
355 .valid_rx_ant = ANT_BC,
356 .pll_cfg_val = 0,
357 .set_l0s = true,
358 .use_bsm = false,
359 .pa_type = IWL_PA_INTERNAL,
360 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
361 .shadow_ram_support = true,
362 .ht_greenfield_support = true,
363 .led_compensation = 51,
364 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
365 .supports_idle = true,
366 .adv_thermal_throttle = true,
367 .support_ct_kill_exit = true,
203}; 368};
204 369
205struct iwl_cfg iwl6050_2agn_cfg = { 370struct iwl_cfg iwl6050_2agn_cfg = {
@@ -208,61 +373,89 @@ struct iwl_cfg iwl6050_2agn_cfg = {
208 .ucode_api_max = IWL6050_UCODE_API_MAX, 373 .ucode_api_max = IWL6050_UCODE_API_MAX,
209 .ucode_api_min = IWL6050_UCODE_API_MIN, 374 .ucode_api_min = IWL6050_UCODE_API_MIN,
210 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 375 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
211 .ops = &iwl6000_ops, 376 .ops = &iwl6050_ops,
212 .eeprom_size = OTP_LOW_IMAGE_SIZE, 377 .eeprom_size = OTP_LOW_IMAGE_SIZE,
213 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 378 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
214 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 379 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
380 .num_of_queues = IWL50_NUM_QUEUES,
381 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
215 .mod_params = &iwl50_mod_params, 382 .mod_params = &iwl50_mod_params,
216 .valid_tx_ant = ANT_AB, 383 .valid_tx_ant = ANT_AB,
217 .valid_rx_ant = ANT_AB, 384 .valid_rx_ant = ANT_AB,
218 .need_pll_cfg = false, 385 .pll_cfg_val = 0,
386 .set_l0s = true,
387 .use_bsm = false,
219 .pa_type = IWL_PA_SYSTEM, 388 .pa_type = IWL_PA_SYSTEM,
220 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 389 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
221 .shadow_ram_support = true, 390 .shadow_ram_support = true,
222 .ht_greenfield_support = true, 391 .ht_greenfield_support = true,
392 .led_compensation = 51,
223 .use_rts_for_ht = true, /* use rts/cts protection */ 393 .use_rts_for_ht = true, /* use rts/cts protection */
394 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
395 .supports_idle = true,
396 .adv_thermal_throttle = true,
397 .support_ct_kill_exit = true,
398 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
224}; 399};
225 400
226struct iwl_cfg iwl6000_3agn_cfg = { 401struct iwl_cfg iwl6050_2abg_cfg = {
227 .name = "6000 Series 3x3 AGN", 402 .name = "6050 Series 2x2 ABG",
228 .fw_name_pre = IWL6000_FW_PRE, 403 .fw_name_pre = IWL6050_FW_PRE,
229 .ucode_api_max = IWL6000_UCODE_API_MAX, 404 .ucode_api_max = IWL6050_UCODE_API_MAX,
230 .ucode_api_min = IWL6000_UCODE_API_MIN, 405 .ucode_api_min = IWL6050_UCODE_API_MIN,
231 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 406 .sku = IWL_SKU_A|IWL_SKU_G,
232 .ops = &iwl6000_ops, 407 .ops = &iwl6050_ops,
233 .eeprom_size = OTP_LOW_IMAGE_SIZE, 408 .eeprom_size = OTP_LOW_IMAGE_SIZE,
234 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 409 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
235 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 410 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
411 .num_of_queues = IWL50_NUM_QUEUES,
412 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
236 .mod_params = &iwl50_mod_params, 413 .mod_params = &iwl50_mod_params,
237 .valid_tx_ant = ANT_ABC, 414 .valid_tx_ant = ANT_AB,
238 .valid_rx_ant = ANT_ABC, 415 .valid_rx_ant = ANT_AB,
239 .need_pll_cfg = false, 416 .pll_cfg_val = 0,
417 .set_l0s = true,
418 .use_bsm = false,
240 .pa_type = IWL_PA_SYSTEM, 419 .pa_type = IWL_PA_SYSTEM,
241 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 420 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
242 .shadow_ram_support = true, 421 .shadow_ram_support = true,
243 .ht_greenfield_support = true, 422 .ht_greenfield_support = true,
244 .use_rts_for_ht = true, /* use rts/cts protection */ 423 .led_compensation = 51,
424 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
425 .supports_idle = true,
426 .adv_thermal_throttle = true,
427 .support_ct_kill_exit = true,
245}; 428};
246 429
247struct iwl_cfg iwl6050_3agn_cfg = { 430struct iwl_cfg iwl6000_3agn_cfg = {
248 .name = "6050 Series 3x3 AGN", 431 .name = "6000 Series 3x3 AGN",
249 .fw_name_pre = IWL6050_FW_PRE, 432 .fw_name_pre = IWL6000_FW_PRE,
250 .ucode_api_max = IWL6050_UCODE_API_MAX, 433 .ucode_api_max = IWL6000_UCODE_API_MAX,
251 .ucode_api_min = IWL6050_UCODE_API_MIN, 434 .ucode_api_min = IWL6000_UCODE_API_MIN,
252 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 435 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
253 .ops = &iwl6000_ops, 436 .ops = &iwl6000_ops,
254 .eeprom_size = OTP_LOW_IMAGE_SIZE, 437 .eeprom_size = OTP_LOW_IMAGE_SIZE,
255 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 438 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
256 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 439 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
440 .num_of_queues = IWL50_NUM_QUEUES,
441 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
257 .mod_params = &iwl50_mod_params, 442 .mod_params = &iwl50_mod_params,
258 .valid_tx_ant = ANT_ABC, 443 .valid_tx_ant = ANT_ABC,
259 .valid_rx_ant = ANT_ABC, 444 .valid_rx_ant = ANT_ABC,
260 .need_pll_cfg = false, 445 .pll_cfg_val = 0,
446 .set_l0s = true,
447 .use_bsm = false,
261 .pa_type = IWL_PA_SYSTEM, 448 .pa_type = IWL_PA_SYSTEM,
262 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 449 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
263 .shadow_ram_support = true, 450 .shadow_ram_support = true,
264 .ht_greenfield_support = true, 451 .ht_greenfield_support = true,
452 .led_compensation = 51,
265 .use_rts_for_ht = true, /* use rts/cts protection */ 453 .use_rts_for_ht = true, /* use rts/cts protection */
454 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
455 .supports_idle = true,
456 .adv_thermal_throttle = true,
457 .support_ct_kill_exit = true,
458 .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
266}; 459};
267 460
268MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 461MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
new file mode 100644
index 000000000000..3bccba20f6da
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -0,0 +1,85 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-agn-led.h"
45
46/* Send led command */
47static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
48{
49 struct iwl_host_cmd cmd = {
50 .id = REPLY_LEDS_CMD,
51 .len = sizeof(struct iwl_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56 u32 reg;
57
58 reg = iwl_read32(priv, CSR_LED_REG);
59 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
60 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
61
62 return iwl_send_cmd(priv, &cmd);
63}
64
65/* Set led register off */
66static int iwl_led_on_reg(struct iwl_priv *priv)
67{
68 IWL_DEBUG_LED(priv, "led on\n");
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70 return 0;
71}
72
73/* Set led register off */
74static int iwl_led_off_reg(struct iwl_priv *priv)
75{
76 IWL_DEBUG_LED(priv, "LED Reg off\n");
77 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
78 return 0;
79}
80
81const struct iwl_led_ops iwlagn_led_ops = {
82 .cmd = iwl_send_led_cmd,
83 .on = iwl_led_on_reg,
84 .off = iwl_led_off_reg,
85};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
new file mode 100644
index 000000000000..ab55f92a161d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -0,0 +1,32 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_agn_led_h__
28#define __iwl_agn_led_h__
29
30extern const struct iwl_led_ops iwlagn_led_ops;
31
32#endif /* __iwl_agn_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 81726ee32858..fe511cbf012e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -75,104 +75,6 @@ static const u8 ant_toggle_lookup[] = {
75 /*ANT_ABC -> */ ANT_ABC, 75 /*ANT_ABC -> */ ANT_ABC,
76}; 76};
77 77
78/**
79 * struct iwl_rate_scale_data -- tx success history for one rate
80 */
81struct iwl_rate_scale_data {
82 u64 data; /* bitmap of successful frames */
83 s32 success_counter; /* number of frames successful */
84 s32 success_ratio; /* per-cent * 128 */
85 s32 counter; /* number of frames attempted */
86 s32 average_tpt; /* success ratio * expected throughput */
87 unsigned long stamp;
88};
89
90/**
91 * struct iwl_scale_tbl_info -- tx params and success history for all rates
92 *
93 * There are two of these in struct iwl_lq_sta,
94 * one for "active", and one for "search".
95 */
96struct iwl_scale_tbl_info {
97 enum iwl_table_type lq_type;
98 u8 ant_type;
99 u8 is_SGI; /* 1 = short guard interval */
100 u8 is_ht40; /* 1 = 40 MHz channel width */
101 u8 is_dup; /* 1 = duplicated data streams */
102 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
103 u8 max_search; /* maximun number of tables we can search */
104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
105 u32 current_rate; /* rate_n_flags, uCode API format */
106 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
107};
108
109struct iwl_traffic_load {
110 unsigned long time_stamp; /* age of the oldest statistics */
111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
112 * slice */
113 u32 total; /* total num of packets during the
114 * last TID_MAX_TIME_DIFF */
115 u8 queue_count; /* number of queues that has
116 * been used since the last cleanup */
117 u8 head; /* start of the circular buffer */
118};
119
120/**
121 * struct iwl_lq_sta -- driver's rate scaling private structure
122 *
123 * Pointer to this gets passed back and forth between driver and mac80211.
124 */
125struct iwl_lq_sta {
126 u8 active_tbl; /* index of active table, range 0-1 */
127 u8 enable_counter; /* indicates HT mode */
128 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
129 u8 search_better_tbl; /* 1: currently trying alternate mode */
130 s32 last_tpt;
131
132 /* The following determine when to search for a new mode */
133 u32 table_count_limit;
134 u32 max_failure_limit; /* # failed frames before new search */
135 u32 max_success_limit; /* # successful frames before new search */
136 u32 table_count;
137 u32 total_failed; /* total failed frames, any/all rates */
138 u32 total_success; /* total successful frames, any/all rates */
139 u64 flush_timer; /* time staying in mode before new search */
140
141 u8 action_counter; /* # mode-switch actions tried */
142 u8 is_green;
143 u8 is_dup;
144 enum ieee80211_band band;
145 u8 ibss_sta_added;
146
147 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
148 u32 supp_rates;
149 u16 active_legacy_rate;
150 u16 active_siso_rate;
151 u16 active_mimo2_rate;
152 u16 active_mimo3_rate;
153 u16 active_rate_basic;
154 s8 max_rate_idx; /* Max rate set by user */
155 u8 missed_rate_counter;
156
157 struct iwl_link_quality_cmd lq;
158 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
159 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
160 u8 tx_agg_tid_en;
161#ifdef CONFIG_MAC80211_DEBUGFS
162 struct dentry *rs_sta_dbgfs_scale_table_file;
163 struct dentry *rs_sta_dbgfs_stats_table_file;
164 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
165 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
166 u32 dbg_fixed_rate;
167#endif
168 struct iwl_priv *drv;
169
170 /* used to be in sta_info */
171 int last_txrate_idx;
172 /* last tx rate_n_flags */
173 u32 last_rate_n_flags;
174};
175
176static void rs_rate_scale_perform(struct iwl_priv *priv, 78static void rs_rate_scale_perform(struct iwl_priv *priv,
177 struct sk_buff *skb, 79 struct sk_buff *skb,
178 struct ieee80211_sta *sta, 80 struct ieee80211_sta *sta,
@@ -190,84 +92,78 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
190{} 92{}
191#endif 93#endif
192 94
193/* 95/**
194 * Expected throughput metrics for following rates: 96 * The following tables contain the expected throughput metrics for all rates
195 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits 97 *
196 * "G" is the only table that supports CCK (the first 4 rates). 98 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
99 *
100 * where invalid entries are zeros.
101 *
102 * CCK rates are only valid in legacy table and will only be used in G
103 * (2.4 GHz) band.
197 */ 104 */
198 105
199static s32 expected_tpt_A[IWL_RATE_COUNT] = { 106static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
200 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 107 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
201};
202
203static s32 expected_tpt_G[IWL_RATE_COUNT] = {
204 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 186
205};
206
207static s32 expected_tpt_siso20MHz[IWL_RATE_COUNT] = {
208 0, 0, 0, 0, 42, 42, 76, 102, 124, 159, 183, 193, 202
209};
210
211static s32 expected_tpt_siso20MHzSGI[IWL_RATE_COUNT] = {
212 0, 0, 0, 0, 46, 46, 82, 110, 132, 168, 192, 202, 211
213};
214
215static s32 expected_tpt_mimo2_20MHz[IWL_RATE_COUNT] = {
216 0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251
217};
218
219static s32 expected_tpt_mimo2_20MHzSGI[IWL_RATE_COUNT] = {
220 0, 0, 0, 0, 81, 81, 131, 164, 188, 222, 243, 251, 257
221}; 108};
222 109
223static s32 expected_tpt_siso40MHz[IWL_RATE_COUNT] = { 110static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
224 0, 0, 0, 0, 77, 77, 127, 160, 184, 220, 242, 250, 257 111 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
112 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
113 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
114 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
225}; 115};
226 116
227static s32 expected_tpt_siso40MHzSGI[IWL_RATE_COUNT] = { 117static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
228 0, 0, 0, 0, 83, 83, 135, 169, 193, 229, 250, 257, 264 118 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
119 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
120 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
121 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
229}; 122};
230 123
231static s32 expected_tpt_mimo2_40MHz[IWL_RATE_COUNT] = { 124static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
232 0, 0, 0, 0, 123, 123, 182, 214, 235, 264, 279, 285, 289 125 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
126 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
127 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
128 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
233}; 129};
234 130
235static s32 expected_tpt_mimo2_40MHzSGI[IWL_RATE_COUNT] = { 131static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
236 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 132 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
133 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
134 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
135 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
237}; 136};
238 137
239/* Expected throughput metric MIMO3 */ 138static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
240static s32 expected_tpt_mimo3_20MHz[IWL_RATE_COUNT] = { 139 {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
241 0, 0, 0, 0, 99, 99, 153, 186, 208, 239, 256, 263, 268 140 {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
141 {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
142 {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
242}; 143};
243 144
244static s32 expected_tpt_mimo3_20MHzSGI[IWL_RATE_COUNT] = { 145static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
245 0, 0, 0, 0, 106, 106, 162, 194, 215, 246, 262, 268, 273 146 {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
246}; 147 {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
247 148 {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
248static s32 expected_tpt_mimo3_40MHz[IWL_RATE_COUNT] = { 149 {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
249 0, 0, 0, 0, 152, 152, 211, 239, 255, 279, 290, 294, 297
250};
251
252static s32 expected_tpt_mimo3_40MHzSGI[IWL_RATE_COUNT] = {
253 0, 0, 0, 0, 160, 160, 219, 245, 261, 284, 294, 297, 300
254}; 150};
255 151
256/* mbps, mcs */ 152/* mbps, mcs */
257const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { 153const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
258 {"1", ""}, 154 { "1", "BPSK DSSS"},
259 {"2", ""}, 155 { "2", "QPSK DSSS"},
260 {"5.5", ""}, 156 {"5.5", "BPSK CCK"},
261 {"11", ""}, 157 { "11", "QPSK CCK"},
262 {"6", "BPSK 1/2"}, 158 { "6", "BPSK 1/2"},
263 {"9", "BPSK 1/2"}, 159 { "9", "BPSK 1/2"},
264 {"12", "QPSK 1/2"}, 160 { "12", "QPSK 1/2"},
265 {"18", "QPSK 3/4"}, 161 { "18", "QPSK 3/4"},
266 {"24", "16QAM 1/2"}, 162 { "24", "16QAM 1/2"},
267 {"36", "16QAM 3/4"}, 163 { "36", "16QAM 3/4"},
268 {"48", "64QAM 2/3"}, 164 { "48", "64QAM 2/3"},
269 {"54", "64QAM 3/4"}, 165 { "54", "64QAM 3/4"},
270 {"60", "64QAM 5/6"} 166 { "60", "64QAM 5/6"},
271}; 167};
272 168
273#define MCS_INDEX_PER_STREAM (8) 169#define MCS_INDEX_PER_STREAM (8)
@@ -405,7 +301,7 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
405 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 301 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
406 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 302 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
407 sta->addr, tid); 303 sta->addr, tid);
408 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid); 304 ieee80211_start_tx_ba_session(sta, tid);
409 } 305 }
410} 306}
411 307
@@ -444,7 +340,7 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
444 * packets. 340 * packets.
445 */ 341 */
446static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, 342static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
447 int scale_index, s32 tpt, int retries, 343 int scale_index, s32 tpt, int attempts,
448 int successes) 344 int successes)
449{ 345{
450 struct iwl_rate_scale_data *window = NULL; 346 struct iwl_rate_scale_data *window = NULL;
@@ -454,7 +350,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
454 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 350 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
455 return -EINVAL; 351 return -EINVAL;
456 352
457 /* Select data for current tx bit rate */ 353 /* Select window for current tx bit rate */
458 window = &(windows[scale_index]); 354 window = &(windows[scale_index]);
459 355
460 /* 356 /*
@@ -465,7 +361,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
465 * subtract "1" from the success counter (this is the main reason 361 * subtract "1" from the success counter (this is the main reason
466 * we keep these bitmaps!). 362 * we keep these bitmaps!).
467 */ 363 */
468 while (retries > 0) { 364 while (attempts > 0) {
469 if (window->counter >= IWL_RATE_MAX_WINDOW) { 365 if (window->counter >= IWL_RATE_MAX_WINDOW) {
470 366
471 /* remove earliest */ 367 /* remove earliest */
@@ -480,17 +376,17 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
480 /* Increment frames-attempted counter */ 376 /* Increment frames-attempted counter */
481 window->counter++; 377 window->counter++;
482 378
483 /* Shift bitmap by one frame (throw away oldest history), 379 /* Shift bitmap by one frame to throw away oldest history */
484 * OR in "1", and increment "success" if this
485 * frame was successful. */
486 window->data <<= 1; 380 window->data <<= 1;
381
382 /* Mark the most recent #successes attempts as successful */
487 if (successes > 0) { 383 if (successes > 0) {
488 window->success_counter++; 384 window->success_counter++;
489 window->data |= 0x1; 385 window->data |= 0x1;
490 successes--; 386 successes--;
491 } 387 }
492 388
493 retries--; 389 attempts--;
494 } 390 }
495 391
496 /* Calculate current success ratio, avoid divide-by-0! */ 392 /* Calculate current success ratio, avoid divide-by-0! */
@@ -671,7 +567,7 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
671 * there are no non-GF stations present in the BSS. 567 * there are no non-GF stations present in the BSS.
672 */ 568 */
673static inline u8 rs_use_green(struct ieee80211_sta *sta, 569static inline u8 rs_use_green(struct ieee80211_sta *sta,
674 struct iwl_ht_info *ht_conf) 570 struct iwl_ht_config *ht_conf)
675{ 571{
676 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && 572 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
677 !(ht_conf->non_GF_STA_present); 573 !(ht_conf->non_GF_STA_present);
@@ -821,27 +717,45 @@ out:
821} 717}
822 718
823/* 719/*
720 * Simple function to compare two rate scale table types
721 */
722static bool table_type_matches(struct iwl_scale_tbl_info *a,
723 struct iwl_scale_tbl_info *b)
724{
725 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
726 (a->is_SGI == b->is_SGI);
727}
728/*
729 * Static function to get the expected throughput from an iwl_scale_tbl_info
730 * that wraps a NULL pointer check
731 */
732static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
733{
734 if (tbl->expected_tpt)
735 return tbl->expected_tpt[rs_index];
736 return 0;
737}
738
739/*
824 * mac80211 sends us Tx status 740 * mac80211 sends us Tx status
825 */ 741 */
826static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, 742static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
827 struct ieee80211_sta *sta, void *priv_sta, 743 struct ieee80211_sta *sta, void *priv_sta,
828 struct sk_buff *skb) 744 struct sk_buff *skb)
829{ 745{
830 int status; 746 int legacy_success;
831 u8 retries; 747 int retries;
832 int rs_index, mac_index, index = 0; 748 int rs_index, mac_index, i;
833 struct iwl_lq_sta *lq_sta = priv_sta; 749 struct iwl_lq_sta *lq_sta = priv_sta;
834 struct iwl_link_quality_cmd *table; 750 struct iwl_link_quality_cmd *table;
835 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 751 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
836 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 752 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
837 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 753 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
838 struct iwl_rate_scale_data *window = NULL; 754 struct iwl_rate_scale_data *window = NULL;
839 struct iwl_rate_scale_data *search_win = NULL;
840 enum mac80211_rate_control_flags mac_flags; 755 enum mac80211_rate_control_flags mac_flags;
841 u32 tx_rate; 756 u32 tx_rate;
842 struct iwl_scale_tbl_info tbl_type; 757 struct iwl_scale_tbl_info tbl_type;
843 struct iwl_scale_tbl_info *curr_tbl, *search_tbl; 758 struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
844 u8 active_index = 0;
845 s32 tpt = 0; 759 s32 tpt = 0;
846 760
847 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 761 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@@ -850,30 +764,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
850 info->flags & IEEE80211_TX_CTL_NO_ACK) 764 info->flags & IEEE80211_TX_CTL_NO_ACK)
851 return; 765 return;
852 766
853 /* This packet was aggregated but doesn't carry rate scale info */ 767 /* This packet was aggregated but doesn't carry status info */
854 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && 768 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
855 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 769 !(info->flags & IEEE80211_TX_STAT_AMPDU))
856 return; 770 return;
857 771
858 if (info->flags & IEEE80211_TX_STAT_AMPDU)
859 retries = 0;
860 else
861 retries = info->status.rates[0].count - 1;
862
863 if (retries > 15)
864 retries = 15;
865
866 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && 772 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
867 !lq_sta->ibss_sta_added) 773 !lq_sta->ibss_sta_added)
868 goto out; 774 return;
869
870 table = &lq_sta->lq;
871 active_index = lq_sta->active_tbl;
872
873 curr_tbl = &(lq_sta->lq_info[active_index]);
874 search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
875 window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
876 search_win = (struct iwl_rate_scale_data *)&(search_tbl->win[0]);
877 775
878 /* 776 /*
879 * Ignore this Tx frame response if its initial rate doesn't match 777 * Ignore this Tx frame response if its initial rate doesn't match
@@ -883,6 +781,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
883 * to check "search" mode, or a prior "search" mode after we've moved 781 * to check "search" mode, or a prior "search" mode after we've moved
884 * to a new "search" mode (which might become the new "active" mode). 782 * to a new "search" mode (which might become the new "active" mode).
885 */ 783 */
784 table = &lq_sta->lq;
886 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); 785 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
887 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); 786 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
888 if (priv->band == IEEE80211_BAND_5GHZ) 787 if (priv->band == IEEE80211_BAND_5GHZ)
@@ -901,7 +800,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
901 if (priv->band == IEEE80211_BAND_2GHZ) 800 if (priv->band == IEEE80211_BAND_2GHZ)
902 mac_index += IWL_FIRST_OFDM_RATE; 801 mac_index += IWL_FIRST_OFDM_RATE;
903 } 802 }
904 803 /* Here we actually compare this rate to the latest LQ command */
905 if ((mac_index < 0) || 804 if ((mac_index < 0) ||
906 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || 805 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
907 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || 806 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
@@ -911,124 +810,106 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
911 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || 810 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
912 (rs_index != mac_index)) { 811 (rs_index != mac_index)) {
913 IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate); 812 IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate);
914 /* the last LQ command could failed so the LQ in ucode not 813 /*
915 * the same in driver sync up 814 * Since rates mis-match, the last LQ command may have failed.
815 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
816 * ... driver.
916 */ 817 */
917 lq_sta->missed_rate_counter++; 818 lq_sta->missed_rate_counter++;
918 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { 819 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
919 lq_sta->missed_rate_counter = 0; 820 lq_sta->missed_rate_counter = 0;
920 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 821 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
921 } 822 }
922 goto out; 823 /* Regardless, ignore this status info for outdated rate */
824 return;
825 } else
826 /* Rate did match, so reset the missed_rate_counter */
827 lq_sta->missed_rate_counter = 0;
828
829 /* Figure out if rate scale algorithm is in active or search table */
830 if (table_type_matches(&tbl_type,
831 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
832 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
833 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
834 } else if (table_type_matches(&tbl_type,
835 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
836 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
837 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
838 } else {
839 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
840 return;
923 } 841 }
842 window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
924 843
925 lq_sta->missed_rate_counter = 0; 844 /*
926 /* Update frame history window with "failure" for each Tx retry. */ 845 * Updating the frame history depends on whether packets were
927 while (retries) { 846 * aggregated.
928 /* Look up the rate and other info used for each tx attempt. 847 *
929 * Each tx attempt steps one entry deeper in the rate table. */ 848 * For aggregation, all packets were transmitted at the same rate, the
930 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags); 849 * first index into rate scale table.
931 rs_get_tbl_info_from_mcs(tx_rate, priv->band, 850 */
932 &tbl_type, &rs_index); 851 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
933 852 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
934 /* If type matches "search" table, 853 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
935 * add failure to "search" history */ 854 &rs_index);
936 if ((tbl_type.lq_type == search_tbl->lq_type) && 855 tpt = get_expected_tpt(curr_tbl, rs_index);
937 (tbl_type.ant_type == search_tbl->ant_type) && 856 rs_collect_tx_data(window, rs_index, tpt,
938 (tbl_type.is_SGI == search_tbl->is_SGI)) { 857 info->status.ampdu_ack_len,
939 if (search_tbl->expected_tpt) 858 info->status.ampdu_ack_map);
940 tpt = search_tbl->expected_tpt[rs_index]; 859
941 else 860 /* Update success/fail counts if not searching for new mode */
942 tpt = 0; 861 if (lq_sta->stay_in_tbl) {
943 rs_collect_tx_data(search_win, rs_index, tpt, 1, 0); 862 lq_sta->total_success += info->status.ampdu_ack_map;
944 863 lq_sta->total_failed += (info->status.ampdu_ack_len -
945 /* Else if type matches "current/active" table, 864 info->status.ampdu_ack_map);
946 * add failure to "current/active" history */
947 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
948 (tbl_type.ant_type == curr_tbl->ant_type) &&
949 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
950 if (curr_tbl->expected_tpt)
951 tpt = curr_tbl->expected_tpt[rs_index];
952 else
953 tpt = 0;
954 rs_collect_tx_data(window, rs_index, tpt, 1, 0);
955 } 865 }
956 866 } else {
957 /* If not searching for a new mode, increment failed counter
958 * ... this helps determine when to start searching again */
959 if (lq_sta->stay_in_tbl)
960 lq_sta->total_failed++;
961 --retries;
962 index++;
963
964 }
965
966 /* 867 /*
967 * Find (by rate) the history window to update with final Tx attempt; 868 * For legacy, update frame history with for each Tx retry.
968 * if Tx was successful first try, use original rate,
969 * else look up the rate that was, finally, successful.
970 */ 869 */
971 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags); 870 retries = info->status.rates[0].count - 1;
972 lq_sta->last_rate_n_flags = tx_rate; 871 /* HW doesn't send more than 15 retries */
973 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); 872 retries = min(retries, 15);
974 873
975 /* Update frame history window with "success" if Tx got ACKed ... */ 874 /* The last transmission may have been successful */
976 status = !!(info->flags & IEEE80211_TX_STAT_ACK); 875 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
977 876 /* Collect data for each rate used during failed TX attempts */
978 /* If type matches "search" table, 877 for (i = 0; i <= retries; ++i) {
979 * add final tx status to "search" history */ 878 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
980 if ((tbl_type.lq_type == search_tbl->lq_type) && 879 rs_get_tbl_info_from_mcs(tx_rate, priv->band,
981 (tbl_type.ant_type == search_tbl->ant_type) && 880 &tbl_type, &rs_index);
982 (tbl_type.is_SGI == search_tbl->is_SGI)) { 881 /*
983 if (search_tbl->expected_tpt) 882 * Only collect stats if retried rate is in the same RS
984 tpt = search_tbl->expected_tpt[rs_index]; 883 * table as active/search.
985 else 884 */
986 tpt = 0; 885 if (table_type_matches(&tbl_type, curr_tbl))
987 if (info->flags & IEEE80211_TX_STAT_AMPDU) 886 tpt = get_expected_tpt(curr_tbl, rs_index);
988 rs_collect_tx_data(search_win, rs_index, tpt, 887 else if (table_type_matches(&tbl_type, other_tbl))
989 info->status.ampdu_ack_len, 888 tpt = get_expected_tpt(other_tbl, rs_index);
990 info->status.ampdu_ack_map); 889 else
991 else 890 continue;
992 rs_collect_tx_data(search_win, rs_index, tpt,
993 1, status);
994 /* Else if type matches "current/active" table,
995 * add final tx status to "current/active" history */
996 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
997 (tbl_type.ant_type == curr_tbl->ant_type) &&
998 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
999 if (curr_tbl->expected_tpt)
1000 tpt = curr_tbl->expected_tpt[rs_index];
1001 else
1002 tpt = 0;
1003 if (info->flags & IEEE80211_TX_STAT_AMPDU)
1004 rs_collect_tx_data(window, rs_index, tpt,
1005 info->status.ampdu_ack_len,
1006 info->status.ampdu_ack_map);
1007 else
1008 rs_collect_tx_data(window, rs_index, tpt,
1009 1, status);
1010 }
1011 891
1012 /* If not searching for new mode, increment success/failed counter 892 /* Constants mean 1 transmission, 0 successes */
1013 * ... these help determine when to start searching again */ 893 if (i < retries)
1014 if (lq_sta->stay_in_tbl) { 894 rs_collect_tx_data(window, rs_index, tpt, 1,
1015 if (info->flags & IEEE80211_TX_STAT_AMPDU) { 895 0);
1016 lq_sta->total_success += info->status.ampdu_ack_map;
1017 lq_sta->total_failed +=
1018 (info->status.ampdu_ack_len - info->status.ampdu_ack_map);
1019 } else {
1020 if (status)
1021 lq_sta->total_success++;
1022 else 896 else
1023 lq_sta->total_failed++; 897 rs_collect_tx_data(window, rs_index, tpt, 1,
898 legacy_success);
899 }
900
901 /* Update success/fail counts if not searching for new mode */
902 if (lq_sta->stay_in_tbl) {
903 lq_sta->total_success += legacy_success;
904 lq_sta->total_failed += retries + (1 - legacy_success);
1024 } 905 }
1025 } 906 }
907 /* The last TX rate is cached in lq_sta; it's set in if/else above */
908 lq_sta->last_rate_n_flags = tx_rate;
1026 909
1027 /* See if there's a better rate or modulation mode to try. */ 910 /* See if there's a better rate or modulation mode to try. */
1028 if (sta && sta->supp_rates[sband->band]) 911 if (sta && sta->supp_rates[sband->band])
1029 rs_rate_scale_perform(priv, skb, sta, lq_sta); 912 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1030out:
1031 return;
1032} 913}
1033 914
1034/* 915/*
@@ -1066,43 +947,45 @@ static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1066static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, 947static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1067 struct iwl_scale_tbl_info *tbl) 948 struct iwl_scale_tbl_info *tbl)
1068{ 949{
950 /* Used to choose among HT tables */
951 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
952
953 /* Check for invalid LQ type */
954 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
955 tbl->expected_tpt = expected_tpt_legacy;
956 return;
957 }
958
959 /* Legacy rates have only one table */
1069 if (is_legacy(tbl->lq_type)) { 960 if (is_legacy(tbl->lq_type)) {
1070 if (!is_a_band(tbl->lq_type)) 961 tbl->expected_tpt = expected_tpt_legacy;
1071 tbl->expected_tpt = expected_tpt_G; 962 return;
1072 else 963 }
1073 tbl->expected_tpt = expected_tpt_A; 964
1074 } else if (is_siso(tbl->lq_type)) { 965 /* Choose among many HT tables depending on number of streams
1075 if (tbl->is_ht40 && !lq_sta->is_dup) 966 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
1076 if (tbl->is_SGI) 967 * status */
1077 tbl->expected_tpt = expected_tpt_siso40MHzSGI; 968 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1078 else 969 ht_tbl_pointer = expected_tpt_siso20MHz;
1079 tbl->expected_tpt = expected_tpt_siso40MHz; 970 else if (is_siso(tbl->lq_type))
1080 else if (tbl->is_SGI) 971 ht_tbl_pointer = expected_tpt_siso40MHz;
1081 tbl->expected_tpt = expected_tpt_siso20MHzSGI; 972 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1082 else 973 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1083 tbl->expected_tpt = expected_tpt_siso20MHz; 974 else if (is_mimo2(tbl->lq_type))
1084 } else if (is_mimo2(tbl->lq_type)) { 975 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1085 if (tbl->is_ht40 && !lq_sta->is_dup) 976 else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1086 if (tbl->is_SGI) 977 ht_tbl_pointer = expected_tpt_mimo3_20MHz;
1087 tbl->expected_tpt = expected_tpt_mimo2_40MHzSGI; 978 else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
1088 else 979 ht_tbl_pointer = expected_tpt_mimo3_40MHz;
1089 tbl->expected_tpt = expected_tpt_mimo2_40MHz; 980
1090 else if (tbl->is_SGI) 981 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1091 tbl->expected_tpt = expected_tpt_mimo2_20MHzSGI; 982 tbl->expected_tpt = ht_tbl_pointer[0];
1092 else 983 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1093 tbl->expected_tpt = expected_tpt_mimo2_20MHz; 984 tbl->expected_tpt = ht_tbl_pointer[1];
1094 } else if (is_mimo3(tbl->lq_type)) { 985 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1095 if (tbl->is_ht40 && !lq_sta->is_dup) 986 tbl->expected_tpt = ht_tbl_pointer[2];
1096 if (tbl->is_SGI) 987 else /* AGG+SGI */
1097 tbl->expected_tpt = expected_tpt_mimo3_40MHzSGI; 988 tbl->expected_tpt = ht_tbl_pointer[3];
1098 else
1099 tbl->expected_tpt = expected_tpt_mimo3_40MHz;
1100 else if (tbl->is_SGI)
1101 tbl->expected_tpt = expected_tpt_mimo3_20MHzSGI;
1102 else
1103 tbl->expected_tpt = expected_tpt_mimo3_20MHz;
1104 } else
1105 tbl->expected_tpt = expected_tpt_G;
1106} 989}
1107 990
1108/* 991/*
@@ -2077,6 +1960,14 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2077 lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; 1960 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
2078 1961
2079 tid = rs_tl_add_packet(lq_sta, hdr); 1962 tid = rs_tl_add_packet(lq_sta, hdr);
1963 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1964 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1965 if (tid_data->agg.state == IWL_AGG_OFF)
1966 lq_sta->is_agg = 0;
1967 else
1968 lq_sta->is_agg = 1;
1969 } else
1970 lq_sta->is_agg = 0;
2080 1971
2081 /* 1972 /*
2082 * Select rate-scale / modulation-mode table to work with in 1973 * Select rate-scale / modulation-mode table to work with in
@@ -2177,10 +2068,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2177 2068
2178 goto out; 2069 goto out;
2179 } 2070 }
2180
2181 /* Else we have enough samples; calculate estimate of 2071 /* Else we have enough samples; calculate estimate of
2182 * actual average throughput */ 2072 * actual average throughput */
2183 2073
2074 /* Sanity-check TPT calculations */
2184 BUG_ON(window->average_tpt != ((window->success_ratio * 2075 BUG_ON(window->average_tpt != ((window->success_ratio *
2185 tbl->expected_tpt[index] + 64) / 128)); 2076 tbl->expected_tpt[index] + 64) / 128));
2186 2077
@@ -2584,22 +2475,13 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2584 gfp_t gfp) 2475 gfp_t gfp)
2585{ 2476{
2586 struct iwl_lq_sta *lq_sta; 2477 struct iwl_lq_sta *lq_sta;
2478 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2587 struct iwl_priv *priv; 2479 struct iwl_priv *priv;
2588 int i, j;
2589 2480
2590 priv = (struct iwl_priv *)priv_rate; 2481 priv = (struct iwl_priv *)priv_rate;
2591 IWL_DEBUG_RATE(priv, "create station rate scale window\n"); 2482 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2592 2483
2593 lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp); 2484 lq_sta = &sta_priv->lq_sta;
2594
2595 if (lq_sta == NULL)
2596 return NULL;
2597 lq_sta->lq.sta_id = 0xff;
2598
2599
2600 for (j = 0; j < LQ_SIZE; j++)
2601 for (i = 0; i < IWL_RATE_COUNT; i++)
2602 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2603 2485
2604 return lq_sta; 2486 return lq_sta;
2605} 2487}
@@ -2613,6 +2495,12 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2613 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2495 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2614 struct iwl_lq_sta *lq_sta = priv_sta; 2496 struct iwl_lq_sta *lq_sta = priv_sta;
2615 2497
2498 lq_sta->lq.sta_id = 0xff;
2499
2500 for (j = 0; j < LQ_SIZE; j++)
2501 for (i = 0; i < IWL_RATE_COUNT; i++)
2502 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2503
2616 lq_sta->flush_timer = 0; 2504 lq_sta->flush_timer = 0;
2617 lq_sta->supp_rates = sta->supp_rates[sband->band]; 2505 lq_sta->supp_rates = sta->supp_rates[sband->band];
2618 for (j = 0; j < LQ_SIZE; j++) 2506 for (j = 0; j < LQ_SIZE; j++)
@@ -2690,6 +2578,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2690 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); 2578 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2691 if (sband->band == IEEE80211_BAND_5GHZ) 2579 if (sband->band == IEEE80211_BAND_5GHZ)
2692 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2580 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2581 lq_sta->is_agg = 0;
2693 2582
2694 rs_initialize_lq(priv, conf, sta, lq_sta); 2583 rs_initialize_lq(priv, conf, sta, lq_sta);
2695} 2584}
@@ -2808,7 +2697,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2808 repeat_rate--; 2697 repeat_rate--;
2809 } 2698 }
2810 2699
2811 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX; 2700 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2812 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 2701 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2813 lq_cmd->agg_params.agg_time_limit = 2702 lq_cmd->agg_params.agg_time_limit =
2814 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 2703 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
@@ -2827,11 +2716,9 @@ static void rs_free(void *priv_rate)
2827static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, 2716static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2828 void *priv_sta) 2717 void *priv_sta)
2829{ 2718{
2830 struct iwl_lq_sta *lq_sta = priv_sta;
2831 struct iwl_priv *priv __maybe_unused = priv_r; 2719 struct iwl_priv *priv __maybe_unused = priv_r;
2832 2720
2833 IWL_DEBUG_RATE(priv, "enter\n"); 2721 IWL_DEBUG_RATE(priv, "enter\n");
2834 kfree(lq_sta);
2835 IWL_DEBUG_RATE(priv, "leave\n"); 2722 IWL_DEBUG_RATE(priv, "leave\n");
2836} 2723}
2837 2724
@@ -2942,8 +2829,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2942 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); 2829 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
2943 desc += sprintf(buff+desc, " %s", 2830 desc += sprintf(buff+desc, " %s",
2944 (tbl->is_ht40) ? "40MHz" : "20MHz"); 2831 (tbl->is_ht40) ? "40MHz" : "20MHz");
2945 desc += sprintf(buff+desc, " %s %s\n", (tbl->is_SGI) ? "SGI" : "", 2832 desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
2946 (lq_sta->is_green) ? "GF enabled" : ""); 2833 (lq_sta->is_green) ? "GF enabled" : "",
2834 (lq_sta->is_agg) ? "AGG on" : "");
2947 } 2835 }
2948 desc += sprintf(buff+desc, "last tx rate=0x%X\n", 2836 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2949 lq_sta->last_rate_n_flags); 2837 lq_sta->last_rate_n_flags);
@@ -3076,16 +2964,16 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
3076{ 2964{
3077 struct iwl_lq_sta *lq_sta = priv_sta; 2965 struct iwl_lq_sta *lq_sta = priv_sta;
3078 lq_sta->rs_sta_dbgfs_scale_table_file = 2966 lq_sta->rs_sta_dbgfs_scale_table_file =
3079 debugfs_create_file("rate_scale_table", 0600, dir, 2967 debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
3080 lq_sta, &rs_sta_dbgfs_scale_table_ops); 2968 lq_sta, &rs_sta_dbgfs_scale_table_ops);
3081 lq_sta->rs_sta_dbgfs_stats_table_file = 2969 lq_sta->rs_sta_dbgfs_stats_table_file =
3082 debugfs_create_file("rate_stats_table", 0600, dir, 2970 debugfs_create_file("rate_stats_table", S_IRUSR, dir,
3083 lq_sta, &rs_sta_dbgfs_stats_table_ops); 2971 lq_sta, &rs_sta_dbgfs_stats_table_ops);
3084 lq_sta->rs_sta_dbgfs_rate_scale_data_file = 2972 lq_sta->rs_sta_dbgfs_rate_scale_data_file =
3085 debugfs_create_file("rate_scale_data", 0600, dir, 2973 debugfs_create_file("rate_scale_data", S_IRUSR, dir,
3086 lq_sta, &rs_sta_dbgfs_rate_scale_data_ops); 2974 lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
3087 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = 2975 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
3088 debugfs_create_u8("tx_agg_tid_enable", 0600, dir, 2976 debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
3089 &lq_sta->tx_agg_tid_en); 2977 &lq_sta->tx_agg_tid_en);
3090 2978
3091} 2979}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9fac530cfb7e..affc0c5a2f2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -54,6 +54,7 @@ struct iwl3945_rate_info {
54 u8 prev_table_rs; /* prev in rate table cmd */ 54 u8 prev_table_rs; /* prev in rate table cmd */
55}; 55};
56 56
57
57/* 58/*
58 * These serve as indexes into 59 * These serve as indexes into
59 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; 60 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
@@ -335,6 +336,106 @@ struct iwl_rate_mcs_info {
335 char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; 336 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
336}; 337};
337 338
339/**
340 * struct iwl_rate_scale_data -- tx success history for one rate
341 */
342struct iwl_rate_scale_data {
343 u64 data; /* bitmap of successful frames */
344 s32 success_counter; /* number of frames successful */
345 s32 success_ratio; /* per-cent * 128 */
346 s32 counter; /* number of frames attempted */
347 s32 average_tpt; /* success ratio * expected throughput */
348 unsigned long stamp;
349};
350
351/**
352 * struct iwl_scale_tbl_info -- tx params and success history for all rates
353 *
354 * There are two of these in struct iwl_lq_sta,
355 * one for "active", and one for "search".
356 */
357struct iwl_scale_tbl_info {
358 enum iwl_table_type lq_type;
359 u8 ant_type;
360 u8 is_SGI; /* 1 = short guard interval */
361 u8 is_ht40; /* 1 = 40 MHz channel width */
362 u8 is_dup; /* 1 = duplicated data streams */
363 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
364 u8 max_search; /* maximun number of tables we can search */
365 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
366 u32 current_rate; /* rate_n_flags, uCode API format */
367 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
368};
369
370struct iwl_traffic_load {
371 unsigned long time_stamp; /* age of the oldest statistics */
372 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
373 * slice */
374 u32 total; /* total num of packets during the
375 * last TID_MAX_TIME_DIFF */
376 u8 queue_count; /* number of queues that has
377 * been used since the last cleanup */
378 u8 head; /* start of the circular buffer */
379};
380
381/**
382 * struct iwl_lq_sta -- driver's rate scaling private structure
383 *
384 * Pointer to this gets passed back and forth between driver and mac80211.
385 */
386struct iwl_lq_sta {
387 u8 active_tbl; /* index of active table, range 0-1 */
388 u8 enable_counter; /* indicates HT mode */
389 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
390 u8 search_better_tbl; /* 1: currently trying alternate mode */
391 s32 last_tpt;
392
393 /* The following determine when to search for a new mode */
394 u32 table_count_limit;
395 u32 max_failure_limit; /* # failed frames before new search */
396 u32 max_success_limit; /* # successful frames before new search */
397 u32 table_count;
398 u32 total_failed; /* total failed frames, any/all rates */
399 u32 total_success; /* total successful frames, any/all rates */
400 u64 flush_timer; /* time staying in mode before new search */
401
402 u8 action_counter; /* # mode-switch actions tried */
403 u8 is_green;
404 u8 is_dup;
405 enum ieee80211_band band;
406 u8 ibss_sta_added;
407
408 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
409 u32 supp_rates;
410 u16 active_legacy_rate;
411 u16 active_siso_rate;
412 u16 active_mimo2_rate;
413 u16 active_mimo3_rate;
414 u16 active_rate_basic;
415 s8 max_rate_idx; /* Max rate set by user */
416 u8 missed_rate_counter;
417
418 struct iwl_link_quality_cmd lq;
419 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
420 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
421 u8 tx_agg_tid_en;
422#ifdef CONFIG_MAC80211_DEBUGFS
423 struct dentry *rs_sta_dbgfs_scale_table_file;
424 struct dentry *rs_sta_dbgfs_stats_table_file;
425 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
426 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
427 u32 dbg_fixed_rate;
428#endif
429 struct iwl_priv *drv;
430
431 /* used to be in sta_info */
432 int last_txrate_idx;
433 /* last tx rate_n_flags */
434 u32 last_rate_n_flags;
435 /* packets destined for this STA are aggregated */
436 u8 is_agg;
437};
438
338static inline u8 num_of_ant(u8 mask) 439static inline u8 num_of_ant(u8 mask)
339{ 440{
340 return !!((mask) & ANT_A) + 441 return !!((mask) & ANT_A) +
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 921dc4a26fe2..c96513bddb10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -123,6 +123,17 @@ int iwl_commit_rxon(struct iwl_priv *priv)
123 return -EINVAL; 123 return -EINVAL;
124 } 124 }
125 125
126 /*
127 * receive commit_rxon request
128 * abort any previous channel switch if still in process
129 */
130 if (priv->switch_rxon.switch_in_progress &&
131 (priv->switch_rxon.channel != priv->staging_rxon.channel)) {
132 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
133 le16_to_cpu(priv->switch_rxon.channel));
134 priv->switch_rxon.switch_in_progress = false;
135 }
136
126 /* If we don't need to send a full RXON, we can use 137 /* If we don't need to send a full RXON, we can use
127 * iwl_rxon_assoc_cmd which is used to reconfigure filter 138 * iwl_rxon_assoc_cmd which is used to reconfigure filter
128 * and other flags for the current radio configuration. */ 139 * and other flags for the current radio configuration. */
@@ -134,6 +145,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
134 } 145 }
135 146
136 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 147 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
148 iwl_print_rx_config_cmd(priv);
137 return 0; 149 return 0;
138 } 150 }
139 151
@@ -191,11 +203,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
191 priv->start_calib = 0; 203 priv->start_calib = 0;
192 204
193 /* Add the broadcast address so we can send broadcast frames */ 205 /* Add the broadcast address so we can send broadcast frames */
194 if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) == 206 iwl_add_bcast_station(priv);
195 IWL_INVALID_STATION) {
196 IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
197 return -EIO;
198 }
199 207
200 /* If we have set the ASSOC_MSK and we are in BSS mode then 208 /* If we have set the ASSOC_MSK and we are in BSS mode then
201 * add the IWL_AP_ID to the station rate table */ 209 * add the IWL_AP_ID to the station rate table */
@@ -233,6 +241,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
233 } 241 }
234 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 242 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
235 } 243 }
244 iwl_print_rx_config_cmd(priv);
236 245
237 iwl_init_sensitivity(priv); 246 iwl_init_sensitivity(priv);
238 247
@@ -302,7 +311,7 @@ static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
302 list_add(&frame->list, &priv->free_frames); 311 list_add(&frame->list, &priv->free_frames);
303} 312}
304 313
305static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, 314static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
306 struct ieee80211_hdr *hdr, 315 struct ieee80211_hdr *hdr,
307 int left) 316 int left)
308{ 317{
@@ -319,34 +328,74 @@ static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
319 return priv->ibss_beacon->len; 328 return priv->ibss_beacon->len;
320} 329}
321 330
331/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
332static void iwl_set_beacon_tim(struct iwl_priv *priv,
333 struct iwl_tx_beacon_cmd *tx_beacon_cmd,
334 u8 *beacon, u32 frame_size)
335{
336 u16 tim_idx;
337 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
338
339 /*
340 * The index is relative to frame start but we start looking at the
341 * variable-length part of the beacon.
342 */
343 tim_idx = mgmt->u.beacon.variable - beacon;
344
345 /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
346 while ((tim_idx < (frame_size - 2)) &&
347 (beacon[tim_idx] != WLAN_EID_TIM))
348 tim_idx += beacon[tim_idx+1] + 2;
349
350 /* If TIM field was found, set variables */
351 if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
352 tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
353 tx_beacon_cmd->tim_size = beacon[tim_idx+1];
354 } else
355 IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
356}
357
322static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv, 358static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
323 struct iwl_frame *frame, u8 rate) 359 struct iwl_frame *frame)
324{ 360{
325 struct iwl_tx_beacon_cmd *tx_beacon_cmd; 361 struct iwl_tx_beacon_cmd *tx_beacon_cmd;
326 unsigned int frame_size; 362 u32 frame_size;
363 u32 rate_flags;
364 u32 rate;
365 /*
366 * We have to set up the TX command, the TX Beacon command, and the
367 * beacon contents.
368 */
327 369
370 /* Initialize memory */
328 tx_beacon_cmd = &frame->u.beacon; 371 tx_beacon_cmd = &frame->u.beacon;
329 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 372 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
330 373
331 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id; 374 /* Set up TX beacon contents */
332 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
333
334 frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame, 375 frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
335 sizeof(frame->u) - sizeof(*tx_beacon_cmd)); 376 sizeof(frame->u) - sizeof(*tx_beacon_cmd));
377 if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
378 return 0;
336 379
337 BUG_ON(frame_size > MAX_MPDU_SIZE); 380 /* Set up TX command fields */
338 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); 381 tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
382 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
383 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
384 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
385 TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
339 386
340 if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP)) 387 /* Set up TX beacon command fields */
341 tx_beacon_cmd->tx.rate_n_flags = 388 iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
342 iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK); 389 frame_size);
343 else
344 tx_beacon_cmd->tx.rate_n_flags =
345 iwl_hw_set_rate_n_flags(rate, 0);
346 390
347 tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK | 391 /* Set up packet rate and flags */
348 TX_CMD_FLG_TSF_MSK | 392 rate = iwl_rate_get_lowest_plcp(priv);
349 TX_CMD_FLG_STA_RATE_MSK; 393 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
394 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
395 if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
396 rate_flags |= RATE_MCS_CCK_MSK;
397 tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
398 rate_flags);
350 399
351 return sizeof(*tx_beacon_cmd) + frame_size; 400 return sizeof(*tx_beacon_cmd) + frame_size;
352} 401}
@@ -355,19 +404,20 @@ static int iwl_send_beacon_cmd(struct iwl_priv *priv)
355 struct iwl_frame *frame; 404 struct iwl_frame *frame;
356 unsigned int frame_size; 405 unsigned int frame_size;
357 int rc; 406 int rc;
358 u8 rate;
359 407
360 frame = iwl_get_free_frame(priv); 408 frame = iwl_get_free_frame(priv);
361
362 if (!frame) { 409 if (!frame) {
363 IWL_ERR(priv, "Could not obtain free frame buffer for beacon " 410 IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
364 "command.\n"); 411 "command.\n");
365 return -ENOMEM; 412 return -ENOMEM;
366 } 413 }
367 414
368 rate = iwl_rate_get_lowest_plcp(priv); 415 frame_size = iwl_hw_get_beacon_cmd(priv, frame);
369 416 if (!frame_size) {
370 frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); 417 IWL_ERR(priv, "Error configuring the beacon command\n");
418 iwl_free_frame(priv, frame);
419 return -EINVAL;
420 }
371 421
372 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 422 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
373 &frame->u.cmd[0]); 423 &frame->u.cmd[0]);
@@ -525,7 +575,7 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
525static void iwl_rx_reply_alive(struct iwl_priv *priv, 575static void iwl_rx_reply_alive(struct iwl_priv *priv,
526 struct iwl_rx_mem_buffer *rxb) 576 struct iwl_rx_mem_buffer *rxb)
527{ 577{
528 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 578 struct iwl_rx_packet *pkt = rxb_addr(rxb);
529 struct iwl_alive_resp *palive; 579 struct iwl_alive_resp *palive;
530 struct delayed_work *pwork; 580 struct delayed_work *pwork;
531 581
@@ -604,14 +654,14 @@ static void iwl_bg_statistics_periodic(unsigned long data)
604 if (!iwl_is_ready_rf(priv)) 654 if (!iwl_is_ready_rf(priv))
605 return; 655 return;
606 656
607 iwl_send_statistics_request(priv, CMD_ASYNC); 657 iwl_send_statistics_request(priv, CMD_ASYNC, false);
608} 658}
609 659
610static void iwl_rx_beacon_notif(struct iwl_priv *priv, 660static void iwl_rx_beacon_notif(struct iwl_priv *priv,
611 struct iwl_rx_mem_buffer *rxb) 661 struct iwl_rx_mem_buffer *rxb)
612{ 662{
613#ifdef CONFIG_IWLWIFI_DEBUG 663#ifdef CONFIG_IWLWIFI_DEBUG
614 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 664 struct iwl_rx_packet *pkt = rxb_addr(rxb);
615 struct iwl4965_beacon_notif *beacon = 665 struct iwl4965_beacon_notif *beacon =
616 (struct iwl4965_beacon_notif *)pkt->u.raw; 666 (struct iwl4965_beacon_notif *)pkt->u.raw;
617 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 667 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -635,7 +685,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
635static void iwl_rx_card_state_notif(struct iwl_priv *priv, 685static void iwl_rx_card_state_notif(struct iwl_priv *priv,
636 struct iwl_rx_mem_buffer *rxb) 686 struct iwl_rx_mem_buffer *rxb)
637{ 687{
638 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 688 struct iwl_rx_packet *pkt = rxb_addr(rxb);
639 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 689 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
640 unsigned long status = priv->status; 690 unsigned long status = priv->status;
641 691
@@ -721,7 +771,7 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
721 * statistics request from the host as well as for the periodic 771 * statistics request from the host as well as for the periodic
722 * statistics notifications (after received beacons) from the uCode. 772 * statistics notifications (after received beacons) from the uCode.
723 */ 773 */
724 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_rx_statistics; 774 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
725 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; 775 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
726 776
727 iwl_setup_spectrum_handlers(priv); 777 iwl_setup_spectrum_handlers(priv);
@@ -770,7 +820,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
770 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); 820 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
771 821
772 /* calculate total frames need to be restock after handling RX */ 822 /* calculate total frames need to be restock after handling RX */
773 total_empty = r - priv->rxq.write_actual; 823 total_empty = r - rxq->write_actual;
774 if (total_empty < 0) 824 if (total_empty < 0)
775 total_empty += RX_QUEUE_SIZE; 825 total_empty += RX_QUEUE_SIZE;
776 826
@@ -787,10 +837,13 @@ void iwl_rx_handle(struct iwl_priv *priv)
787 837
788 rxq->queue[i] = NULL; 838 rxq->queue[i] = NULL;
789 839
790 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 840 pci_unmap_page(priv->pci_dev, rxb->page_dma,
791 priv->hw_params.rx_buf_size + 256, 841 PAGE_SIZE << priv->hw_params.rx_page_order,
792 PCI_DMA_FROMDEVICE); 842 PCI_DMA_FROMDEVICE);
793 pkt = (struct iwl_rx_packet *)rxb->skb->data; 843 pkt = rxb_addr(rxb);
844
845 trace_iwlwifi_dev_rx(priv, pkt,
846 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
794 847
795 /* Reclaim a command buffer only if this packet is a response 848 /* Reclaim a command buffer only if this packet is a response
796 * to a (driver-originated) command. 849 * to a (driver-originated) command.
@@ -812,8 +865,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
812 if (priv->rx_handlers[pkt->hdr.cmd]) { 865 if (priv->rx_handlers[pkt->hdr.cmd]) {
813 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, 866 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
814 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 867 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
815 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
816 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 868 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
869 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
817 } else { 870 } else {
818 /* No handling needed */ 871 /* No handling needed */
819 IWL_DEBUG_RX(priv, 872 IWL_DEBUG_RX(priv,
@@ -822,35 +875,45 @@ void iwl_rx_handle(struct iwl_priv *priv)
822 pkt->hdr.cmd); 875 pkt->hdr.cmd);
823 } 876 }
824 877
878 /*
879 * XXX: After here, we should always check rxb->page
880 * against NULL before touching it or its virtual
881 * memory (pkt). Because some rx_handler might have
882 * already taken or freed the pages.
883 */
884
825 if (reclaim) { 885 if (reclaim) {
826 /* Invoke any callbacks, transfer the skb to caller, and 886 /* Invoke any callbacks, transfer the buffer to caller,
827 * fire off the (possibly) blocking iwl_send_cmd() 887 * and fire off the (possibly) blocking iwl_send_cmd()
828 * as we reclaim the driver command queue */ 888 * as we reclaim the driver command queue */
829 if (rxb && rxb->skb) 889 if (rxb->page)
830 iwl_tx_cmd_complete(priv, rxb); 890 iwl_tx_cmd_complete(priv, rxb);
831 else 891 else
832 IWL_WARN(priv, "Claim null rxb?\n"); 892 IWL_WARN(priv, "Claim null rxb?\n");
833 } 893 }
834 894
835 /* For now we just don't re-use anything. We can tweak this 895 /* Reuse the page if possible. For notification packets and
836 * later to try and re-use notification packets and SKBs that 896 * SKBs that fail to Rx correctly, add them back into the
837 * fail to Rx correctly */ 897 * rx_free list for reuse later. */
838 if (rxb->skb != NULL) {
839 priv->alloc_rxb_skb--;
840 dev_kfree_skb_any(rxb->skb);
841 rxb->skb = NULL;
842 }
843
844 spin_lock_irqsave(&rxq->lock, flags); 898 spin_lock_irqsave(&rxq->lock, flags);
845 list_add_tail(&rxb->list, &priv->rxq.rx_used); 899 if (rxb->page != NULL) {
900 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
901 0, PAGE_SIZE << priv->hw_params.rx_page_order,
902 PCI_DMA_FROMDEVICE);
903 list_add_tail(&rxb->list, &rxq->rx_free);
904 rxq->free_count++;
905 } else
906 list_add_tail(&rxb->list, &rxq->rx_used);
907
846 spin_unlock_irqrestore(&rxq->lock, flags); 908 spin_unlock_irqrestore(&rxq->lock, flags);
909
847 i = (i + 1) & RX_QUEUE_MASK; 910 i = (i + 1) & RX_QUEUE_MASK;
848 /* If there are a lot of unused frames, 911 /* If there are a lot of unused frames,
849 * restock the Rx queue so ucode wont assert. */ 912 * restock the Rx queue so ucode wont assert. */
850 if (fill_rx) { 913 if (fill_rx) {
851 count++; 914 count++;
852 if (count >= 8) { 915 if (count >= 8) {
853 priv->rxq.read = i; 916 rxq->read = i;
854 iwl_rx_replenish_now(priv); 917 iwl_rx_replenish_now(priv);
855 count = 0; 918 count = 0;
856 } 919 }
@@ -858,7 +921,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
858 } 921 }
859 922
860 /* Backtrack one entry */ 923 /* Backtrack one entry */
861 priv->rxq.read = i; 924 rxq->read = i;
862 if (fill_rx) 925 if (fill_rx)
863 iwl_rx_replenish_now(priv); 926 iwl_rx_replenish_now(priv);
864 else 927 else
@@ -878,6 +941,7 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
878 u32 inta, handled = 0; 941 u32 inta, handled = 0;
879 u32 inta_fh; 942 u32 inta_fh;
880 unsigned long flags; 943 unsigned long flags;
944 u32 i;
881#ifdef CONFIG_IWLWIFI_DEBUG 945#ifdef CONFIG_IWLWIFI_DEBUG
882 u32 inta_mask; 946 u32 inta_mask;
883#endif 947#endif
@@ -905,6 +969,8 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
905 } 969 }
906#endif 970#endif
907 971
972 spin_unlock_irqrestore(&priv->lock, flags);
973
908 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 974 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
909 * atomic, make sure that inta covers all the interrupts that 975 * atomic, make sure that inta covers all the interrupts that
910 * we've discovered, even if FH interrupt came in just after 976 * we've discovered, even if FH interrupt came in just after
@@ -926,8 +992,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
926 992
927 handled |= CSR_INT_BIT_HW_ERR; 993 handled |= CSR_INT_BIT_HW_ERR;
928 994
929 spin_unlock_irqrestore(&priv->lock, flags);
930
931 return; 995 return;
932 } 996 }
933 997
@@ -995,19 +1059,17 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
995 handled |= CSR_INT_BIT_SW_ERR; 1059 handled |= CSR_INT_BIT_SW_ERR;
996 } 1060 }
997 1061
998 /* uCode wakes up after power-down sleep */ 1062 /*
1063 * uCode wakes up after power-down sleep.
1064 * Tell device about any new tx or host commands enqueued,
1065 * and about any Rx buffers made available while asleep.
1066 */
999 if (inta & CSR_INT_BIT_WAKEUP) { 1067 if (inta & CSR_INT_BIT_WAKEUP) {
1000 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1068 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1001 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1069 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
1002 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1070 for (i = 0; i < priv->hw_params.max_txq_num; i++)
1003 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1071 iwl_txq_update_write_ptr(priv, &priv->txq[i]);
1004 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
1005 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
1006 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
1007 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
1008
1009 priv->isr_stats.wakeup++; 1072 priv->isr_stats.wakeup++;
1010
1011 handled |= CSR_INT_BIT_WAKEUP; 1073 handled |= CSR_INT_BIT_WAKEUP;
1012 } 1074 }
1013 1075
@@ -1020,11 +1082,12 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1020 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1082 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1021 } 1083 }
1022 1084
1085 /* This "Tx" DMA channel is used only for loading uCode */
1023 if (inta & CSR_INT_BIT_FH_TX) { 1086 if (inta & CSR_INT_BIT_FH_TX) {
1024 IWL_DEBUG_ISR(priv, "Tx interrupt\n"); 1087 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1025 priv->isr_stats.tx++; 1088 priv->isr_stats.tx++;
1026 handled |= CSR_INT_BIT_FH_TX; 1089 handled |= CSR_INT_BIT_FH_TX;
1027 /* FH finished to write, send event */ 1090 /* Wake up uCode load routine, now that load is complete */
1028 priv->ucode_write_complete = 1; 1091 priv->ucode_write_complete = 1;
1029 wake_up_interruptible(&priv->wait_command_queue); 1092 wake_up_interruptible(&priv->wait_command_queue);
1030 } 1093 }
@@ -1054,7 +1117,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1054 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1117 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1055 } 1118 }
1056#endif 1119#endif
1057 spin_unlock_irqrestore(&priv->lock, flags);
1058} 1120}
1059 1121
1060/* tasklet for iwlagn interrupt */ 1122/* tasklet for iwlagn interrupt */
@@ -1063,6 +1125,7 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1063 u32 inta = 0; 1125 u32 inta = 0;
1064 u32 handled = 0; 1126 u32 handled = 0;
1065 unsigned long flags; 1127 unsigned long flags;
1128 u32 i;
1066#ifdef CONFIG_IWLWIFI_DEBUG 1129#ifdef CONFIG_IWLWIFI_DEBUG
1067 u32 inta_mask; 1130 u32 inta_mask;
1068#endif 1131#endif
@@ -1084,6 +1147,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1084 inta, inta_mask); 1147 inta, inta_mask);
1085 } 1148 }
1086#endif 1149#endif
1150
1151 spin_unlock_irqrestore(&priv->lock, flags);
1152
1087 /* saved interrupt in inta variable now we can reset priv->inta */ 1153 /* saved interrupt in inta variable now we can reset priv->inta */
1088 priv->inta = 0; 1154 priv->inta = 0;
1089 1155
@@ -1099,8 +1165,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1099 1165
1100 handled |= CSR_INT_BIT_HW_ERR; 1166 handled |= CSR_INT_BIT_HW_ERR;
1101 1167
1102 spin_unlock_irqrestore(&priv->lock, flags);
1103
1104 return; 1168 return;
1105 } 1169 }
1106 1170
@@ -1172,12 +1236,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1172 if (inta & CSR_INT_BIT_WAKEUP) { 1236 if (inta & CSR_INT_BIT_WAKEUP) {
1173 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1237 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1174 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1238 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
1175 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1239 for (i = 0; i < priv->hw_params.max_txq_num; i++)
1176 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1240 iwl_txq_update_write_ptr(priv, &priv->txq[i]);
1177 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
1178 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
1179 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
1180 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
1181 1241
1182 priv->isr_stats.wakeup++; 1242 priv->isr_stats.wakeup++;
1183 1243
@@ -1206,26 +1266,36 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1206 * 3- update RX shared data to indicate last write index. 1266 * 3- update RX shared data to indicate last write index.
1207 * 4- send interrupt. 1267 * 4- send interrupt.
1208 * This could lead to RX race, driver could receive RX interrupt 1268 * This could lead to RX race, driver could receive RX interrupt
1209 * but the shared data changes does not reflect this. 1269 * but the shared data changes does not reflect this;
1210 * this could lead to RX race, RX periodic will solve this race 1270 * periodic interrupt will detect any dangling Rx activity.
1211 */ 1271 */
1212 iwl_write32(priv, CSR_INT_PERIODIC_REG, 1272
1273 /* Disable periodic interrupt; we use it as just a one-shot. */
1274 iwl_write8(priv, CSR_INT_PERIODIC_REG,
1213 CSR_INT_PERIODIC_DIS); 1275 CSR_INT_PERIODIC_DIS);
1214 iwl_rx_handle(priv); 1276 iwl_rx_handle(priv);
1215 /* Only set RX periodic if real RX is received. */ 1277
1278 /*
1279 * Enable periodic interrupt in 8 msec only if we received
1280 * real RX interrupt (instead of just periodic int), to catch
1281 * any dangling Rx interrupt. If it was just the periodic
1282 * interrupt, there was no dangling Rx activity, and no need
1283 * to extend the periodic interrupt; one-shot is enough.
1284 */
1216 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) 1285 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1217 iwl_write32(priv, CSR_INT_PERIODIC_REG, 1286 iwl_write8(priv, CSR_INT_PERIODIC_REG,
1218 CSR_INT_PERIODIC_ENA); 1287 CSR_INT_PERIODIC_ENA);
1219 1288
1220 priv->isr_stats.rx++; 1289 priv->isr_stats.rx++;
1221 } 1290 }
1222 1291
1292 /* This "Tx" DMA channel is used only for loading uCode */
1223 if (inta & CSR_INT_BIT_FH_TX) { 1293 if (inta & CSR_INT_BIT_FH_TX) {
1224 iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK); 1294 iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK);
1225 IWL_DEBUG_ISR(priv, "Tx interrupt\n"); 1295 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1226 priv->isr_stats.tx++; 1296 priv->isr_stats.tx++;
1227 handled |= CSR_INT_BIT_FH_TX; 1297 handled |= CSR_INT_BIT_FH_TX;
1228 /* FH finished to write, send event */ 1298 /* Wake up uCode load routine, now that load is complete */
1229 priv->ucode_write_complete = 1; 1299 priv->ucode_write_complete = 1;
1230 wake_up_interruptible(&priv->wait_command_queue); 1300 wake_up_interruptible(&priv->wait_command_queue);
1231 } 1301 }
@@ -1240,14 +1310,10 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1240 inta & ~priv->inta_mask); 1310 inta & ~priv->inta_mask);
1241 } 1311 }
1242 1312
1243
1244 /* Re-enable all interrupts */ 1313 /* Re-enable all interrupts */
1245 /* only Re-enable if diabled by irq */ 1314 /* only Re-enable if diabled by irq */
1246 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1315 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1247 iwl_enable_interrupts(priv); 1316 iwl_enable_interrupts(priv);
1248
1249 spin_unlock_irqrestore(&priv->lock, flags);
1250
1251} 1317}
1252 1318
1253 1319
@@ -1367,6 +1433,14 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1367 IWL_UCODE_API(priv->ucode_ver), 1433 IWL_UCODE_API(priv->ucode_ver),
1368 IWL_UCODE_SERIAL(priv->ucode_ver)); 1434 IWL_UCODE_SERIAL(priv->ucode_ver));
1369 1435
1436 snprintf(priv->hw->wiphy->fw_version,
1437 sizeof(priv->hw->wiphy->fw_version),
1438 "%u.%u.%u.%u",
1439 IWL_UCODE_MAJOR(priv->ucode_ver),
1440 IWL_UCODE_MINOR(priv->ucode_ver),
1441 IWL_UCODE_API(priv->ucode_ver),
1442 IWL_UCODE_SERIAL(priv->ucode_ver));
1443
1370 if (build) 1444 if (build)
1371 IWL_DEBUG_INFO(priv, "Build %u\n", build); 1445 IWL_DEBUG_INFO(priv, "Build %u\n", build);
1372 1446
@@ -1531,7 +1605,6 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1531 return ret; 1605 return ret;
1532} 1606}
1533 1607
1534#ifdef CONFIG_IWLWIFI_DEBUG
1535static const char *desc_lookup_text[] = { 1608static const char *desc_lookup_text[] = {
1536 "OK", 1609 "OK",
1537 "FAIL", 1610 "FAIL",
@@ -1611,6 +1684,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1611 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 1684 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1612 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 1685 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1613 1686
1687 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
1688 blink1, blink2, ilink1, ilink2);
1689
1614 IWL_ERR(priv, "Desc Time " 1690 IWL_ERR(priv, "Desc Time "
1615 "data1 data2 line\n"); 1691 "data1 data2 line\n");
1616 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", 1692 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
@@ -1635,6 +1711,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1635 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ 1711 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1636 u32 ptr; /* SRAM byte address of log data */ 1712 u32 ptr; /* SRAM byte address of log data */
1637 u32 ev, time, data; /* event log data */ 1713 u32 ev, time, data; /* event log data */
1714 unsigned long reg_flags;
1638 1715
1639 if (num_events == 0) 1716 if (num_events == 0)
1640 return; 1717 return;
@@ -1650,26 +1727,72 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1650 1727
1651 ptr = base + EVENT_START_OFFSET + (start_idx * event_size); 1728 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1652 1729
1730 /* Make sure device is powered up for SRAM reads */
1731 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1732 iwl_grab_nic_access(priv);
1733
1734 /* Set starting address; reads will auto-increment */
1735 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1736 rmb();
1737
1653 /* "time" is actually "data" for mode 0 (no timestamp). 1738 /* "time" is actually "data" for mode 0 (no timestamp).
1654 * place event id # at far right for easier visual parsing. */ 1739 * place event id # at far right for easier visual parsing. */
1655 for (i = 0; i < num_events; i++) { 1740 for (i = 0; i < num_events; i++) {
1656 ev = iwl_read_targ_mem(priv, ptr); 1741 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1657 ptr += sizeof(u32); 1742 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1658 time = iwl_read_targ_mem(priv, ptr);
1659 ptr += sizeof(u32);
1660 if (mode == 0) { 1743 if (mode == 0) {
1661 /* data, ev */ 1744 /* data, ev */
1745 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
1662 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev); 1746 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1663 } else { 1747 } else {
1664 data = iwl_read_targ_mem(priv, ptr); 1748 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1665 ptr += sizeof(u32);
1666 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", 1749 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1667 time, data, ev); 1750 time, data, ev);
1751 trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
1668 } 1752 }
1669 } 1753 }
1754
1755 /* Allow device to power down */
1756 iwl_release_nic_access(priv);
1757 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1758}
1759
1760/**
1761 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1762 */
1763static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1764 u32 num_wraps, u32 next_entry,
1765 u32 size, u32 mode)
1766{
1767 /*
1768 * display the newest DEFAULT_LOG_ENTRIES entries
1769 * i.e the entries just before the next ont that uCode would fill.
1770 */
1771 if (num_wraps) {
1772 if (next_entry < size) {
1773 iwl_print_event_log(priv,
1774 capacity - (size - next_entry),
1775 size - next_entry, mode);
1776 iwl_print_event_log(priv, 0,
1777 next_entry, mode);
1778 } else
1779 iwl_print_event_log(priv, next_entry - size,
1780 size, mode);
1781 } else {
1782 if (next_entry < size)
1783 iwl_print_event_log(priv, 0, next_entry, mode);
1784 else
1785 iwl_print_event_log(priv, next_entry - size,
1786 size, mode);
1787 }
1670} 1788}
1671 1789
1672void iwl_dump_nic_event_log(struct iwl_priv *priv) 1790/* For sanity check only. Actual size is determined by uCode, typ. 512 */
1791#define MAX_EVENT_LOG_SIZE (512)
1792
1793#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1794
1795void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1673{ 1796{
1674 u32 base; /* SRAM byte address of event log header */ 1797 u32 base; /* SRAM byte address of event log header */
1675 u32 capacity; /* event log capacity in # entries */ 1798 u32 capacity; /* event log capacity in # entries */
@@ -1694,6 +1817,18 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
1694 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1817 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1695 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1818 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1696 1819
1820 if (capacity > MAX_EVENT_LOG_SIZE) {
1821 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1822 capacity, MAX_EVENT_LOG_SIZE);
1823 capacity = MAX_EVENT_LOG_SIZE;
1824 }
1825
1826 if (next_entry > MAX_EVENT_LOG_SIZE) {
1827 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1828 next_entry, MAX_EVENT_LOG_SIZE);
1829 next_entry = MAX_EVENT_LOG_SIZE;
1830 }
1831
1697 size = num_wraps ? capacity : next_entry; 1832 size = num_wraps ? capacity : next_entry;
1698 1833
1699 /* bail out if nothing in log */ 1834 /* bail out if nothing in log */
@@ -1702,19 +1837,37 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv)
1702 return; 1837 return;
1703 } 1838 }
1704 1839
1705 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n", 1840#ifdef CONFIG_IWLWIFI_DEBUG
1706 size, num_wraps); 1841 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
1707 1842 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1708 /* if uCode has wrapped back to top of log, start at the oldest entry, 1843 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1709 * i.e the next one that uCode would fill. */ 1844#else
1710 if (num_wraps) 1845 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
1711 iwl_print_event_log(priv, next_entry, 1846 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
1712 capacity - next_entry, mode); 1847#endif
1713 /* (then/else) start at top of log */ 1848 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
1714 iwl_print_event_log(priv, 0, next_entry, mode); 1849 size);
1715 1850
1716} 1851#ifdef CONFIG_IWLWIFI_DEBUG
1852 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1853 /*
1854 * if uCode has wrapped back to top of log,
1855 * start at the oldest entry,
1856 * i.e the next one that uCode would fill.
1857 */
1858 if (num_wraps)
1859 iwl_print_event_log(priv, next_entry,
1860 capacity - next_entry, mode);
1861 /* (then/else) start at top of log */
1862 iwl_print_event_log(priv, 0, next_entry, mode);
1863 } else
1864 iwl_print_last_event_logs(priv, capacity, num_wraps,
1865 next_entry, size, mode);
1866#else
1867 iwl_print_last_event_logs(priv, capacity, num_wraps,
1868 next_entry, size, mode);
1717#endif 1869#endif
1870}
1718 1871
1719/** 1872/**
1720 * iwl_alive_start - called after REPLY_ALIVE notification received 1873 * iwl_alive_start - called after REPLY_ALIVE notification received
@@ -1763,6 +1916,10 @@ static void iwl_alive_start(struct iwl_priv *priv)
1763 priv->active_rate = priv->rates_mask; 1916 priv->active_rate = priv->rates_mask;
1764 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 1917 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1765 1918
1919 /* Configure Tx antenna selection based on H/W config */
1920 if (priv->cfg->ops->hcmd->set_tx_ant)
1921 priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);
1922
1766 if (iwl_is_associated(priv)) { 1923 if (iwl_is_associated(priv)) {
1767 struct iwl_rxon_cmd *active_rxon = 1924 struct iwl_rxon_cmd *active_rxon =
1768 (struct iwl_rxon_cmd *)&priv->active_rxon; 1925 (struct iwl_rxon_cmd *)&priv->active_rxon;
@@ -1790,7 +1947,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
1790 /* At this point, the NIC is initialized and operational */ 1947 /* At this point, the NIC is initialized and operational */
1791 iwl_rf_kill_ct_config(priv); 1948 iwl_rf_kill_ct_config(priv);
1792 1949
1793 iwl_leds_register(priv); 1950 iwl_leds_init(priv);
1794 1951
1795 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 1952 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1796 set_bit(STATUS_READY, &priv->status); 1953 set_bit(STATUS_READY, &priv->status);
@@ -1828,8 +1985,6 @@ static void __iwl_down(struct iwl_priv *priv)
1828 if (!exit_pending) 1985 if (!exit_pending)
1829 set_bit(STATUS_EXIT_PENDING, &priv->status); 1986 set_bit(STATUS_EXIT_PENDING, &priv->status);
1830 1987
1831 iwl_leds_unregister(priv);
1832
1833 iwl_clear_stations_table(priv); 1988 iwl_clear_stations_table(priv);
1834 1989
1835 /* Unblock any waiting calls */ 1990 /* Unblock any waiting calls */
@@ -1877,24 +2032,20 @@ static void __iwl_down(struct iwl_priv *priv)
1877 2032
1878 /* device going down, Stop using ICT table */ 2033 /* device going down, Stop using ICT table */
1879 iwl_disable_ict(priv); 2034 iwl_disable_ict(priv);
1880 spin_lock_irqsave(&priv->lock, flags);
1881 iwl_clear_bit(priv, CSR_GP_CNTRL,
1882 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1883 spin_unlock_irqrestore(&priv->lock, flags);
1884 2035
1885 iwl_txq_ctx_stop(priv); 2036 iwl_txq_ctx_stop(priv);
1886 iwl_rxq_stop(priv); 2037 iwl_rxq_stop(priv);
1887 2038
1888 iwl_write_prph(priv, APMG_CLK_DIS_REG, 2039 /* Power-down device's busmaster DMA clocks */
1889 APMG_CLK_VAL_DMA_CLK_RQT); 2040 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
1890
1891 udelay(5); 2041 udelay(5);
1892 2042
1893 /* FIXME: apm_ops.suspend(priv) */ 2043 /* Make sure (redundant) we've released our request to stay awake */
1894 if (exit_pending) 2044 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1895 priv->cfg->ops->lib->apm_ops.stop(priv); 2045
1896 else 2046 /* Stop the device, and put it in low power state */
1897 priv->cfg->ops->lib->apm_ops.reset(priv); 2047 priv->cfg->ops->lib->apm_ops.stop(priv);
2048
1898 exit: 2049 exit:
1899 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2050 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
1900 2051
@@ -2281,6 +2432,67 @@ void iwl_post_associate(struct iwl_priv *priv)
2281 2432
2282#define UCODE_READY_TIMEOUT (4 * HZ) 2433#define UCODE_READY_TIMEOUT (4 * HZ)
2283 2434
2435/*
2436 * Not a mac80211 entry point function, but it fits in with all the
2437 * other mac80211 functions grouped here.
2438 */
2439static int iwl_setup_mac(struct iwl_priv *priv)
2440{
2441 int ret;
2442 struct ieee80211_hw *hw = priv->hw;
2443 hw->rate_control_algorithm = "iwl-agn-rs";
2444
2445 /* Tell mac80211 our characteristics */
2446 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2447 IEEE80211_HW_NOISE_DBM |
2448 IEEE80211_HW_AMPDU_AGGREGATION |
2449 IEEE80211_HW_SPECTRUM_MGMT;
2450
2451 if (!priv->cfg->broken_powersave)
2452 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2453 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2454
2455 hw->sta_data_size = sizeof(struct iwl_station_priv);
2456 hw->wiphy->interface_modes =
2457 BIT(NL80211_IFTYPE_STATION) |
2458 BIT(NL80211_IFTYPE_ADHOC);
2459
2460 hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
2461 WIPHY_FLAG_DISABLE_BEACON_HINTS;
2462
2463 /*
2464 * For now, disable PS by default because it affects
2465 * RX performance significantly.
2466 */
2467 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
2468
2469 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2470 /* we create the 802.11 header and a zero-length SSID element */
2471 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
2472
2473 /* Default value; 4 EDCA QOS priorities */
2474 hw->queues = 4;
2475
2476 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2477
2478 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2479 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2480 &priv->bands[IEEE80211_BAND_2GHZ];
2481 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2482 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2483 &priv->bands[IEEE80211_BAND_5GHZ];
2484
2485 ret = ieee80211_register_hw(priv->hw);
2486 if (ret) {
2487 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2488 return ret;
2489 }
2490 priv->mac80211_registered = 1;
2491
2492 return 0;
2493}
2494
2495
2284static int iwl_mac_start(struct ieee80211_hw *hw) 2496static int iwl_mac_start(struct ieee80211_hw *hw)
2285{ 2497{
2286 struct iwl_priv *priv = hw->priv; 2498 struct iwl_priv *priv = hw->priv;
@@ -2328,6 +2540,8 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
2328 } 2540 }
2329 } 2541 }
2330 2542
2543 iwl_led_start(priv);
2544
2331out: 2545out:
2332 priv->is_open = 1; 2546 priv->is_open = 1;
2333 IWL_DEBUG_MAC80211(priv, "leave\n"); 2547 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2404,6 +2618,10 @@ void iwl_config_ap(struct iwl_priv *priv)
2404 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 2618 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
2405 "Attempting to continue.\n"); 2619 "Attempting to continue.\n");
2406 2620
2621 /* AP has all antennas */
2622 priv->chain_noise_data.active_chains =
2623 priv->hw_params.valid_rx_ant;
2624 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2407 if (priv->cfg->ops->hcmd->set_rxon_chain) 2625 if (priv->cfg->ops->hcmd->set_rxon_chain)
2408 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2626 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2409 2627
@@ -2432,10 +2650,11 @@ void iwl_config_ap(struct iwl_priv *priv)
2432 /* restore RXON assoc */ 2650 /* restore RXON assoc */
2433 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2651 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
2434 iwlcore_commit_rxon(priv); 2652 iwlcore_commit_rxon(priv);
2653 iwl_reset_qos(priv);
2435 spin_lock_irqsave(&priv->lock, flags); 2654 spin_lock_irqsave(&priv->lock, flags);
2436 iwl_activate_qos(priv, 1); 2655 iwl_activate_qos(priv, 1);
2437 spin_unlock_irqrestore(&priv->lock, flags); 2656 spin_unlock_irqrestore(&priv->lock, flags);
2438 iwl_rxon_add_station(priv, iwl_bcast_addr, 0); 2657 iwl_add_bcast_station(priv);
2439 } 2658 }
2440 iwl_send_beacon_cmd(priv); 2659 iwl_send_beacon_cmd(priv);
2441 2660
@@ -2527,6 +2746,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2527} 2746}
2528 2747
2529static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, 2748static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2749 struct ieee80211_vif *vif,
2530 enum ieee80211_ampdu_mlme_action action, 2750 enum ieee80211_ampdu_mlme_action action,
2531 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2751 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2532{ 2752{
@@ -2580,6 +2800,45 @@ static int iwl_mac_get_stats(struct ieee80211_hw *hw,
2580 return 0; 2800 return 0;
2581} 2801}
2582 2802
2803static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
2804 struct ieee80211_vif *vif,
2805 enum sta_notify_cmd cmd,
2806 struct ieee80211_sta *sta)
2807{
2808 struct iwl_priv *priv = hw->priv;
2809 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2810 int sta_id;
2811
2812 /*
2813 * TODO: We really should use this callback to
2814 * actually maintain the station table in
2815 * the device.
2816 */
2817
2818 switch (cmd) {
2819 case STA_NOTIFY_ADD:
2820 atomic_set(&sta_priv->pending_frames, 0);
2821 if (vif->type == NL80211_IFTYPE_AP)
2822 sta_priv->client = true;
2823 break;
2824 case STA_NOTIFY_SLEEP:
2825 WARN_ON(!sta_priv->client);
2826 sta_priv->asleep = true;
2827 if (atomic_read(&sta_priv->pending_frames) > 0)
2828 ieee80211_sta_block_awake(hw, sta, true);
2829 break;
2830 case STA_NOTIFY_AWAKE:
2831 WARN_ON(!sta_priv->client);
2832 sta_priv->asleep = false;
2833 sta_id = iwl_find_station(priv, sta->addr);
2834 if (sta_id != IWL_INVALID_STATION)
2835 iwl_sta_modify_ps_wake(priv, sta_id);
2836 break;
2837 default:
2838 break;
2839 }
2840}
2841
2583/***************************************************************************** 2842/*****************************************************************************
2584 * 2843 *
2585 * sysfs attributes 2844 * sysfs attributes
@@ -2774,7 +3033,7 @@ static ssize_t show_statistics(struct device *d,
2774 return -EAGAIN; 3033 return -EAGAIN;
2775 3034
2776 mutex_lock(&priv->mutex); 3035 mutex_lock(&priv->mutex);
2777 rc = iwl_send_statistics_request(priv, 0); 3036 rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
2778 mutex_unlock(&priv->mutex); 3037 mutex_unlock(&priv->mutex);
2779 3038
2780 if (rc) { 3039 if (rc) {
@@ -2799,6 +3058,40 @@ static ssize_t show_statistics(struct device *d,
2799 3058
2800static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); 3059static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
2801 3060
3061static ssize_t show_rts_ht_protection(struct device *d,
3062 struct device_attribute *attr, char *buf)
3063{
3064 struct iwl_priv *priv = dev_get_drvdata(d);
3065
3066 return sprintf(buf, "%s\n",
3067 priv->cfg->use_rts_for_ht ? "RTS/CTS" : "CTS-to-self");
3068}
3069
3070static ssize_t store_rts_ht_protection(struct device *d,
3071 struct device_attribute *attr,
3072 const char *buf, size_t count)
3073{
3074 struct iwl_priv *priv = dev_get_drvdata(d);
3075 unsigned long val;
3076 int ret;
3077
3078 ret = strict_strtoul(buf, 10, &val);
3079 if (ret)
3080 IWL_INFO(priv, "Input is not in decimal form.\n");
3081 else {
3082 if (!iwl_is_associated(priv))
3083 priv->cfg->use_rts_for_ht = val ? true : false;
3084 else
3085 IWL_ERR(priv, "Sta associated with AP - "
3086 "Change protection mechanism is not allowed\n");
3087 ret = count;
3088 }
3089 return ret;
3090}
3091
3092static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO,
3093 show_rts_ht_protection, store_rts_ht_protection);
3094
2802 3095
2803/***************************************************************************** 3096/*****************************************************************************
2804 * 3097 *
@@ -2849,12 +3142,103 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
2849 del_timer_sync(&priv->statistics_periodic); 3142 del_timer_sync(&priv->statistics_periodic);
2850} 3143}
2851 3144
3145static void iwl_init_hw_rates(struct iwl_priv *priv,
3146 struct ieee80211_rate *rates)
3147{
3148 int i;
3149
3150 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
3151 rates[i].bitrate = iwl_rates[i].ieee * 5;
3152 rates[i].hw_value = i; /* Rate scaling will work on indexes */
3153 rates[i].hw_value_short = i;
3154 rates[i].flags = 0;
3155 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
3156 /*
3157 * If CCK != 1M then set short preamble rate flag.
3158 */
3159 rates[i].flags |=
3160 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
3161 0 : IEEE80211_RATE_SHORT_PREAMBLE;
3162 }
3163 }
3164}
3165
3166static int iwl_init_drv(struct iwl_priv *priv)
3167{
3168 int ret;
3169
3170 priv->ibss_beacon = NULL;
3171
3172 spin_lock_init(&priv->lock);
3173 spin_lock_init(&priv->sta_lock);
3174 spin_lock_init(&priv->hcmd_lock);
3175
3176 INIT_LIST_HEAD(&priv->free_frames);
3177
3178 mutex_init(&priv->mutex);
3179
3180 /* Clear the driver's (not device's) station table */
3181 iwl_clear_stations_table(priv);
3182
3183 priv->ieee_channels = NULL;
3184 priv->ieee_rates = NULL;
3185 priv->band = IEEE80211_BAND_2GHZ;
3186
3187 priv->iw_mode = NL80211_IFTYPE_STATION;
3188
3189 /* Choose which receivers/antennas to use */
3190 if (priv->cfg->ops->hcmd->set_rxon_chain)
3191 priv->cfg->ops->hcmd->set_rxon_chain(priv);
3192
3193 iwl_init_scan_params(priv);
3194
3195 iwl_reset_qos(priv);
3196
3197 priv->qos_data.qos_active = 0;
3198 priv->qos_data.qos_cap.val = 0;
3199
3200 priv->rates_mask = IWL_RATES_MASK;
3201 /* Set the tx_power_user_lmt to the lowest power level
3202 * this value will get overwritten by channel max power avg
3203 * from eeprom */
3204 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
3205
3206 ret = iwl_init_channel_map(priv);
3207 if (ret) {
3208 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3209 goto err;
3210 }
3211
3212 ret = iwlcore_init_geos(priv);
3213 if (ret) {
3214 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3215 goto err_free_channel_map;
3216 }
3217 iwl_init_hw_rates(priv, priv->ieee_rates);
3218
3219 return 0;
3220
3221err_free_channel_map:
3222 iwl_free_channel_map(priv);
3223err:
3224 return ret;
3225}
3226
3227static void iwl_uninit_drv(struct iwl_priv *priv)
3228{
3229 iwl_calib_free_results(priv);
3230 iwlcore_free_geos(priv);
3231 iwl_free_channel_map(priv);
3232 kfree(priv->scan);
3233}
3234
2852static struct attribute *iwl_sysfs_entries[] = { 3235static struct attribute *iwl_sysfs_entries[] = {
2853 &dev_attr_flags.attr, 3236 &dev_attr_flags.attr,
2854 &dev_attr_filter_flags.attr, 3237 &dev_attr_filter_flags.attr,
2855 &dev_attr_statistics.attr, 3238 &dev_attr_statistics.attr,
2856 &dev_attr_temperature.attr, 3239 &dev_attr_temperature.attr,
2857 &dev_attr_tx_power.attr, 3240 &dev_attr_tx_power.attr,
3241 &dev_attr_rts_ht_protection.attr,
2858#ifdef CONFIG_IWLWIFI_DEBUG 3242#ifdef CONFIG_IWLWIFI_DEBUG
2859 &dev_attr_debug_level.attr, 3243 &dev_attr_debug_level.attr,
2860#endif 3244#endif
@@ -2882,7 +3266,8 @@ static struct ieee80211_ops iwl_hw_ops = {
2882 .reset_tsf = iwl_mac_reset_tsf, 3266 .reset_tsf = iwl_mac_reset_tsf,
2883 .bss_info_changed = iwl_bss_info_changed, 3267 .bss_info_changed = iwl_bss_info_changed,
2884 .ampdu_action = iwl_mac_ampdu_action, 3268 .ampdu_action = iwl_mac_ampdu_action,
2885 .hw_scan = iwl_mac_hw_scan 3269 .hw_scan = iwl_mac_hw_scan,
3270 .sta_notify = iwl_mac_sta_notify,
2886}; 3271};
2887 3272
2888static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3273static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -2990,12 +3375,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2990 goto out_iounmap; 3375 goto out_iounmap;
2991 } 3376 }
2992 3377
2993 /* amp init */
2994 err = priv->cfg->ops->lib->apm_ops.init(priv);
2995 if (err < 0) {
2996 IWL_ERR(priv, "Failed to init APMG\n");
2997 goto out_iounmap;
2998 }
2999 /***************** 3378 /*****************
3000 * 4. Read EEPROM 3379 * 4. Read EEPROM
3001 *****************/ 3380 *****************/
@@ -3141,6 +3520,15 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3141 iwl_down(priv); 3520 iwl_down(priv);
3142 } 3521 }
3143 3522
3523 /*
3524 * Make sure device is reset to low power before unloading driver.
3525 * This may be redundant with iwl_down(), but there are paths to
3526 * run iwl_down() without calling apm_ops.stop(), and there are
3527 * paths to avoid running iwl_down() at all before leaving driver.
3528 * This (inexpensive) call *makes sure* device is reset.
3529 */
3530 priv->cfg->ops->lib->apm_ops.stop(priv);
3531
3144 iwl_tt_exit(priv); 3532 iwl_tt_exit(priv);
3145 3533
3146 /* make sure we flush any pending irq or 3534 /* make sure we flush any pending irq or
@@ -3203,37 +3591,97 @@ static struct pci_device_id iwl_hw_card_ids[] = {
3203 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)}, 3591 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
3204#endif /* CONFIG_IWL4965 */ 3592#endif /* CONFIG_IWL4965 */
3205#ifdef CONFIG_IWL5000 3593#ifdef CONFIG_IWL5000
3206 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bg_cfg)}, 3594/* 5100 Series WiFi */
3207 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bg_cfg)}, 3595 {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
3208 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, 3596 {IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
3209 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, 3597 {IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
3210 {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, 3598 {IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
3211 {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, 3599 {IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
3212 {IWL_PCI_DEVICE(0x4232, PCI_ANY_ID, iwl5100_agn_cfg)}, 3600 {IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
3213 {IWL_PCI_DEVICE(0x4235, PCI_ANY_ID, iwl5300_agn_cfg)}, 3601 {IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
3214 {IWL_PCI_DEVICE(0x4236, PCI_ANY_ID, iwl5300_agn_cfg)}, 3602 {IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
3215 {IWL_PCI_DEVICE(0x4237, PCI_ANY_ID, iwl5100_agn_cfg)}, 3603 {IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
3216/* 5350 WiFi/WiMax */ 3604 {IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
3217 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, 3605 {IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
3218 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, 3606 {IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
3219 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, 3607 {IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
3220/* 5150 Wifi/WiMax */ 3608 {IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
3221 {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)}, 3609 {IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
3222 {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)}, 3610 {IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
3223/* 6000/6050 Series */ 3611 {IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
3224 {IWL_PCI_DEVICE(0x008D, PCI_ANY_ID, iwl6000h_2agn_cfg)}, 3612 {IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
3225 {IWL_PCI_DEVICE(0x008E, PCI_ANY_ID, iwl6000h_2agn_cfg)}, 3613 {IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
3226 {IWL_PCI_DEVICE(0x422B, PCI_ANY_ID, iwl6000_3agn_cfg)}, 3614 {IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
3227 {IWL_PCI_DEVICE(0x422C, PCI_ANY_ID, iwl6000i_2agn_cfg)}, 3615 {IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
3228 {IWL_PCI_DEVICE(0x4238, PCI_ANY_ID, iwl6000_3agn_cfg)}, 3616 {IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
3229 {IWL_PCI_DEVICE(0x4239, PCI_ANY_ID, iwl6000i_2agn_cfg)}, 3617 {IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
3230 {IWL_PCI_DEVICE(0x0086, PCI_ANY_ID, iwl6050_3agn_cfg)}, 3618 {IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
3231 {IWL_PCI_DEVICE(0x0087, PCI_ANY_ID, iwl6050_2agn_cfg)}, 3619
3232 {IWL_PCI_DEVICE(0x0088, PCI_ANY_ID, iwl6050_3agn_cfg)}, 3620/* 5300 Series WiFi */
3233 {IWL_PCI_DEVICE(0x0089, PCI_ANY_ID, iwl6050_2agn_cfg)}, 3621 {IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
3622 {IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
3623 {IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
3624 {IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
3625 {IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
3626 {IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
3627 {IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
3628 {IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
3629 {IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
3630 {IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
3631 {IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
3632 {IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
3633
3634/* 5350 Series WiFi/WiMax */
3635 {IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
3636 {IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
3637 {IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
3638
3639/* 5150 Series Wifi/WiMax */
3640 {IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
3641 {IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
3642 {IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
3643 {IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
3644 {IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
3645 {IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
3646
3647 {IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
3648 {IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
3649 {IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
3650 {IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
3651
3652/* 6x00 Series */
3653 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
3654 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
3655 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
3656 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
3657 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
3658 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
3659 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
3660 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
3661 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
3662 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
3663
3664/* 6x50 WiFi/WiMax Series */
3665 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
3666 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
3667 {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
3668 {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
3669 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
3670 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
3671
3234/* 1000 Series WiFi */ 3672/* 1000 Series WiFi */
3235 {IWL_PCI_DEVICE(0x0083, PCI_ANY_ID, iwl1000_bgn_cfg)}, 3673 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
3236 {IWL_PCI_DEVICE(0x0084, PCI_ANY_ID, iwl1000_bgn_cfg)}, 3674 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
3675 {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
3676 {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
3677 {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
3678 {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
3679 {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
3680 {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
3681 {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
3682 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
3683 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
3684 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
3237#endif /* CONFIG_IWL5000 */ 3685#endif /* CONFIG_IWL5000 */
3238 3686
3239 {0} 3687 {0}
@@ -3288,9 +3736,9 @@ module_exit(iwl_exit);
3288module_init(iwl_init); 3736module_init(iwl_init);
3289 3737
3290#ifdef CONFIG_IWLWIFI_DEBUG 3738#ifdef CONFIG_IWLWIFI_DEBUG
3291module_param_named(debug50, iwl_debug_level, uint, 0444); 3739module_param_named(debug50, iwl_debug_level, uint, S_IRUGO);
3292MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)"); 3740MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)");
3293module_param_named(debug, iwl_debug_level, uint, 0644); 3741module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
3294MODULE_PARM_DESC(debug, "debug output mask"); 3742MODULE_PARM_DESC(debug, "debug output mask");
3295#endif 3743#endif
3296 3744
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index c4b565a2de94..95a57b36a7ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -132,6 +132,7 @@ void iwl_calib_free_results(struct iwl_priv *priv)
132 priv->calib_results[i].buf_len = 0; 132 priv->calib_results[i].buf_len = 0;
133 } 133 }
134} 134}
135EXPORT_SYMBOL(iwl_calib_free_results);
135 136
136/***************************************************************************** 137/*****************************************************************************
137 * RUNTIME calibrations framework 138 * RUNTIME calibrations framework
@@ -447,11 +448,11 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
447 cpu_to_le16((u16)data->nrg_th_ofdm); 448 cpu_to_le16((u16)data->nrg_th_ofdm);
448 449
449 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 450 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
450 cpu_to_le16(190); 451 cpu_to_le16(data->barker_corr_th_min);
451 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 452 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
452 cpu_to_le16(390); 453 cpu_to_le16(data->barker_corr_th_min_mrc);
453 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = 454 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
454 cpu_to_le16(62); 455 cpu_to_le16(data->nrg_th_cca);
455 456
456 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 457 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
457 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, 458 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
@@ -516,7 +517,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
516 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) 517 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
517 data->nrg_silence_rssi[i] = 0; 518 data->nrg_silence_rssi[i] = 0;
518 519
519 data->auto_corr_ofdm = 90; 520 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
520 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; 521 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
521 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; 522 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
522 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; 523 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
@@ -524,6 +525,9 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
524 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; 525 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
525 data->nrg_th_cck = ranges->nrg_th_cck; 526 data->nrg_th_cck = ranges->nrg_th_cck;
526 data->nrg_th_ofdm = ranges->nrg_th_ofdm; 527 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
528 data->barker_corr_th_min = ranges->barker_corr_th_min;
529 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
530 data->nrg_th_cca = ranges->nrg_th_cca;
527 531
528 data->last_bad_plcp_cnt_ofdm = 0; 532 data->last_bad_plcp_cnt_ofdm = 0;
529 data->last_fa_cnt_ofdm = 0; 533 data->last_fa_cnt_ofdm = 0;
@@ -643,6 +647,15 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
643} 647}
644EXPORT_SYMBOL(iwl_sensitivity_calibration); 648EXPORT_SYMBOL(iwl_sensitivity_calibration);
645 649
650static inline u8 find_first_chain(u8 mask)
651{
652 if (mask & ANT_A)
653 return CHAIN_A;
654 if (mask & ANT_B)
655 return CHAIN_B;
656 return CHAIN_C;
657}
658
646/* 659/*
647 * Accumulate 20 beacons of signal and noise statistics for each of 660 * Accumulate 20 beacons of signal and noise statistics for each of
648 * 3 receivers/antennas/rx-chains, then figure out: 661 * 3 receivers/antennas/rx-chains, then figure out:
@@ -675,14 +688,17 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
675 u8 num_tx_chains; 688 u8 num_tx_chains;
676 unsigned long flags; 689 unsigned long flags;
677 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); 690 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
691 u8 first_chain;
678 692
679 if (priv->disable_chain_noise_cal) 693 if (priv->disable_chain_noise_cal)
680 return; 694 return;
681 695
682 data = &(priv->chain_noise_data); 696 data = &(priv->chain_noise_data);
683 697
684 /* Accumulate just the first 20 beacons after the first association, 698 /*
685 * then we're done forever. */ 699 * Accumulate just the first "chain_noise_num_beacons" after
700 * the first association, then we're done forever.
701 */
686 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { 702 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
687 if (data->state == IWL_CHAIN_NOISE_ALIVE) 703 if (data->state == IWL_CHAIN_NOISE_ALIVE)
688 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); 704 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
@@ -710,7 +726,10 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
710 return; 726 return;
711 } 727 }
712 728
713 /* Accumulate beacon statistics values across 20 beacons */ 729 /*
730 * Accumulate beacon statistics values across
731 * "chain_noise_num_beacons"
732 */
714 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & 733 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
715 IN_BAND_FILTER; 734 IN_BAND_FILTER;
716 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & 735 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
@@ -741,16 +760,19 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
741 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", 760 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
742 chain_noise_a, chain_noise_b, chain_noise_c); 761 chain_noise_a, chain_noise_b, chain_noise_c);
743 762
744 /* If this is the 20th beacon, determine: 763 /* If this is the "chain_noise_num_beacons", determine:
745 * 1) Disconnected antennas (using signal strengths) 764 * 1) Disconnected antennas (using signal strengths)
746 * 2) Differential gain (using silence noise) to balance receivers */ 765 * 2) Differential gain (using silence noise) to balance receivers */
747 if (data->beacon_count != CAL_NUM_OF_BEACONS) 766 if (data->beacon_count != priv->cfg->chain_noise_num_beacons)
748 return; 767 return;
749 768
750 /* Analyze signal for disconnected antenna */ 769 /* Analyze signal for disconnected antenna */
751 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS; 770 average_sig[0] =
752 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS; 771 (data->chain_signal_a) / priv->cfg->chain_noise_num_beacons;
753 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS; 772 average_sig[1] =
773 (data->chain_signal_b) / priv->cfg->chain_noise_num_beacons;
774 average_sig[2] =
775 (data->chain_signal_c) / priv->cfg->chain_noise_num_beacons;
754 776
755 if (average_sig[0] >= average_sig[1]) { 777 if (average_sig[0] >= average_sig[1]) {
756 max_average_sig = average_sig[0]; 778 max_average_sig = average_sig[0];
@@ -803,13 +825,17 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
803 /* there is a Tx antenna connected */ 825 /* there is a Tx antenna connected */
804 break; 826 break;
805 if (num_tx_chains == priv->hw_params.tx_chains_num && 827 if (num_tx_chains == priv->hw_params.tx_chains_num &&
806 data->disconn_array[i]) { 828 data->disconn_array[i]) {
807 /* This is the last TX antenna and is also 829 /*
808 * disconnected connect it anyway */ 830 * If all chains are disconnected
809 data->disconn_array[i] = 0; 831 * connect the first valid tx chain
810 active_chains |= ant_msk; 832 */
811 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - " 833 first_chain =
812 "declare %d as connected\n", i); 834 find_first_chain(priv->cfg->valid_tx_ant);
835 data->disconn_array[first_chain] = 0;
836 active_chains |= BIT(first_chain);
837 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n",
838 first_chain);
813 break; 839 break;
814 } 840 }
815 } 841 }
@@ -820,9 +846,12 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
820 active_chains); 846 active_chains);
821 847
822 /* Analyze noise for rx balance */ 848 /* Analyze noise for rx balance */
823 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); 849 average_noise[0] =
824 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS); 850 ((data->chain_noise_a) / priv->cfg->chain_noise_num_beacons);
825 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS); 851 average_noise[1] =
852 ((data->chain_noise_b) / priv->cfg->chain_noise_num_beacons);
853 average_noise[2] =
854 ((data->chain_noise_c) / priv->cfg->chain_noise_num_beacons);
826 855
827 for (i = 0; i < NUM_RX_CHAINS; i++) { 856 for (i = 0; i < NUM_RX_CHAINS; i++) {
828 if (!(data->disconn_array[i]) && 857 if (!(data->disconn_array[i]) &&
@@ -843,7 +872,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
843 872
844 if (priv->cfg->ops->utils->gain_computation) 873 if (priv->cfg->ops->utils->gain_computation)
845 priv->cfg->ops->utils->gain_computation(priv, average_noise, 874 priv->cfg->ops->utils->gain_computation(priv, average_noise,
846 min_average_noise_antenna_i, min_average_noise); 875 min_average_noise_antenna_i, min_average_noise,
876 find_first_chain(priv->cfg->valid_rx_ant));
847 877
848 /* Some power changes may have been made during the calibration. 878 /* Some power changes may have been made during the calibration.
849 * Update and commit the RXON 879 * Update and commit the RXON
@@ -870,7 +900,7 @@ void iwl_reset_run_time_calib(struct iwl_priv *priv)
870 900
871 /* Ask for statistics now, the uCode will send notification 901 /* Ask for statistics now, the uCode will send notification
872 * periodically after association */ 902 * periodically after association */
873 iwl_send_statistics_request(priv, CMD_ASYNC); 903 iwl_send_statistics_request(priv, CMD_ASYNC, true);
874} 904}
875EXPORT_SYMBOL(iwl_reset_run_time_calib); 905EXPORT_SYMBOL(iwl_reset_run_time_calib);
876 906
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 4afaf773aeac..e91507531923 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,11 +109,12 @@ enum {
109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
110 110
111 /* WiMAX coexistence */ 111 /* WiMAX coexistence */
112 COEX_PRIORITY_TABLE_CMD = 0x5a, /*5000 only */ 112 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
113 COEX_MEDIUM_NOTIFICATION = 0x5b, 113 COEX_MEDIUM_NOTIFICATION = 0x5b,
114 COEX_EVENT_CMD = 0x5c, 114 COEX_EVENT_CMD = 0x5c,
115 115
116 /* Calibration */ 116 /* Calibration */
117 TEMPERATURE_NOTIFICATION = 0x62,
117 CALIBRATION_CFG_CMD = 0x65, 118 CALIBRATION_CFG_CMD = 0x65,
118 CALIBRATION_RES_NOTIFICATION = 0x66, 119 CALIBRATION_RES_NOTIFICATION = 0x66,
119 CALIBRATION_COMPLETE_NOTIFICATION = 0x67, 120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
@@ -148,7 +149,7 @@ enum {
148 QUIET_NOTIFICATION = 0x96, /* not used */ 149 QUIET_NOTIFICATION = 0x96, /* not used */
149 REPLY_TX_PWR_TABLE_CMD = 0x97, 150 REPLY_TX_PWR_TABLE_CMD = 0x97,
150 REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */ 151 REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */
151 TX_ANT_CONFIGURATION_CMD = 0x98, /* not used */ 152 TX_ANT_CONFIGURATION_CMD = 0x98,
152 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ 153 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
153 154
154 /* Bluetooth device coexistence config command */ 155 /* Bluetooth device coexistence config command */
@@ -353,6 +354,9 @@ struct iwl3945_power_per_rate {
353#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 354#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
354#define POWER_TABLE_CCK_ENTRY 32 355#define POWER_TABLE_CCK_ENTRY 32
355 356
357#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
358#define IWL_PWR_CCK_ENTRIES 2
359
356/** 360/**
357 * union iwl4965_tx_power_dual_stream 361 * union iwl4965_tx_power_dual_stream
358 * 362 *
@@ -411,6 +415,16 @@ struct iwl5000_tx_power_dbm_cmd {
411 u8 reserved; 415 u8 reserved;
412} __attribute__ ((packed)); 416} __attribute__ ((packed));
413 417
418/**
419 * Command TX_ANT_CONFIGURATION_CMD = 0x98
420 * This command is used to configure valid Tx antenna.
421 * By default uCode concludes the valid antenna according to the radio flavor.
422 * This command enables the driver to override/modify this conclusion.
423 */
424struct iwl_tx_ant_config_cmd {
425 __le32 valid;
426} __attribute__ ((packed));
427
414/****************************************************************************** 428/******************************************************************************
415 * (0a) 429 * (0a)
416 * Alive and Error Commands & Responses: 430 * Alive and Error Commands & Responses:
@@ -793,7 +807,7 @@ struct iwl3945_channel_switch_cmd {
793 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 807 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
794} __attribute__ ((packed)); 808} __attribute__ ((packed));
795 809
796struct iwl_channel_switch_cmd { 810struct iwl4965_channel_switch_cmd {
797 u8 band; 811 u8 band;
798 u8 expect_beacon; 812 u8 expect_beacon;
799 __le16 channel; 813 __le16 channel;
@@ -803,6 +817,48 @@ struct iwl_channel_switch_cmd {
803 struct iwl4965_tx_power_db tx_power; 817 struct iwl4965_tx_power_db tx_power;
804} __attribute__ ((packed)); 818} __attribute__ ((packed));
805 819
820/**
821 * struct iwl5000_channel_switch_cmd
822 * @band: 0- 5.2GHz, 1- 2.4GHz
823 * @expect_beacon: 0- resume transmits after channel switch
824 * 1- wait for beacon to resume transmits
825 * @channel: new channel number
826 * @rxon_flags: Rx on flags
827 * @rxon_filter_flags: filtering parameters
828 * @switch_time: switch time in extended beacon format
829 * @reserved: reserved bytes
830 */
831struct iwl5000_channel_switch_cmd {
832 u8 band;
833 u8 expect_beacon;
834 __le16 channel;
835 __le32 rxon_flags;
836 __le32 rxon_filter_flags;
837 __le32 switch_time;
838 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
839} __attribute__ ((packed));
840
841/**
842 * struct iwl6000_channel_switch_cmd
843 * @band: 0- 5.2GHz, 1- 2.4GHz
844 * @expect_beacon: 0- resume transmits after channel switch
845 * 1- wait for beacon to resume transmits
846 * @channel: new channel number
847 * @rxon_flags: Rx on flags
848 * @rxon_filter_flags: filtering parameters
849 * @switch_time: switch time in extended beacon format
850 * @reserved: reserved bytes
851 */
852struct iwl6000_channel_switch_cmd {
853 u8 band;
854 u8 expect_beacon;
855 __le16 channel;
856 __le32 rxon_flags;
857 __le32 rxon_filter_flags;
858 __le32 switch_time;
859 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
860} __attribute__ ((packed));
861
806/* 862/*
807 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 863 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
808 */ 864 */
@@ -921,6 +977,7 @@ struct iwl_qosparam_cmd {
921#define STA_MODIFY_TX_RATE_MSK 0x04 977#define STA_MODIFY_TX_RATE_MSK 0x04
922#define STA_MODIFY_ADDBA_TID_MSK 0x08 978#define STA_MODIFY_ADDBA_TID_MSK 0x08
923#define STA_MODIFY_DELBA_TID_MSK 0x10 979#define STA_MODIFY_DELBA_TID_MSK 0x10
980#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
924 981
925/* Receiver address (actually, Rx station's index into station table), 982/* Receiver address (actually, Rx station's index into station table),
926 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 983 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
@@ -1051,7 +1108,14 @@ struct iwl4965_addsta_cmd {
1051 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1108 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1052 __le16 add_immediate_ba_ssn; 1109 __le16 add_immediate_ba_ssn;
1053 1110
1054 __le32 reserved2; 1111 /*
1112 * Number of packets OK to transmit to station even though
1113 * it is asleep -- used to synchronise PS-poll and u-APSD
1114 * responses while ucode keeps track of STA sleep state.
1115 */
1116 __le16 sleep_tx_count;
1117
1118 __le16 reserved2;
1055} __attribute__ ((packed)); 1119} __attribute__ ((packed));
1056 1120
1057/* 5000 */ 1121/* 5000 */
@@ -1082,7 +1146,14 @@ struct iwl_addsta_cmd {
1082 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */ 1146 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
1083 __le16 add_immediate_ba_ssn; 1147 __le16 add_immediate_ba_ssn;
1084 1148
1085 __le32 reserved2; 1149 /*
1150 * Number of packets OK to transmit to station even though
1151 * it is asleep -- used to synchronise PS-poll and u-APSD
1152 * responses while ucode keeps track of STA sleep state.
1153 */
1154 __le16 sleep_tx_count;
1155
1156 __le16 reserved2;
1086} __attribute__ ((packed)); 1157} __attribute__ ((packed));
1087 1158
1088 1159
@@ -1634,6 +1705,21 @@ enum {
1634 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1705 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1635}; 1706};
1636 1707
1708static inline u32 iwl_tx_status_to_mac80211(u32 status)
1709{
1710 status &= TX_STATUS_MSK;
1711
1712 switch (status) {
1713 case TX_STATUS_SUCCESS:
1714 case TX_STATUS_DIRECT_DONE:
1715 return IEEE80211_TX_STAT_ACK;
1716 case TX_STATUS_FAIL_DEST_PS:
1717 return IEEE80211_TX_STAT_TX_FILTERED;
1718 default:
1719 return 0;
1720 }
1721}
1722
1637static inline bool iwl_is_tx_success(u32 status) 1723static inline bool iwl_is_tx_success(u32 status)
1638{ 1724{
1639 status &= TX_STATUS_MSK; 1725 status &= TX_STATUS_MSK;
@@ -2162,6 +2248,19 @@ struct iwl_link_quality_cmd {
2162 __le32 reserved2; 2248 __le32 reserved2;
2163} __attribute__ ((packed)); 2249} __attribute__ ((packed));
2164 2250
2251#define BT_COEX_DISABLE (0x0)
2252#define BT_COEX_MODE_2W (0x1)
2253#define BT_COEX_MODE_3W (0x2)
2254#define BT_COEX_MODE_4W (0x3)
2255
2256#define BT_LEAD_TIME_MIN (0x0)
2257#define BT_LEAD_TIME_DEF (0x1E)
2258#define BT_LEAD_TIME_MAX (0xFF)
2259
2260#define BT_MAX_KILL_MIN (0x1)
2261#define BT_MAX_KILL_DEF (0x5)
2262#define BT_MAX_KILL_MAX (0xFF)
2263
2165/* 2264/*
2166 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2265 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2167 * 2266 *
@@ -2497,9 +2596,10 @@ struct iwl_scan_channel {
2497/** 2596/**
2498 * struct iwl_ssid_ie - directed scan network information element 2597 * struct iwl_ssid_ie - directed scan network information element
2499 * 2598 *
2500 * Up to 4 of these may appear in REPLY_SCAN_CMD, selected by "type" field 2599 * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
2501 * in struct iwl_scan_channel; each channel may select different ssids from 2600 * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
2502 * among the 4 entries. SSID IEs get transmitted in reverse order of entry. 2601 * each channel may select different ssids from among the 20 (4) entries.
2602 * SSID IEs get transmitted in reverse order of entry.
2503 */ 2603 */
2504struct iwl_ssid_ie { 2604struct iwl_ssid_ie {
2505 u8 id; 2605 u8 id;
@@ -3001,6 +3101,10 @@ struct statistics_general {
3001 __le32 reserved3; 3101 __le32 reserved3;
3002} __attribute__ ((packed)); 3102} __attribute__ ((packed));
3003 3103
3104#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
3105#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
3106#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
3107
3004/* 3108/*
3005 * REPLY_STATISTICS_CMD = 0x9c, 3109 * REPLY_STATISTICS_CMD = 0x9c,
3006 * 3945 and 4965 identical. 3110 * 3945 and 4965 identical.
@@ -3237,12 +3341,6 @@ struct iwl_missed_beacon_notif {
3237 * Lower values mean higher energy; this means making sure that the value 3341 * Lower values mean higher energy; this means making sure that the value
3238 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 3342 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3239 * 3343 *
3240 * Driver should set the following entries to fixed values:
3241 *
3242 * HD_MIN_ENERGY_OFDM_DET_INDEX 100
3243 * HD_BARKER_CORR_TH_ADD_MIN_INDEX 190
3244 * HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX 390
3245 * HD_OFDM_ENERGY_TH_IN_INDEX 62
3246 */ 3344 */
3247 3345
3248/* 3346/*
@@ -3440,30 +3538,134 @@ struct iwl_led_cmd {
3440} __attribute__ ((packed)); 3538} __attribute__ ((packed));
3441 3539
3442/* 3540/*
3443 * Coexistence WIFI/WIMAX Command 3541 * station priority table entries
3444 * COEX_PRIORITY_TABLE_CMD = 0x5a 3542 * also used as potential "events" value for both
3445 * 3543 * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD
3544 */
3545
3546/*
3547 * COEX events entry flag masks
3548 * RP - Requested Priority
3549 * WP - Win Medium Priority: priority assigned when the contention has been won
3550 */
3551#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG (0x1)
3552#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG (0x2)
3553#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG (0x4)
3554
3555#define COEX_CU_UNASSOC_IDLE_RP 4
3556#define COEX_CU_UNASSOC_MANUAL_SCAN_RP 4
3557#define COEX_CU_UNASSOC_AUTO_SCAN_RP 4
3558#define COEX_CU_CALIBRATION_RP 4
3559#define COEX_CU_PERIODIC_CALIBRATION_RP 4
3560#define COEX_CU_CONNECTION_ESTAB_RP 4
3561#define COEX_CU_ASSOCIATED_IDLE_RP 4
3562#define COEX_CU_ASSOC_MANUAL_SCAN_RP 4
3563#define COEX_CU_ASSOC_AUTO_SCAN_RP 4
3564#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP 4
3565#define COEX_CU_RF_ON_RP 6
3566#define COEX_CU_RF_OFF_RP 4
3567#define COEX_CU_STAND_ALONE_DEBUG_RP 6
3568#define COEX_CU_IPAN_ASSOC_LEVEL_RP 4
3569#define COEX_CU_RSRVD1_RP 4
3570#define COEX_CU_RSRVD2_RP 4
3571
3572#define COEX_CU_UNASSOC_IDLE_WP 3
3573#define COEX_CU_UNASSOC_MANUAL_SCAN_WP 3
3574#define COEX_CU_UNASSOC_AUTO_SCAN_WP 3
3575#define COEX_CU_CALIBRATION_WP 3
3576#define COEX_CU_PERIODIC_CALIBRATION_WP 3
3577#define COEX_CU_CONNECTION_ESTAB_WP 3
3578#define COEX_CU_ASSOCIATED_IDLE_WP 3
3579#define COEX_CU_ASSOC_MANUAL_SCAN_WP 3
3580#define COEX_CU_ASSOC_AUTO_SCAN_WP 3
3581#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP 3
3582#define COEX_CU_RF_ON_WP 3
3583#define COEX_CU_RF_OFF_WP 3
3584#define COEX_CU_STAND_ALONE_DEBUG_WP 6
3585#define COEX_CU_IPAN_ASSOC_LEVEL_WP 3
3586#define COEX_CU_RSRVD1_WP 3
3587#define COEX_CU_RSRVD2_WP 3
3588
3589#define COEX_UNASSOC_IDLE_FLAGS 0
3590#define COEX_UNASSOC_MANUAL_SCAN_FLAGS \
3591 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3592 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3593#define COEX_UNASSOC_AUTO_SCAN_FLAGS \
3594 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3595 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3596#define COEX_CALIBRATION_FLAGS \
3597 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3598 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3599#define COEX_PERIODIC_CALIBRATION_FLAGS 0
3600/*
3601 * COEX_CONNECTION_ESTAB:
3602 * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3603 */
3604#define COEX_CONNECTION_ESTAB_FLAGS \
3605 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3606 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3607 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3608#define COEX_ASSOCIATED_IDLE_FLAGS 0
3609#define COEX_ASSOC_MANUAL_SCAN_FLAGS \
3610 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3611 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3612#define COEX_ASSOC_AUTO_SCAN_FLAGS \
3613 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3614 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3615#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
3616#define COEX_RF_ON_FLAGS 0
3617#define COEX_RF_OFF_FLAGS 0
3618#define COEX_STAND_ALONE_DEBUG_FLAGS \
3619 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3620 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3621#define COEX_IPAN_ASSOC_LEVEL_FLAGS \
3622 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3623 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3624 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3625#define COEX_RSRVD1_FLAGS 0
3626#define COEX_RSRVD2_FLAGS 0
3627/*
3628 * COEX_CU_RF_ON is the event wrapping all radio ownership.
3629 * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3446 */ 3630 */
3631#define COEX_CU_RF_ON_FLAGS \
3632 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3633 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3634 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3635
3636
3447enum { 3637enum {
3638 /* un-association part */
3448 COEX_UNASSOC_IDLE = 0, 3639 COEX_UNASSOC_IDLE = 0,
3449 COEX_UNASSOC_MANUAL_SCAN = 1, 3640 COEX_UNASSOC_MANUAL_SCAN = 1,
3450 COEX_UNASSOC_AUTO_SCAN = 2, 3641 COEX_UNASSOC_AUTO_SCAN = 2,
3642 /* calibration */
3451 COEX_CALIBRATION = 3, 3643 COEX_CALIBRATION = 3,
3452 COEX_PERIODIC_CALIBRATION = 4, 3644 COEX_PERIODIC_CALIBRATION = 4,
3645 /* connection */
3453 COEX_CONNECTION_ESTAB = 5, 3646 COEX_CONNECTION_ESTAB = 5,
3647 /* association part */
3454 COEX_ASSOCIATED_IDLE = 6, 3648 COEX_ASSOCIATED_IDLE = 6,
3455 COEX_ASSOC_MANUAL_SCAN = 7, 3649 COEX_ASSOC_MANUAL_SCAN = 7,
3456 COEX_ASSOC_AUTO_SCAN = 8, 3650 COEX_ASSOC_AUTO_SCAN = 8,
3457 COEX_ASSOC_ACTIVE_LEVEL = 9, 3651 COEX_ASSOC_ACTIVE_LEVEL = 9,
3652 /* RF ON/OFF */
3458 COEX_RF_ON = 10, 3653 COEX_RF_ON = 10,
3459 COEX_RF_OFF = 11, 3654 COEX_RF_OFF = 11,
3460 COEX_STAND_ALONE_DEBUG = 12, 3655 COEX_STAND_ALONE_DEBUG = 12,
3656 /* IPAN */
3461 COEX_IPAN_ASSOC_LEVEL = 13, 3657 COEX_IPAN_ASSOC_LEVEL = 13,
3658 /* reserved */
3462 COEX_RSRVD1 = 14, 3659 COEX_RSRVD1 = 14,
3463 COEX_RSRVD2 = 15, 3660 COEX_RSRVD2 = 15,
3464 COEX_NUM_OF_EVENTS = 16 3661 COEX_NUM_OF_EVENTS = 16
3465}; 3662};
3466 3663
3664/*
3665 * Coexistence WIFI/WIMAX Command
3666 * COEX_PRIORITY_TABLE_CMD = 0x5a
3667 *
3668 */
3467struct iwl_wimax_coex_event_entry { 3669struct iwl_wimax_coex_event_entry {
3468 u8 request_prio; 3670 u8 request_prio;
3469 u8 win_medium_prio; 3671 u8 win_medium_prio;
@@ -3488,6 +3690,55 @@ struct iwl_wimax_coex_cmd {
3488 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; 3690 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
3489} __attribute__ ((packed)); 3691} __attribute__ ((packed));
3490 3692
3693/*
3694 * Coexistence MEDIUM NOTIFICATION
3695 * COEX_MEDIUM_NOTIFICATION = 0x5b
3696 *
3697 * notification from uCode to host to indicate medium changes
3698 *
3699 */
3700/*
3701 * status field
3702 * bit 0 - 2: medium status
3703 * bit 3: medium change indication
3704 * bit 4 - 31: reserved
3705 */
3706/* status option values, (0 - 2 bits) */
3707#define COEX_MEDIUM_BUSY (0x0) /* radio belongs to WiMAX */
3708#define COEX_MEDIUM_ACTIVE (0x1) /* radio belongs to WiFi */
3709#define COEX_MEDIUM_PRE_RELEASE (0x2) /* received radio release */
3710#define COEX_MEDIUM_MSK (0x7)
3711
3712/* send notification status (1 bit) */
3713#define COEX_MEDIUM_CHANGED (0x8)
3714#define COEX_MEDIUM_CHANGED_MSK (0x8)
3715#define COEX_MEDIUM_SHIFT (3)
3716
3717struct iwl_coex_medium_notification {
3718 __le32 status;
3719 __le32 events;
3720} __attribute__ ((packed));
3721
3722/*
3723 * Coexistence EVENT Command
3724 * COEX_EVENT_CMD = 0x5c
3725 *
3726 * send from host to uCode for coex event request.
3727 */
3728/* flags options */
3729#define COEX_EVENT_REQUEST_MSK (0x1)
3730
3731struct iwl_coex_event_cmd {
3732 u8 flags;
3733 u8 event;
3734 __le16 reserved;
3735} __attribute__ ((packed));
3736
3737struct iwl_coex_event_resp {
3738 __le32 status;
3739} __attribute__ ((packed));
3740
3741
3491/****************************************************************************** 3742/******************************************************************************
3492 * (13) 3743 * (13)
3493 * Union of all expected notifications/responses: 3744 * Union of all expected notifications/responses:
@@ -3495,6 +3746,16 @@ struct iwl_wimax_coex_cmd {
3495 *****************************************************************************/ 3746 *****************************************************************************/
3496 3747
3497struct iwl_rx_packet { 3748struct iwl_rx_packet {
3749 /*
3750 * The first 4 bytes of the RX frame header contain both the RX frame
3751 * size and some flags.
3752 * Bit fields:
3753 * 31: flag flush RB request
3754 * 30: flag ignore TC (terminal counter) request
3755 * 29: flag fast IRQ request
3756 * 28-14: Reserved
3757 * 13-00: RX frame size
3758 */
3498 __le32 len_n_flags; 3759 __le32 len_n_flags;
3499 struct iwl_cmd_header hdr; 3760 struct iwl_cmd_header hdr;
3500 union { 3761 union {
@@ -3514,6 +3775,8 @@ struct iwl_rx_packet {
3514 struct iwl_notif_statistics stats; 3775 struct iwl_notif_statistics stats;
3515 struct iwl_compressed_ba_resp compressed_ba; 3776 struct iwl_compressed_ba_resp compressed_ba;
3516 struct iwl_missed_beacon_notif missed_beacon; 3777 struct iwl_missed_beacon_notif missed_beacon;
3778 struct iwl_coex_medium_notification coex_medium_notif;
3779 struct iwl_coex_event_resp coex_event;
3517 __le32 status; 3780 __le32 status;
3518 u8 raw[0]; 3781 u8 raw[0];
3519 } u; 3782 } u;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 2dc928755454..574d36658702 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -47,6 +47,37 @@ MODULE_VERSION(IWLWIFI_VERSION);
47MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 47MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
48MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
49 49
50static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
51 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
52 0, COEX_UNASSOC_IDLE_FLAGS},
53 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
54 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
55 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
56 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
57 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
58 0, COEX_CALIBRATION_FLAGS},
59 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
60 0, COEX_PERIODIC_CALIBRATION_FLAGS},
61 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
62 0, COEX_CONNECTION_ESTAB_FLAGS},
63 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
64 0, COEX_ASSOCIATED_IDLE_FLAGS},
65 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
66 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
67 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
68 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
69 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
70 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
71 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
72 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
73 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
74 0, COEX_STAND_ALONE_DEBUG_FLAGS},
75 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
76 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
77 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
78 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
79};
80
50#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ 81#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
51 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 82 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
52 IWL_RATE_SISO_##s##M_PLCP, \ 83 IWL_RATE_SISO_##s##M_PLCP, \
@@ -178,6 +209,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
178 } 209 }
179 return ant; 210 return ant;
180} 211}
212EXPORT_SYMBOL(iwl_toggle_tx_ant);
181 213
182const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 214const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
183EXPORT_SYMBOL(iwl_bcast_addr); 215EXPORT_SYMBOL(iwl_bcast_addr);
@@ -224,7 +256,10 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
224 /* nic_init */ 256 /* nic_init */
225 spin_lock_irqsave(&priv->lock, flags); 257 spin_lock_irqsave(&priv->lock, flags);
226 priv->cfg->ops->lib->apm_ops.init(priv); 258 priv->cfg->ops->lib->apm_ops.init(priv);
227 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32); 259
260 /* Set interrupt coalescing timer to 512 usecs */
261 iwl_write8(priv, CSR_INT_COALESCING, 512 / 32);
262
228 spin_unlock_irqrestore(&priv->lock, flags); 263 spin_unlock_irqrestore(&priv->lock, flags);
229 264
230 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN); 265 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
@@ -416,8 +451,7 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
416 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; 451 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
417 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 452 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
418 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS & 453 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
419 (WLAN_HT_CAP_SM_PS_DISABLED << 2)); 454 (priv->cfg->sm_ps_mode << 2));
420
421 max_bit_rate = MAX_BIT_RATE_20_MHZ; 455 max_bit_rate = MAX_BIT_RATE_20_MHZ;
422 if (priv->hw_params.ht40_channel & BIT(band)) { 456 if (priv->hw_params.ht40_channel & BIT(band)) {
423 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 457 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -452,28 +486,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
452 } 486 }
453} 487}
454 488
455static void iwlcore_init_hw_rates(struct iwl_priv *priv,
456 struct ieee80211_rate *rates)
457{
458 int i;
459
460 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
461 rates[i].bitrate = iwl_rates[i].ieee * 5;
462 rates[i].hw_value = i; /* Rate scaling will work on indexes */
463 rates[i].hw_value_short = i;
464 rates[i].flags = 0;
465 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
466 /*
467 * If CCK != 1M then set short preamble rate flag.
468 */
469 rates[i].flags |=
470 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
471 0 : IEEE80211_RATE_SHORT_PREAMBLE;
472 }
473 }
474}
475
476
477/** 489/**
478 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom 490 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
479 */ 491 */
@@ -605,11 +617,27 @@ void iwlcore_free_geos(struct iwl_priv *priv)
605} 617}
606EXPORT_SYMBOL(iwlcore_free_geos); 618EXPORT_SYMBOL(iwlcore_free_geos);
607 619
620/*
621 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
622 * function.
623 */
624void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
625 __le32 *tx_flags)
626{
627 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
628 *tx_flags |= TX_CMD_FLG_RTS_MSK;
629 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
630 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
631 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
632 *tx_flags |= TX_CMD_FLG_CTS_MSK;
633 }
634}
635EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
636
608static bool is_single_rx_stream(struct iwl_priv *priv) 637static bool is_single_rx_stream(struct iwl_priv *priv)
609{ 638{
610 return !priv->current_ht_config.is_ht || 639 return !priv->current_ht_config.is_ht ||
611 ((priv->current_ht_config.mcs.rx_mask[1] == 0) && 640 priv->current_ht_config.single_chain_sufficient;
612 (priv->current_ht_config.mcs.rx_mask[2] == 0));
613} 641}
614 642
615static u8 iwl_is_channel_extension(struct iwl_priv *priv, 643static u8 iwl_is_channel_extension(struct iwl_priv *priv,
@@ -635,10 +663,9 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
635u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 663u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
636 struct ieee80211_sta_ht_cap *sta_ht_inf) 664 struct ieee80211_sta_ht_cap *sta_ht_inf)
637{ 665{
638 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config; 666 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
639 667
640 if ((!iwl_ht_conf->is_ht) || 668 if (!ht_conf->is_ht || !ht_conf->is_40mhz)
641 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ))
642 return 0; 669 return 0;
643 670
644 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 671 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
@@ -654,7 +681,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
654#endif 681#endif
655 return iwl_is_channel_extension(priv, priv->band, 682 return iwl_is_channel_extension(priv, priv->band,
656 le16_to_cpu(priv->staging_rxon.channel), 683 le16_to_cpu(priv->staging_rxon.channel),
657 iwl_ht_conf->extension_chan_offset); 684 ht_conf->extension_chan_offset);
658} 685}
659EXPORT_SYMBOL(iwl_is_ht40_tx_allowed); 686EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
660 687
@@ -878,11 +905,11 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
878} 905}
879EXPORT_SYMBOL(iwl_rate_get_lowest_plcp); 906EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
880 907
881void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info) 908void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
882{ 909{
883 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 910 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
884 911
885 if (!ht_info->is_ht) { 912 if (!ht_conf->is_ht) {
886 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | 913 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
887 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | 914 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
888 RXON_FLG_HT40_PROT_MSK | 915 RXON_FLG_HT40_PROT_MSK |
@@ -893,7 +920,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
893 /* FIXME: if the definition of ht_protection changed, the "translation" 920 /* FIXME: if the definition of ht_protection changed, the "translation"
894 * will be needed for rxon->flags 921 * will be needed for rxon->flags
895 */ 922 */
896 rxon->flags |= cpu_to_le32(ht_info->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS); 923 rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
897 924
898 /* Set up channel bandwidth: 925 /* Set up channel bandwidth:
899 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ 926 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
@@ -902,10 +929,10 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
902 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 929 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
903 if (iwl_is_ht40_tx_allowed(priv, NULL)) { 930 if (iwl_is_ht40_tx_allowed(priv, NULL)) {
904 /* pure ht40 */ 931 /* pure ht40 */
905 if (ht_info->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { 932 if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
906 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 933 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
907 /* Note: control channel is opposite of extension channel */ 934 /* Note: control channel is opposite of extension channel */
908 switch (ht_info->extension_chan_offset) { 935 switch (ht_conf->extension_chan_offset) {
909 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 936 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
910 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 937 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
911 break; 938 break;
@@ -915,7 +942,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
915 } 942 }
916 } else { 943 } else {
917 /* Note: control channel is opposite of extension channel */ 944 /* Note: control channel is opposite of extension channel */
918 switch (ht_info->extension_chan_offset) { 945 switch (ht_conf->extension_chan_offset) {
919 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 946 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
920 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 947 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
921 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 948 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
@@ -938,14 +965,10 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
938 if (priv->cfg->ops->hcmd->set_rxon_chain) 965 if (priv->cfg->ops->hcmd->set_rxon_chain)
939 priv->cfg->ops->hcmd->set_rxon_chain(priv); 966 priv->cfg->ops->hcmd->set_rxon_chain(priv);
940 967
941 IWL_DEBUG_ASSOC(priv, "supported HT rate 0x%X 0x%X 0x%X " 968 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
942 "rxon flags 0x%X operation mode :0x%X "
943 "extension channel offset 0x%x\n", 969 "extension channel offset 0x%x\n",
944 ht_info->mcs.rx_mask[0], 970 le32_to_cpu(rxon->flags), ht_conf->ht_protection,
945 ht_info->mcs.rx_mask[1], 971 ht_conf->extension_chan_offset);
946 ht_info->mcs.rx_mask[2],
947 le32_to_cpu(rxon->flags), ht_info->ht_protection,
948 ht_info->extension_chan_offset);
949 return; 972 return;
950} 973}
951EXPORT_SYMBOL(iwl_set_rxon_ht); 974EXPORT_SYMBOL(iwl_set_rxon_ht);
@@ -955,44 +978,50 @@ EXPORT_SYMBOL(iwl_set_rxon_ht);
955#define IWL_NUM_IDLE_CHAINS_DUAL 2 978#define IWL_NUM_IDLE_CHAINS_DUAL 2
956#define IWL_NUM_IDLE_CHAINS_SINGLE 1 979#define IWL_NUM_IDLE_CHAINS_SINGLE 1
957 980
958/* Determine how many receiver/antenna chains to use. 981/*
959 * More provides better reception via diversity. Fewer saves power. 982 * Determine how many receiver/antenna chains to use.
983 *
984 * More provides better reception via diversity. Fewer saves power
985 * at the expense of throughput, but only when not in powersave to
986 * start with.
987 *
960 * MIMO (dual stream) requires at least 2, but works better with 3. 988 * MIMO (dual stream) requires at least 2, but works better with 3.
961 * This does not determine *which* chains to use, just how many. 989 * This does not determine *which* chains to use, just how many.
962 */ 990 */
963static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) 991static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
964{ 992{
965 bool is_single = is_single_rx_stream(priv);
966 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
967
968 /* # of Rx chains to use when expecting MIMO. */ 993 /* # of Rx chains to use when expecting MIMO. */
969 if (is_single || (!is_cam && (priv->current_ht_config.sm_ps == 994 if (is_single_rx_stream(priv))
970 WLAN_HT_CAP_SM_PS_STATIC)))
971 return IWL_NUM_RX_CHAINS_SINGLE; 995 return IWL_NUM_RX_CHAINS_SINGLE;
972 else 996 else
973 return IWL_NUM_RX_CHAINS_MULTIPLE; 997 return IWL_NUM_RX_CHAINS_MULTIPLE;
974} 998}
975 999
1000/*
1001 * When we are in power saving mode, unless device support spatial
1002 * multiplexing power save, use the active count for rx chain count.
1003 */
976static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) 1004static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
977{ 1005{
978 int idle_cnt; 1006 int idle_cnt = active_cnt;
979 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 1007 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
1008
980 /* # Rx chains when idling and maybe trying to save power */ 1009 /* # Rx chains when idling and maybe trying to save power */
981 switch (priv->current_ht_config.sm_ps) { 1010 switch (priv->cfg->sm_ps_mode) {
982 case WLAN_HT_CAP_SM_PS_STATIC: 1011 case WLAN_HT_CAP_SM_PS_STATIC:
1012 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
1013 break;
983 case WLAN_HT_CAP_SM_PS_DYNAMIC: 1014 case WLAN_HT_CAP_SM_PS_DYNAMIC:
984 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL : 1015 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
985 IWL_NUM_IDLE_CHAINS_SINGLE; 1016 IWL_NUM_IDLE_CHAINS_SINGLE;
986 break; 1017 break;
987 case WLAN_HT_CAP_SM_PS_DISABLED: 1018 case WLAN_HT_CAP_SM_PS_DISABLED:
988 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
989 break; 1019 break;
990 case WLAN_HT_CAP_SM_PS_INVALID: 1020 case WLAN_HT_CAP_SM_PS_INVALID:
991 default: 1021 default:
992 IWL_ERR(priv, "invalid mimo ps mode %d\n", 1022 IWL_ERR(priv, "invalid sm_ps mode %u\n",
993 priv->current_ht_config.sm_ps); 1023 priv->cfg->sm_ps_mode);
994 WARN_ON(1); 1024 WARN_ON(1);
995 idle_cnt = -1;
996 break; 1025 break;
997 } 1026 }
998 return idle_cnt; 1027 return idle_cnt;
@@ -1005,7 +1034,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
1005 res = (chain_bitmap & BIT(0)) >> 0; 1034 res = (chain_bitmap & BIT(0)) >> 0;
1006 res += (chain_bitmap & BIT(1)) >> 1; 1035 res += (chain_bitmap & BIT(1)) >> 1;
1007 res += (chain_bitmap & BIT(2)) >> 2; 1036 res += (chain_bitmap & BIT(2)) >> 2;
1008 res += (chain_bitmap & BIT(4)) >> 4; 1037 res += (chain_bitmap & BIT(3)) >> 3;
1009 return res; 1038 return res;
1010} 1039}
1011 1040
@@ -1281,18 +1310,28 @@ static void iwl_set_rate(struct iwl_priv *priv)
1281 1310
1282void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1311void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1283{ 1312{
1284 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1313 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1285 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; 1314 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1286 struct iwl_csa_notification *csa = &(pkt->u.csa_notif); 1315 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
1287 IWL_DEBUG_11H(priv, "CSA notif: channel %d, status %d\n", 1316
1288 le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); 1317 if (priv->switch_rxon.switch_in_progress) {
1289 rxon->channel = csa->channel; 1318 if (!le32_to_cpu(csa->status) &&
1290 priv->staging_rxon.channel = csa->channel; 1319 (csa->channel == priv->switch_rxon.channel)) {
1320 rxon->channel = csa->channel;
1321 priv->staging_rxon.channel = csa->channel;
1322 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
1323 le16_to_cpu(csa->channel));
1324 } else
1325 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
1326 le16_to_cpu(csa->channel));
1327
1328 priv->switch_rxon.switch_in_progress = false;
1329 }
1291} 1330}
1292EXPORT_SYMBOL(iwl_rx_csa); 1331EXPORT_SYMBOL(iwl_rx_csa);
1293 1332
1294#ifdef CONFIG_IWLWIFI_DEBUG 1333#ifdef CONFIG_IWLWIFI_DEBUG
1295static void iwl_print_rx_config_cmd(struct iwl_priv *priv) 1334void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1296{ 1335{
1297 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 1336 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1298 1337
@@ -1310,6 +1349,7 @@ static void iwl_print_rx_config_cmd(struct iwl_priv *priv)
1310 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr); 1349 IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
1311 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); 1350 IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
1312} 1351}
1352EXPORT_SYMBOL(iwl_print_rx_config_cmd);
1313#endif 1353#endif
1314/** 1354/**
1315 * iwl_irq_handle_error - called for HW or SW error interrupt from card 1355 * iwl_irq_handle_error - called for HW or SW error interrupt from card
@@ -1322,12 +1362,11 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1322 /* Cancel currently queued command. */ 1362 /* Cancel currently queued command. */
1323 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1363 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1324 1364
1365 priv->cfg->ops->lib->dump_nic_error_log(priv);
1366 priv->cfg->ops->lib->dump_nic_event_log(priv, false);
1325#ifdef CONFIG_IWLWIFI_DEBUG 1367#ifdef CONFIG_IWLWIFI_DEBUG
1326 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) { 1368 if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
1327 priv->cfg->ops->lib->dump_nic_error_log(priv);
1328 priv->cfg->ops->lib->dump_nic_event_log(priv);
1329 iwl_print_rx_config_cmd(priv); 1369 iwl_print_rx_config_cmd(priv);
1330 }
1331#endif 1370#endif
1332 1371
1333 wake_up_interruptible(&priv->wait_command_queue); 1372 wake_up_interruptible(&priv->wait_command_queue);
@@ -1346,6 +1385,160 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1346} 1385}
1347EXPORT_SYMBOL(iwl_irq_handle_error); 1386EXPORT_SYMBOL(iwl_irq_handle_error);
1348 1387
1388int iwl_apm_stop_master(struct iwl_priv *priv)
1389{
1390 int ret = 0;
1391
1392 /* stop device's busmaster DMA activity */
1393 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1394
1395 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
1396 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1397 if (ret)
1398 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
1399
1400 IWL_DEBUG_INFO(priv, "stop master\n");
1401
1402 return ret;
1403}
1404EXPORT_SYMBOL(iwl_apm_stop_master);
1405
1406void iwl_apm_stop(struct iwl_priv *priv)
1407{
1408 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
1409
1410 /* Stop device's DMA activity */
1411 iwl_apm_stop_master(priv);
1412
1413 /* Reset the entire device */
1414 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1415
1416 udelay(10);
1417
1418 /*
1419 * Clear "initialization complete" bit to move adapter from
1420 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1421 */
1422 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1423}
1424EXPORT_SYMBOL(iwl_apm_stop);
1425
1426
1427/*
1428 * Start up NIC's basic functionality after it has been reset
1429 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1430 * NOTE: This does not load uCode nor start the embedded processor
1431 */
1432int iwl_apm_init(struct iwl_priv *priv)
1433{
1434 int ret = 0;
1435 u16 lctl;
1436
1437 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1438
1439 /*
1440 * Use "set_bit" below rather than "write", to preserve any hardware
1441 * bits already set by default after reset.
1442 */
1443
1444 /* Disable L0S exit timer (platform NMI Work/Around) */
1445 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1446 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1447
1448 /*
1449 * Disable L0s without affecting L1;
1450 * don't wait for ICH L0s (ICH bug W/A)
1451 */
1452 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1453 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1454
1455 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1456 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1457
1458 /*
1459 * Enable HAP INTA (interrupt from management bus) to
1460 * wake device's PCI Express link L1a -> L0s
1461 * NOTE: This is no-op for 3945 (non-existant bit)
1462 */
1463 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1464 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1465
1466 /*
1467 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1468 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1469 * If so (likely), disable L0S, so device moves directly L0->L1;
1470 * costs negligible amount of power savings.
1471 * If not (unlikely), enable L0S, so there is at least some
1472 * power savings, even without L1.
1473 */
1474 if (priv->cfg->set_l0s) {
1475 lctl = iwl_pcie_link_ctl(priv);
1476 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1477 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1478 /* L1-ASPM enabled; disable(!) L0S */
1479 iwl_set_bit(priv, CSR_GIO_REG,
1480 CSR_GIO_REG_VAL_L0S_ENABLED);
1481 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1482 } else {
1483 /* L1-ASPM disabled; enable(!) L0S */
1484 iwl_clear_bit(priv, CSR_GIO_REG,
1485 CSR_GIO_REG_VAL_L0S_ENABLED);
1486 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1487 }
1488 }
1489
1490 /* Configure analog phase-lock-loop before activating to D0A */
1491 if (priv->cfg->pll_cfg_val)
1492 iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
1493
1494 /*
1495 * Set "initialization complete" bit to move adapter from
1496 * D0U* --> D0A* (powered-up active) state.
1497 */
1498 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1499
1500 /*
1501 * Wait for clock stabilization; once stabilized, access to
1502 * device-internal resources is supported, e.g. iwl_write_prph()
1503 * and accesses to uCode SRAM.
1504 */
1505 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1506 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1507 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1508 if (ret < 0) {
1509 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1510 goto out;
1511 }
1512
1513 /*
1514 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1515 * BSM (Boostrap State Machine) is only in 3945 and 4965;
1516 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1517 * and don't need BSM to restore data after power-saving sleep.
1518 *
1519 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1520 * do not disable clocks. This preserves any hardware bits already
1521 * set by default in "CLK_CTRL_REG" after reset.
1522 */
1523 if (priv->cfg->use_bsm)
1524 iwl_write_prph(priv, APMG_CLK_EN_REG,
1525 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1526 else
1527 iwl_write_prph(priv, APMG_CLK_EN_REG,
1528 APMG_CLK_VAL_DMA_CLK_RQT);
1529 udelay(20);
1530
1531 /* Disable L1-Active */
1532 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1533 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1534
1535out:
1536 return ret;
1537}
1538EXPORT_SYMBOL(iwl_apm_init);
1539
1540
1541
1349void iwl_configure_filter(struct ieee80211_hw *hw, 1542void iwl_configure_filter(struct ieee80211_hw *hw,
1350 unsigned int changed_flags, 1543 unsigned int changed_flags,
1351 unsigned int *total_flags, 1544 unsigned int *total_flags,
@@ -1393,73 +1586,14 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
1393} 1586}
1394EXPORT_SYMBOL(iwl_configure_filter); 1587EXPORT_SYMBOL(iwl_configure_filter);
1395 1588
1396int iwl_setup_mac(struct iwl_priv *priv)
1397{
1398 int ret;
1399 struct ieee80211_hw *hw = priv->hw;
1400 hw->rate_control_algorithm = "iwl-agn-rs";
1401
1402 /* Tell mac80211 our characteristics */
1403 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1404 IEEE80211_HW_NOISE_DBM |
1405 IEEE80211_HW_AMPDU_AGGREGATION |
1406 IEEE80211_HW_SPECTRUM_MGMT;
1407
1408 if (!priv->cfg->broken_powersave)
1409 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
1410 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
1411
1412 hw->wiphy->interface_modes =
1413 BIT(NL80211_IFTYPE_STATION) |
1414 BIT(NL80211_IFTYPE_ADHOC);
1415
1416 hw->wiphy->custom_regulatory = true;
1417
1418 /* Firmware does not support this */
1419 hw->wiphy->disable_beacon_hints = true;
1420
1421 /*
1422 * For now, disable PS by default because it affects
1423 * RX performance significantly.
1424 */
1425 hw->wiphy->ps_default = false;
1426
1427 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
1428 /* we create the 802.11 header and a zero-length SSID element */
1429 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
1430
1431 /* Default value; 4 EDCA QOS priorities */
1432 hw->queues = 4;
1433
1434 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
1435
1436 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
1437 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1438 &priv->bands[IEEE80211_BAND_2GHZ];
1439 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
1440 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1441 &priv->bands[IEEE80211_BAND_5GHZ];
1442
1443 ret = ieee80211_register_hw(priv->hw);
1444 if (ret) {
1445 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
1446 return ret;
1447 }
1448 priv->mac80211_registered = 1;
1449
1450 return 0;
1451}
1452EXPORT_SYMBOL(iwl_setup_mac);
1453
1454int iwl_set_hw_params(struct iwl_priv *priv) 1589int iwl_set_hw_params(struct iwl_priv *priv)
1455{ 1590{
1456 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 1591 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1457 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 1592 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1458 if (priv->cfg->mod_params->amsdu_size_8K) 1593 if (priv->cfg->mod_params->amsdu_size_8K)
1459 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K; 1594 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
1460 else 1595 else
1461 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K; 1596 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
1462 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
1463 1597
1464 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; 1598 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
1465 1599
@@ -1471,71 +1605,6 @@ int iwl_set_hw_params(struct iwl_priv *priv)
1471} 1605}
1472EXPORT_SYMBOL(iwl_set_hw_params); 1606EXPORT_SYMBOL(iwl_set_hw_params);
1473 1607
1474int iwl_init_drv(struct iwl_priv *priv)
1475{
1476 int ret;
1477
1478 priv->ibss_beacon = NULL;
1479
1480 spin_lock_init(&priv->lock);
1481 spin_lock_init(&priv->sta_lock);
1482 spin_lock_init(&priv->hcmd_lock);
1483
1484 INIT_LIST_HEAD(&priv->free_frames);
1485
1486 mutex_init(&priv->mutex);
1487
1488 /* Clear the driver's (not device's) station table */
1489 iwl_clear_stations_table(priv);
1490
1491 priv->data_retry_limit = -1;
1492 priv->ieee_channels = NULL;
1493 priv->ieee_rates = NULL;
1494 priv->band = IEEE80211_BAND_2GHZ;
1495
1496 priv->iw_mode = NL80211_IFTYPE_STATION;
1497
1498 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
1499
1500 /* Choose which receivers/antennas to use */
1501 if (priv->cfg->ops->hcmd->set_rxon_chain)
1502 priv->cfg->ops->hcmd->set_rxon_chain(priv);
1503
1504 iwl_init_scan_params(priv);
1505
1506 iwl_reset_qos(priv);
1507
1508 priv->qos_data.qos_active = 0;
1509 priv->qos_data.qos_cap.val = 0;
1510
1511 priv->rates_mask = IWL_RATES_MASK;
1512 /* Set the tx_power_user_lmt to the lowest power level
1513 * this value will get overwritten by channel max power avg
1514 * from eeprom */
1515 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
1516
1517 ret = iwl_init_channel_map(priv);
1518 if (ret) {
1519 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
1520 goto err;
1521 }
1522
1523 ret = iwlcore_init_geos(priv);
1524 if (ret) {
1525 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
1526 goto err_free_channel_map;
1527 }
1528 iwlcore_init_hw_rates(priv, priv->ieee_rates);
1529
1530 return 0;
1531
1532err_free_channel_map:
1533 iwl_free_channel_map(priv);
1534err:
1535 return ret;
1536}
1537EXPORT_SYMBOL(iwl_init_drv);
1538
1539int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 1608int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1540{ 1609{
1541 int ret = 0; 1610 int ret = 0;
@@ -1583,15 +1652,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1583} 1652}
1584EXPORT_SYMBOL(iwl_set_tx_power); 1653EXPORT_SYMBOL(iwl_set_tx_power);
1585 1654
1586void iwl_uninit_drv(struct iwl_priv *priv)
1587{
1588 iwl_calib_free_results(priv);
1589 iwlcore_free_geos(priv);
1590 iwl_free_channel_map(priv);
1591 kfree(priv->scan);
1592}
1593EXPORT_SYMBOL(iwl_uninit_drv);
1594
1595#define ICT_COUNT (PAGE_SIZE/sizeof(u32)) 1655#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
1596 1656
1597/* Free dram table */ 1657/* Free dram table */
@@ -1915,9 +1975,9 @@ EXPORT_SYMBOL(iwl_isr_legacy);
1915int iwl_send_bt_config(struct iwl_priv *priv) 1975int iwl_send_bt_config(struct iwl_priv *priv)
1916{ 1976{
1917 struct iwl_bt_cmd bt_cmd = { 1977 struct iwl_bt_cmd bt_cmd = {
1918 .flags = 3, 1978 .flags = BT_COEX_MODE_4W,
1919 .lead_time = 0xAA, 1979 .lead_time = BT_LEAD_TIME_DEF,
1920 .max_kill = 1, 1980 .max_kill = BT_MAX_KILL_DEF,
1921 .kill_ack_mask = 0, 1981 .kill_ack_mask = 0,
1922 .kill_cts_mask = 0, 1982 .kill_cts_mask = 0,
1923 }; 1983 };
@@ -1927,16 +1987,21 @@ int iwl_send_bt_config(struct iwl_priv *priv)
1927} 1987}
1928EXPORT_SYMBOL(iwl_send_bt_config); 1988EXPORT_SYMBOL(iwl_send_bt_config);
1929 1989
1930int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags) 1990int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
1931{ 1991{
1932 u32 stat_flags = 0; 1992 struct iwl_statistics_cmd statistics_cmd = {
1933 struct iwl_host_cmd cmd = { 1993 .configuration_flags =
1934 .id = REPLY_STATISTICS_CMD, 1994 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
1935 .flags = flags,
1936 .len = sizeof(stat_flags),
1937 .data = (u8 *) &stat_flags,
1938 }; 1995 };
1939 return iwl_send_cmd(priv, &cmd); 1996
1997 if (flags & CMD_ASYNC)
1998 return iwl_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
1999 sizeof(struct iwl_statistics_cmd),
2000 &statistics_cmd, NULL);
2001 else
2002 return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
2003 sizeof(struct iwl_statistics_cmd),
2004 &statistics_cmd);
1940} 2005}
1941EXPORT_SYMBOL(iwl_send_statistics_request); 2006EXPORT_SYMBOL(iwl_send_statistics_request);
1942 2007
@@ -2077,10 +2142,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2077 spin_unlock_irqrestore(&priv->lock, flags); 2142 spin_unlock_irqrestore(&priv->lock, flags);
2078 priv->thermal_throttle.ct_kill_toggle = false; 2143 priv->thermal_throttle.ct_kill_toggle = false;
2079 2144
2080 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 2145 if (priv->cfg->support_ct_kill_exit) {
2081 case CSR_HW_REV_TYPE_1000:
2082 case CSR_HW_REV_TYPE_6x00:
2083 case CSR_HW_REV_TYPE_6x50:
2084 adv_cmd.critical_temperature_enter = 2146 adv_cmd.critical_temperature_enter =
2085 cpu_to_le32(priv->hw_params.ct_kill_threshold); 2147 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2086 adv_cmd.critical_temperature_exit = 2148 adv_cmd.critical_temperature_exit =
@@ -2097,8 +2159,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2097 "exit is %d\n", 2159 "exit is %d\n",
2098 priv->hw_params.ct_kill_threshold, 2160 priv->hw_params.ct_kill_threshold,
2099 priv->hw_params.ct_kill_exit_threshold); 2161 priv->hw_params.ct_kill_exit_threshold);
2100 break; 2162 } else {
2101 default:
2102 cmd.critical_temperature_R = 2163 cmd.critical_temperature_R =
2103 cpu_to_le32(priv->hw_params.ct_kill_threshold); 2164 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2104 2165
@@ -2111,7 +2172,6 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2111 "succeeded, " 2172 "succeeded, "
2112 "critical temperature is %d\n", 2173 "critical temperature is %d\n",
2113 priv->hw_params.ct_kill_threshold); 2174 priv->hw_params.ct_kill_threshold);
2114 break;
2115 } 2175 }
2116} 2176}
2117EXPORT_SYMBOL(iwl_rf_kill_ct_config); 2177EXPORT_SYMBOL(iwl_rf_kill_ct_config);
@@ -2143,7 +2203,7 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
2143 struct iwl_rx_mem_buffer *rxb) 2203 struct iwl_rx_mem_buffer *rxb)
2144{ 2204{
2145#ifdef CONFIG_IWLWIFI_DEBUG 2205#ifdef CONFIG_IWLWIFI_DEBUG
2146 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2206 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2147 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); 2207 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
2148 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", 2208 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
2149 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 2209 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -2154,7 +2214,7 @@ EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
2154void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 2214void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
2155 struct iwl_rx_mem_buffer *rxb) 2215 struct iwl_rx_mem_buffer *rxb)
2156{ 2216{
2157 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2217 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2158 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 2218 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
2159 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 2219 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
2160 "notification for %s:\n", len, 2220 "notification for %s:\n", len,
@@ -2166,7 +2226,7 @@ EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
2166void iwl_rx_reply_error(struct iwl_priv *priv, 2226void iwl_rx_reply_error(struct iwl_priv *priv,
2167 struct iwl_rx_mem_buffer *rxb) 2227 struct iwl_rx_mem_buffer *rxb)
2168{ 2228{
2169 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2229 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2170 2230
2171 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " 2231 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
2172 "seq 0x%04X ser 0x%08X\n", 2232 "seq 0x%04X ser 0x%08X\n",
@@ -2228,42 +2288,58 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
2228EXPORT_SYMBOL(iwl_mac_conf_tx); 2288EXPORT_SYMBOL(iwl_mac_conf_tx);
2229 2289
2230static void iwl_ht_conf(struct iwl_priv *priv, 2290static void iwl_ht_conf(struct iwl_priv *priv,
2231 struct ieee80211_bss_conf *bss_conf) 2291 struct ieee80211_bss_conf *bss_conf)
2232{ 2292{
2233 struct ieee80211_sta_ht_cap *ht_conf; 2293 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2234 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
2235 struct ieee80211_sta *sta; 2294 struct ieee80211_sta *sta;
2236 2295
2237 IWL_DEBUG_MAC80211(priv, "enter: \n"); 2296 IWL_DEBUG_MAC80211(priv, "enter: \n");
2238 2297
2239 if (!iwl_conf->is_ht) 2298 if (!ht_conf->is_ht)
2240 return; 2299 return;
2241 2300
2301 ht_conf->ht_protection =
2302 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2303 ht_conf->non_GF_STA_present =
2304 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2242 2305
2243 /* 2306 ht_conf->single_chain_sufficient = false;
2244 * It is totally wrong to base global information on something
2245 * that is valid only when associated, alas, this driver works
2246 * that way and I don't know how to fix it.
2247 */
2248 2307
2249 rcu_read_lock(); 2308 switch (priv->iw_mode) {
2250 sta = ieee80211_find_sta(priv->hw, priv->bssid); 2309 case NL80211_IFTYPE_STATION:
2251 if (!sta) { 2310 rcu_read_lock();
2311 sta = ieee80211_find_sta(priv->vif, priv->bssid);
2312 if (sta) {
2313 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2314 int maxstreams;
2315
2316 maxstreams = (ht_cap->mcs.tx_params &
2317 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2318 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2319 maxstreams += 1;
2320
2321 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2322 (ht_cap->mcs.rx_mask[2] == 0))
2323 ht_conf->single_chain_sufficient = true;
2324 if (maxstreams <= 1)
2325 ht_conf->single_chain_sufficient = true;
2326 } else {
2327 /*
2328 * If at all, this can only happen through a race
2329 * when the AP disconnects us while we're still
2330 * setting up the connection, in that case mac80211
2331 * will soon tell us about that.
2332 */
2333 ht_conf->single_chain_sufficient = true;
2334 }
2252 rcu_read_unlock(); 2335 rcu_read_unlock();
2253 return; 2336 break;
2337 case NL80211_IFTYPE_ADHOC:
2338 ht_conf->single_chain_sufficient = true;
2339 break;
2340 default:
2341 break;
2254 } 2342 }
2255 ht_conf = &sta->ht_cap;
2256
2257 iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
2258
2259 memcpy(&iwl_conf->mcs, &ht_conf->mcs, 16);
2260
2261 iwl_conf->ht_protection =
2262 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2263 iwl_conf->non_GF_STA_present =
2264 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2265
2266 rcu_read_unlock();
2267 2343
2268 IWL_DEBUG_MAC80211(priv, "leave\n"); 2344 IWL_DEBUG_MAC80211(priv, "leave\n");
2269} 2345}
@@ -2387,6 +2463,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2387 priv->timestamp = bss_conf->timestamp; 2463 priv->timestamp = bss_conf->timestamp;
2388 priv->assoc_capability = bss_conf->assoc_capability; 2464 priv->assoc_capability = bss_conf->assoc_capability;
2389 2465
2466 iwl_led_associate(priv);
2467
2390 /* 2468 /*
2391 * We have just associated, don't start scan too early 2469 * We have just associated, don't start scan too early
2392 * leave time for EAPOL exchange to complete. 2470 * leave time for EAPOL exchange to complete.
@@ -2397,9 +2475,20 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2397 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; 2475 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
2398 if (!iwl_is_rfkill(priv)) 2476 if (!iwl_is_rfkill(priv))
2399 priv->cfg->ops->lib->post_associate(priv); 2477 priv->cfg->ops->lib->post_associate(priv);
2400 } else 2478 } else {
2401 priv->assoc_id = 0; 2479 priv->assoc_id = 0;
2480 iwl_led_disassociate(priv);
2402 2481
2482 /*
2483 * inform the ucode that there is no longer an
2484 * association and that no more packets should be
2485 * send
2486 */
2487 priv->staging_rxon.filter_flags &=
2488 ~RXON_FILTER_ASSOC_MSK;
2489 priv->staging_rxon.assoc_id = 0;
2490 iwlcore_commit_rxon(priv);
2491 }
2403 } 2492 }
2404 2493
2405 if (changes && iwl_is_associated(priv) && priv->assoc_id) { 2494 if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2414,6 +2503,14 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2414 } 2503 }
2415 } 2504 }
2416 2505
2506 if ((changes & BSS_CHANGED_BEACON_ENABLED) &&
2507 vif->bss_conf.enable_beacon) {
2508 memcpy(priv->staging_rxon.bssid_addr,
2509 bss_conf->bssid, ETH_ALEN);
2510 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2511 iwlcore_config_ap(priv);
2512 }
2513
2417 mutex_unlock(&priv->mutex); 2514 mutex_unlock(&priv->mutex);
2418 2515
2419 IWL_DEBUG_MAC80211(priv, "leave\n"); 2516 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2570,7 +2667,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2570 struct iwl_priv *priv = hw->priv; 2667 struct iwl_priv *priv = hw->priv;
2571 const struct iwl_channel_info *ch_info; 2668 const struct iwl_channel_info *ch_info;
2572 struct ieee80211_conf *conf = &hw->conf; 2669 struct ieee80211_conf *conf = &hw->conf;
2573 struct iwl_ht_info *ht_conf = &priv->current_ht_config; 2670 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2574 unsigned long flags = 0; 2671 unsigned long flags = 0;
2575 int ret = 0; 2672 int ret = 0;
2576 u16 ch; 2673 u16 ch;
@@ -2620,21 +2717,18 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2620 if (conf_is_ht40_minus(conf)) { 2717 if (conf_is_ht40_minus(conf)) {
2621 ht_conf->extension_chan_offset = 2718 ht_conf->extension_chan_offset =
2622 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 2719 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2623 ht_conf->supported_chan_width = 2720 ht_conf->is_40mhz = true;
2624 IWL_CHANNEL_WIDTH_40MHZ;
2625 } else if (conf_is_ht40_plus(conf)) { 2721 } else if (conf_is_ht40_plus(conf)) {
2626 ht_conf->extension_chan_offset = 2722 ht_conf->extension_chan_offset =
2627 IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 2723 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2628 ht_conf->supported_chan_width = 2724 ht_conf->is_40mhz = true;
2629 IWL_CHANNEL_WIDTH_40MHZ;
2630 } else { 2725 } else {
2631 ht_conf->extension_chan_offset = 2726 ht_conf->extension_chan_offset =
2632 IEEE80211_HT_PARAM_CHA_SEC_NONE; 2727 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2633 ht_conf->supported_chan_width = 2728 ht_conf->is_40mhz = false;
2634 IWL_CHANNEL_WIDTH_20MHZ;
2635 } 2729 }
2636 } else 2730 } else
2637 ht_conf->supported_chan_width = IWL_CHANNEL_WIDTH_20MHZ; 2731 ht_conf->is_40mhz = false;
2638 /* Default to no protection. Protection mode will later be set 2732 /* Default to no protection. Protection mode will later be set
2639 * from BSS config in iwl_ht_conf */ 2733 * from BSS config in iwl_ht_conf */
2640 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 2734 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -2649,6 +2743,22 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2649 2743
2650 iwl_set_flags_for_band(priv, conf->channel->band); 2744 iwl_set_flags_for_band(priv, conf->channel->band);
2651 spin_unlock_irqrestore(&priv->lock, flags); 2745 spin_unlock_irqrestore(&priv->lock, flags);
2746 if (iwl_is_associated(priv) &&
2747 (le16_to_cpu(priv->active_rxon.channel) != ch) &&
2748 priv->cfg->ops->lib->set_channel_switch) {
2749 iwl_set_rate(priv);
2750 /*
2751 * at this point, staging_rxon has the
2752 * configuration for channel switch
2753 */
2754 ret = priv->cfg->ops->lib->set_channel_switch(priv,
2755 ch);
2756 if (!ret) {
2757 iwl_print_rx_config_cmd(priv);
2758 goto out;
2759 }
2760 priv->switch_rxon.switch_in_progress = false;
2761 }
2652 set_ch_out: 2762 set_ch_out:
2653 /* The list of supported rates and rate mask can be different 2763 /* The list of supported rates and rate mask can be different
2654 * for each band; since the band may have changed, reset 2764 * for each band; since the band may have changed, reset
@@ -2656,7 +2766,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2656 iwl_set_rate(priv); 2766 iwl_set_rate(priv);
2657 } 2767 }
2658 2768
2659 if (changed & IEEE80211_CONF_CHANGE_PS) { 2769 if (changed & (IEEE80211_CONF_CHANGE_PS |
2770 IEEE80211_CONF_CHANGE_IDLE)) {
2660 ret = iwl_power_update_mode(priv, false); 2771 ret = iwl_power_update_mode(priv, false);
2661 if (ret) 2772 if (ret)
2662 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); 2773 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
@@ -2740,7 +2851,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2740 IWL_DEBUG_MAC80211(priv, "enter\n"); 2851 IWL_DEBUG_MAC80211(priv, "enter\n");
2741 2852
2742 spin_lock_irqsave(&priv->lock, flags); 2853 spin_lock_irqsave(&priv->lock, flags);
2743 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info)); 2854 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2744 spin_unlock_irqrestore(&priv->lock, flags); 2855 spin_unlock_irqrestore(&priv->lock, flags);
2745 2856
2746 iwl_reset_qos(priv); 2857 iwl_reset_qos(priv);
@@ -2792,6 +2903,55 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2792} 2903}
2793EXPORT_SYMBOL(iwl_mac_reset_tsf); 2904EXPORT_SYMBOL(iwl_mac_reset_tsf);
2794 2905
2906int iwl_alloc_txq_mem(struct iwl_priv *priv)
2907{
2908 if (!priv->txq)
2909 priv->txq = kzalloc(
2910 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2911 GFP_KERNEL);
2912 if (!priv->txq) {
2913 IWL_ERR(priv, "Not enough memory for txq \n");
2914 return -ENOMEM;
2915 }
2916 return 0;
2917}
2918EXPORT_SYMBOL(iwl_alloc_txq_mem);
2919
2920void iwl_free_txq_mem(struct iwl_priv *priv)
2921{
2922 kfree(priv->txq);
2923 priv->txq = NULL;
2924}
2925EXPORT_SYMBOL(iwl_free_txq_mem);
2926
2927int iwl_send_wimax_coex(struct iwl_priv *priv)
2928{
2929 struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd);
2930
2931 if (priv->cfg->support_wimax_coexist) {
2932 /* UnMask wake up src at associated sleep */
2933 coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
2934
2935 /* UnMask wake up src at unassociated sleep */
2936 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
2937 memcpy(coex_cmd.sta_prio, cu_priorities,
2938 sizeof(struct iwl_wimax_coex_event_entry) *
2939 COEX_NUM_OF_EVENTS);
2940
2941 /* enabling the coexistence feature */
2942 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
2943
2944 /* enabling the priorities tables */
2945 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
2946 } else {
2947 /* coexistence is disabled */
2948 memset(&coex_cmd, 0, sizeof(coex_cmd));
2949 }
2950 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
2951 sizeof(coex_cmd), &coex_cmd);
2952}
2953EXPORT_SYMBOL(iwl_send_wimax_coex);
2954
2795#ifdef CONFIG_IWLWIFI_DEBUGFS 2955#ifdef CONFIG_IWLWIFI_DEBUGFS
2796 2956
2797#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) 2957#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
@@ -2929,15 +3089,11 @@ const char *get_ctrl_string(int cmd)
2929 } 3089 }
2930} 3090}
2931 3091
2932void iwl_clear_tx_stats(struct iwl_priv *priv) 3092void iwl_clear_traffic_stats(struct iwl_priv *priv)
2933{ 3093{
2934 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats)); 3094 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
2935
2936}
2937
2938void iwl_clear_rx_stats(struct iwl_priv *priv)
2939{
2940 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats)); 3095 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
3096 priv->led_tpt = 0;
2941} 3097}
2942 3098
2943/* 3099/*
@@ -3030,6 +3186,7 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
3030 stats->data_cnt++; 3186 stats->data_cnt++;
3031 stats->data_bytes += len; 3187 stats->data_bytes += len;
3032 } 3188 }
3189 iwl_leds_background(priv);
3033} 3190}
3034EXPORT_SYMBOL(iwl_update_stats); 3191EXPORT_SYMBOL(iwl_update_stats);
3035#endif 3192#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7754538c2194..cf7d3df0744e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -89,6 +89,7 @@ struct iwl_hcmd_ops {
89 int (*rxon_assoc)(struct iwl_priv *priv); 89 int (*rxon_assoc)(struct iwl_priv *priv);
90 int (*commit_rxon)(struct iwl_priv *priv); 90 int (*commit_rxon)(struct iwl_priv *priv);
91 void (*set_rxon_chain)(struct iwl_priv *priv); 91 void (*set_rxon_chain)(struct iwl_priv *priv);
92 int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
92}; 93};
93 94
94struct iwl_hcmd_utils_ops { 95struct iwl_hcmd_utils_ops {
@@ -97,7 +98,8 @@ struct iwl_hcmd_utils_ops {
97 void (*gain_computation)(struct iwl_priv *priv, 98 void (*gain_computation)(struct iwl_priv *priv,
98 u32 *average_noise, 99 u32 *average_noise,
99 u16 min_average_noise_antennat_i, 100 u16 min_average_noise_antennat_i,
100 u32 min_average_noise); 101 u32 min_average_noise,
102 u8 default_chain);
101 void (*chain_noise_reset)(struct iwl_priv *priv); 103 void (*chain_noise_reset)(struct iwl_priv *priv);
102 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, 104 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
103 __le32 *tx_flags); 105 __le32 *tx_flags);
@@ -107,7 +109,6 @@ struct iwl_hcmd_utils_ops {
107 109
108struct iwl_apm_ops { 110struct iwl_apm_ops {
109 int (*init)(struct iwl_priv *priv); 111 int (*init)(struct iwl_priv *priv);
110 int (*reset)(struct iwl_priv *priv);
111 void (*stop)(struct iwl_priv *priv); 112 void (*stop)(struct iwl_priv *priv);
112 void (*config)(struct iwl_priv *priv); 113 void (*config)(struct iwl_priv *priv);
113 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); 114 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
@@ -166,8 +167,9 @@ struct iwl_lib_ops {
166 int (*is_valid_rtc_data_addr)(u32 addr); 167 int (*is_valid_rtc_data_addr)(u32 addr);
167 /* 1st ucode load */ 168 /* 1st ucode load */
168 int (*load_ucode)(struct iwl_priv *priv); 169 int (*load_ucode)(struct iwl_priv *priv);
169 void (*dump_nic_event_log)(struct iwl_priv *priv); 170 void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log);
170 void (*dump_nic_error_log)(struct iwl_priv *priv); 171 void (*dump_nic_error_log)(struct iwl_priv *priv);
172 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
171 /* power management */ 173 /* power management */
172 struct iwl_apm_ops apm_ops; 174 struct iwl_apm_ops apm_ops;
173 175
@@ -185,18 +187,24 @@ struct iwl_lib_ops {
185 struct iwl_temp_ops temp_ops; 187 struct iwl_temp_ops temp_ops;
186}; 188};
187 189
190struct iwl_led_ops {
191 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
192 int (*on)(struct iwl_priv *priv);
193 int (*off)(struct iwl_priv *priv);
194};
195
188struct iwl_ops { 196struct iwl_ops {
189 const struct iwl_ucode_ops *ucode; 197 const struct iwl_ucode_ops *ucode;
190 const struct iwl_lib_ops *lib; 198 const struct iwl_lib_ops *lib;
191 const struct iwl_hcmd_ops *hcmd; 199 const struct iwl_hcmd_ops *hcmd;
192 const struct iwl_hcmd_utils_ops *utils; 200 const struct iwl_hcmd_utils_ops *utils;
201 const struct iwl_led_ops *led;
193}; 202};
194 203
195struct iwl_mod_params { 204struct iwl_mod_params {
196 int sw_crypto; /* def: 0 = using hardware encryption */ 205 int sw_crypto; /* def: 0 = using hardware encryption */
197 int disable_hw_scan; /* def: 0 = use h/w scan */ 206 int disable_hw_scan; /* def: 0 = use h/w scan */
198 int num_of_queues; /* def: HW dependent */ 207 int num_of_queues; /* def: HW dependent */
199 int num_of_ampdu_queues;/* def: HW dependent */
200 int disable_11n; /* def: 0 = 11n capabilities enabled */ 208 int disable_11n; /* def: 0 = 11n capabilities enabled */
201 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 209 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
202 int antenna; /* def: 0 = both antennas (use diversity) */ 210 int antenna; /* def: 0 = both antennas (use diversity) */
@@ -213,7 +221,15 @@ struct iwl_mod_params {
213 * @pa_type: used by 6000 series only to identify the type of Power Amplifier 221 * @pa_type: used by 6000 series only to identify the type of Power Amplifier
214 * @max_ll_items: max number of OTP blocks 222 * @max_ll_items: max number of OTP blocks
215 * @shadow_ram_support: shadow support for OTP memory 223 * @shadow_ram_support: shadow support for OTP memory
224 * @led_compensation: compensate on the led on/off time per HW according
225 * to the deviation to achieve the desired led frequency.
226 * The detail algorithm is described in iwl-led.c
216 * @use_rts_for_ht: use rts/cts protection for HT traffic 227 * @use_rts_for_ht: use rts/cts protection for HT traffic
228 * @chain_noise_num_beacons: number of beacons used to compute chain noise
229 * @adv_thermal_throttle: support advance thermal throttle
230 * @support_ct_kill_exit: support ct kill exit condition
231 * @sm_ps_mode: spatial multiplexing power save mode
232 * @support_wimax_coexist: support wimax/wifi co-exist
217 * 233 *
218 * We enable the driver to be backward compatible wrt API version. The 234 * We enable the driver to be backward compatible wrt API version. The
219 * driver specifies which APIs it supports (with @ucode_api_max being the 235 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -245,18 +261,32 @@ struct iwl_cfg {
245 int eeprom_size; 261 int eeprom_size;
246 u16 eeprom_ver; 262 u16 eeprom_ver;
247 u16 eeprom_calib_ver; 263 u16 eeprom_calib_ver;
264 int num_of_queues; /* def: HW dependent */
265 int num_of_ampdu_queues;/* def: HW dependent */
248 const struct iwl_ops *ops; 266 const struct iwl_ops *ops;
249 const struct iwl_mod_params *mod_params; 267 const struct iwl_mod_params *mod_params;
250 u8 valid_tx_ant; 268 u8 valid_tx_ant;
251 u8 valid_rx_ant; 269 u8 valid_rx_ant;
252 bool need_pll_cfg; 270
271 /* for iwl_apm_init() */
272 u32 pll_cfg_val;
273 bool set_l0s;
274 bool use_bsm;
275
253 bool use_isr_legacy; 276 bool use_isr_legacy;
254 enum iwl_pa_type pa_type; 277 enum iwl_pa_type pa_type;
255 const u16 max_ll_items; 278 const u16 max_ll_items;
256 const bool shadow_ram_support; 279 const bool shadow_ram_support;
257 const bool ht_greenfield_support; 280 const bool ht_greenfield_support;
281 u16 led_compensation;
258 const bool broken_powersave; 282 const bool broken_powersave;
259 bool use_rts_for_ht; 283 bool use_rts_for_ht;
284 int chain_noise_num_beacons;
285 const bool supports_idle;
286 bool adv_thermal_throttle;
287 bool support_ct_kill_exit;
288 u8 sm_ps_mode;
289 const bool support_wimax_coexist;
260}; 290};
261 291
262/*************************** 292/***************************
@@ -275,7 +305,7 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv);
275int iwl_full_rxon_required(struct iwl_priv *priv); 305int iwl_full_rxon_required(struct iwl_priv *priv);
276void iwl_set_rxon_chain(struct iwl_priv *priv); 306void iwl_set_rxon_chain(struct iwl_priv *priv);
277int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch); 307int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
278void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info); 308void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
279u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 309u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
280 struct ieee80211_sta_ht_cap *sta_ht_inf); 310 struct ieee80211_sta_ht_cap *sta_ht_inf);
281void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band); 311void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band);
@@ -289,10 +319,7 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
289 unsigned int changed_flags, 319 unsigned int changed_flags,
290 unsigned int *total_flags, u64 multicast); 320 unsigned int *total_flags, u64 multicast);
291int iwl_hw_nic_init(struct iwl_priv *priv); 321int iwl_hw_nic_init(struct iwl_priv *priv);
292int iwl_setup_mac(struct iwl_priv *priv);
293int iwl_set_hw_params(struct iwl_priv *priv); 322int iwl_set_hw_params(struct iwl_priv *priv);
294int iwl_init_drv(struct iwl_priv *priv);
295void iwl_uninit_drv(struct iwl_priv *priv);
296bool iwl_is_monitor_mode(struct iwl_priv *priv); 323bool iwl_is_monitor_mode(struct iwl_priv *priv);
297void iwl_post_associate(struct iwl_priv *priv); 324void iwl_post_associate(struct iwl_priv *priv);
298void iwl_bss_info_changed(struct ieee80211_hw *hw, 325void iwl_bss_info_changed(struct ieee80211_hw *hw,
@@ -311,6 +338,11 @@ void iwl_config_ap(struct iwl_priv *priv);
311int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, 338int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
312 struct ieee80211_tx_queue_stats *stats); 339 struct ieee80211_tx_queue_stats *stats);
313void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 340void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
341int iwl_alloc_txq_mem(struct iwl_priv *priv);
342void iwl_free_txq_mem(struct iwl_priv *priv);
343void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
344 __le32 *tx_flags);
345int iwl_send_wimax_coex(struct iwl_priv *priv);
314#ifdef CONFIG_IWLWIFI_DEBUGFS 346#ifdef CONFIG_IWLWIFI_DEBUGFS
315int iwl_alloc_traffic_mem(struct iwl_priv *priv); 347int iwl_alloc_traffic_mem(struct iwl_priv *priv);
316void iwl_free_traffic_mem(struct iwl_priv *priv); 348void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -321,8 +353,7 @@ void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
321 u16 length, struct ieee80211_hdr *header); 353 u16 length, struct ieee80211_hdr *header);
322const char *get_mgmt_string(int cmd); 354const char *get_mgmt_string(int cmd);
323const char *get_ctrl_string(int cmd); 355const char *get_ctrl_string(int cmd);
324void iwl_clear_tx_stats(struct iwl_priv *priv); 356void iwl_clear_traffic_stats(struct iwl_priv *priv);
325void iwl_clear_rx_stats(struct iwl_priv *priv);
326void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, 357void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
327 u16 len); 358 u16 len);
328#else 359#else
@@ -358,6 +389,7 @@ static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
358 /* data */ 389 /* data */
359 stats->data_bytes += len; 390 stats->data_bytes += len;
360 } 391 }
392 iwl_leds_background(priv);
361} 393}
362#endif 394#endif
363/***************************************************** 395/*****************************************************
@@ -393,6 +425,8 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
393 struct iwl_rx_mem_buffer *rxb); 425 struct iwl_rx_mem_buffer *rxb);
394void iwl_rx_statistics(struct iwl_priv *priv, 426void iwl_rx_statistics(struct iwl_priv *priv,
395 struct iwl_rx_mem_buffer *rxb); 427 struct iwl_rx_mem_buffer *rxb);
428void iwl_reply_statistics(struct iwl_priv *priv,
429 struct iwl_rx_mem_buffer *rxb);
396void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 430void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
397 431
398/* TX helpers */ 432/* TX helpers */
@@ -511,7 +545,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
511 const void *data, 545 const void *data,
512 void (*callback)(struct iwl_priv *priv, 546 void (*callback)(struct iwl_priv *priv,
513 struct iwl_device_cmd *cmd, 547 struct iwl_device_cmd *cmd,
514 struct sk_buff *skb)); 548 struct iwl_rx_packet *pkt));
515 549
516int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 550int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
517 551
@@ -544,15 +578,12 @@ int iwl_pci_resume(struct pci_dev *pdev);
544/***************************************************** 578/*****************************************************
545* Error Handling Debugging 579* Error Handling Debugging
546******************************************************/ 580******************************************************/
547#ifdef CONFIG_IWLWIFI_DEBUG
548void iwl_dump_nic_event_log(struct iwl_priv *priv);
549void iwl_dump_nic_error_log(struct iwl_priv *priv); 581void iwl_dump_nic_error_log(struct iwl_priv *priv);
582void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
583#ifdef CONFIG_IWLWIFI_DEBUG
584void iwl_print_rx_config_cmd(struct iwl_priv *priv);
550#else 585#else
551static inline void iwl_dump_nic_event_log(struct iwl_priv *priv) 586static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv)
552{
553}
554
555static inline void iwl_dump_nic_error_log(struct iwl_priv *priv)
556{ 587{
557} 588}
558#endif 589#endif
@@ -571,6 +602,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
571#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */ 602#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
572#define STATUS_INT_ENABLED 2 603#define STATUS_INT_ENABLED 2
573#define STATUS_RF_KILL_HW 3 604#define STATUS_RF_KILL_HW 3
605#define STATUS_CT_KILL 4
574#define STATUS_INIT 5 606#define STATUS_INIT 5
575#define STATUS_ALIVE 6 607#define STATUS_ALIVE 6
576#define STATUS_READY 7 608#define STATUS_READY 7
@@ -615,6 +647,11 @@ static inline int iwl_is_rfkill(struct iwl_priv *priv)
615 return iwl_is_rfkill_hw(priv); 647 return iwl_is_rfkill_hw(priv);
616} 648}
617 649
650static inline int iwl_is_ctkill(struct iwl_priv *priv)
651{
652 return test_bit(STATUS_CT_KILL, &priv->status);
653}
654
618static inline int iwl_is_ready_rf(struct iwl_priv *priv) 655static inline int iwl_is_ready_rf(struct iwl_priv *priv)
619{ 656{
620 657
@@ -626,7 +663,8 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
626 663
627extern void iwl_rf_kill_ct_config(struct iwl_priv *priv); 664extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
628extern int iwl_send_bt_config(struct iwl_priv *priv); 665extern int iwl_send_bt_config(struct iwl_priv *priv);
629extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags); 666extern int iwl_send_statistics_request(struct iwl_priv *priv,
667 u8 flags, bool clear);
630extern int iwl_verify_ucode(struct iwl_priv *priv); 668extern int iwl_verify_ucode(struct iwl_priv *priv);
631extern int iwl_send_lq_cmd(struct iwl_priv *priv, 669extern int iwl_send_lq_cmd(struct iwl_priv *priv,
632 struct iwl_link_quality_cmd *lq, u8 flags); 670 struct iwl_link_quality_cmd *lq, u8 flags);
@@ -636,6 +674,9 @@ extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
636 struct iwl_rx_mem_buffer *rxb); 674 struct iwl_rx_mem_buffer *rxb);
637void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, 675void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
638 struct iwl_rx_mem_buffer *rxb); 676 struct iwl_rx_mem_buffer *rxb);
677void iwl_apm_stop(struct iwl_priv *priv);
678int iwl_apm_stop_master(struct iwl_priv *priv);
679int iwl_apm_init(struct iwl_priv *priv);
639 680
640void iwl_setup_rxon_timing(struct iwl_priv *priv); 681void iwl_setup_rxon_timing(struct iwl_priv *priv);
641static inline int iwl_send_rxon_assoc(struct iwl_priv *priv) 682static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
@@ -655,5 +696,4 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
655{ 696{
656 return priv->hw->wiphy->bands[band]; 697 return priv->hw->wiphy->bands[band];
657} 698}
658
659#endif /* __iwl_core_h__ */ 699#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 06437d13e73e..a7bfae01f19b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -62,11 +62,29 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_csr_h__ 63#ifndef __iwl_csr_h__
64#define __iwl_csr_h__ 64#define __iwl_csr_h__
65/*=== CSR (control and status registers) ===*/ 65/*
66 * CSR (control and status registers)
67 *
68 * CSR registers are mapped directly into PCI bus space, and are accessible
69 * whenever platform supplies power to device, even when device is in
70 * low power states due to driver-invoked device resets
71 * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
72 *
73 * Use iwl_write32() and iwl_read32() family to access these registers;
74 * these provide simple PCI bus access, without waking up the MAC.
75 * Do not use iwl_write_direct32() family for these registers;
76 * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
77 * The MAC (uCode processor, etc.) does not need to be powered up for accessing
78 * the CSR registers.
79 *
80 * NOTE: Newer devices using one-time-programmable (OTP) memory
81 * require device to be awake in order to read this memory
82 * via CSR_EEPROM and CSR_OTP registers
83 */
66#define CSR_BASE (0x000) 84#define CSR_BASE (0x000)
67 85
68#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */ 86#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
69#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */ 87#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
70#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */ 88#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
71#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */ 89#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
72#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/ 90#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
@@ -74,43 +92,67 @@
74#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/ 92#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
75#define CSR_GP_CNTRL (CSR_BASE+0x024) 93#define CSR_GP_CNTRL (CSR_BASE+0x024)
76 94
95/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
96#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
97
77/* 98/*
78 * Hardware revision info 99 * Hardware revision info
79 * Bit fields: 100 * Bit fields:
80 * 31-8: Reserved 101 * 31-8: Reserved
81 * 7-4: Type of device: 0x0 = 4965, 0xd = 3945 102 * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
82 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D 103 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
83 * 1-0: "Dash" value, as in A-1, etc. 104 * 1-0: "Dash" (-) value, as in A-1, etc.
84 * 105 *
85 * NOTE: Revision step affects calculation of CCK txpower for 4965. 106 * NOTE: Revision step affects calculation of CCK txpower for 4965.
107 * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
86 */ 108 */
87#define CSR_HW_REV (CSR_BASE+0x028) 109#define CSR_HW_REV (CSR_BASE+0x028)
88 110
89/* EEPROM reads */ 111/*
112 * EEPROM and OTP (one-time-programmable) memory reads
113 *
114 * NOTE: For (newer) devices using OTP, device must be awake, initialized via
115 * apm_ops.init() in order to read. Older devices (3945/4965/5000)
116 * use EEPROM and do not require this.
117 */
90#define CSR_EEPROM_REG (CSR_BASE+0x02c) 118#define CSR_EEPROM_REG (CSR_BASE+0x02c)
91#define CSR_EEPROM_GP (CSR_BASE+0x030) 119#define CSR_EEPROM_GP (CSR_BASE+0x030)
92#define CSR_OTP_GP_REG (CSR_BASE+0x034) 120#define CSR_OTP_GP_REG (CSR_BASE+0x034)
121
93#define CSR_GIO_REG (CSR_BASE+0x03C) 122#define CSR_GIO_REG (CSR_BASE+0x03C)
94#define CSR_GP_UCODE_REG (CSR_BASE+0x048) 123#define CSR_GP_UCODE_REG (CSR_BASE+0x048)
95#define CSR_GP_DRIVER_REG (CSR_BASE+0x050) 124#define CSR_GP_DRIVER_REG (CSR_BASE+0x050)
125
126/*
127 * UCODE-DRIVER GP (general purpose) mailbox registers.
128 * SET/CLR registers set/clear bit(s) if "1" is written.
129 */
96#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054) 130#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
97#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058) 131#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
98#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c) 132#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
99#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060) 133#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
134
100#define CSR_LED_REG (CSR_BASE+0x094) 135#define CSR_LED_REG (CSR_BASE+0x094)
101#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) 136#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
137
138/* GIO Chicken Bits (PCI Express bus link power management) */
102#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) 139#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
103 140
104#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
105/* Analog phase-lock-loop configuration */ 141/* Analog phase-lock-loop configuration */
106#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c) 142#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
143
107/* 144/*
108 * Indicates hardware rev, to determine CCK backoff for txpower calculation. 145 * CSR Hardware Revision Workaround Register. Indicates hardware rev;
146 * "step" determines CCK backoff for txpower calculation. Used for 4965 only.
147 * See also CSR_HW_REV register.
109 * Bit fields: 148 * Bit fields:
110 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step 149 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
150 * 1-0: "Dash" (-) value, as in C-1, etc.
111 */ 151 */
112#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) 152#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
113#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) 153
154#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
155#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
114 156
115/* Bits for CSR_HW_IF_CONFIG_REG */ 157/* Bits for CSR_HW_IF_CONFIG_REG */
116#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 158#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
@@ -125,14 +167,14 @@
125#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000) 167#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
126#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000) 168#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
127 169
128#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000) 170#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
129#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000) 171#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
130#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) 172#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
131#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) 173#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
132#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) 174#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
133 175
134#define CSR_INT_PERIODIC_DIS (0x00) 176#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
135#define CSR_INT_PERIODIC_ENA (0xFF) 177#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
136 178
137/* interrupt flags in INTA, set by uCode or hardware (e.g. dma), 179/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
138 * acknowledged (reset) by host writing "1" to flagged bits. */ 180 * acknowledged (reset) by host writing "1" to flagged bits. */
@@ -195,8 +237,46 @@
195#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) 237#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
196#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) 238#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
197#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) 239#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
240#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
198 241
199/* GP (general purpose) CONTROL */ 242/*
243 * GP (general purpose) CONTROL REGISTER
244 * Bit fields:
245 * 27: HW_RF_KILL_SW
246 * Indicates state of (platform's) hardware RF-Kill switch
247 * 26-24: POWER_SAVE_TYPE
248 * Indicates current power-saving mode:
249 * 000 -- No power saving
250 * 001 -- MAC power-down
251 * 010 -- PHY (radio) power-down
252 * 011 -- Error
253 * 9-6: SYS_CONFIG
254 * Indicates current system configuration, reflecting pins on chip
255 * as forced high/low by device circuit board.
256 * 4: GOING_TO_SLEEP
257 * Indicates MAC is entering a power-saving sleep power-down.
258 * Not a good time to access device-internal resources.
259 * 3: MAC_ACCESS_REQ
260 * Host sets this to request and maintain MAC wakeup, to allow host
261 * access to device-internal resources. Host must wait for
262 * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
263 * device registers.
264 * 2: INIT_DONE
265 * Host sets this to put device into fully operational D0 power mode.
266 * Host resets this after SW_RESET to put device into low power mode.
267 * 0: MAC_CLOCK_READY
268 * Indicates MAC (ucode processor, etc.) is powered up and can run.
269 * Internal resources are accessible.
270 * NOTE: This does not indicate that the processor is actually running.
271 * NOTE: This does not indicate that 4965 or 3945 has completed
272 * init or post-power-down restore of internal SRAM memory.
273 * Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
274 * SRAM is restored and uCode is in normal operation mode.
275 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
276 * do not need to save/restore it.
277 * NOTE: After device reset, this bit remains "0" until host sets
278 * INIT_DONE
279 */
200#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) 280#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
201#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004) 281#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
202#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008) 282#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
@@ -229,18 +309,58 @@
229#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000) 309#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
230 310
231/* EEPROM GP */ 311/* EEPROM GP */
232#define CSR_EEPROM_GP_VALID_MSK (0x00000007) 312#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
233#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
234#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 313#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
314#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000)
315#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001)
316#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
317#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
318
319/* One-time-programmable memory general purpose reg */
235#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ 320#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */
236#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ 321#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */
237#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ 322#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */
238#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ 323#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */
239 324
325/* GP REG */
326#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
327#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
328#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
329#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
330#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
331
332
240/* CSR GIO */ 333/* CSR GIO */
241#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) 334#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
242 335
243/* UCODE DRV GP */ 336/*
337 * UCODE-DRIVER GP (general purpose) mailbox register 1
338 * Host driver and uCode write and/or read this register to communicate with
339 * each other.
340 * Bit fields:
341 * 4: UCODE_DISABLE
342 * Host sets this to request permanent halt of uCode, same as
343 * sending CARD_STATE command with "halt" bit set.
344 * 3: CT_KILL_EXIT
345 * Host sets this to request exit from CT_KILL state, i.e. host thinks
346 * device temperature is low enough to continue normal operation.
347 * 2: CMD_BLOCKED
348 * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
349 * to release uCode to clear all Tx and command queues, enter
350 * unassociated mode, and power down.
351 * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
352 * 1: SW_BIT_RFKILL
353 * Host sets this when issuing CARD_STATE command to request
354 * device sleep.
355 * 0: MAC_SLEEP
356 * uCode sets this when preparing a power-saving power-down.
357 * uCode resets this when power-up is complete and SRAM is sane.
358 * NOTE: 3945/4965 saves internal SRAM data to host when powering down,
359 * and must restore this data after powering back up.
360 * MAC_SLEEP is the best indication that restore is complete.
361 * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
362 * do not need to save/restore it.
363 */
244#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001) 364#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
245#define CSR_UCODE_SW_BIT_RFKILL (0x00000002) 365#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
246#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004) 366#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
@@ -253,7 +373,7 @@
253#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002) 373#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002)
254 374
255 375
256/* GI Chicken Bits */ 376/* GIO Chicken Bits (PCI Express bus link power management) */
257#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000) 377#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
258#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000) 378#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
259 379
@@ -273,8 +393,23 @@
273#define CSR_DRAM_INT_TBL_ENABLE (1 << 31) 393#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
274#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27) 394#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
275 395
276/*=== HBUS (Host-side Bus) ===*/ 396/*
397 * HBUS (Host-side Bus)
398 *
399 * HBUS registers are mapped directly into PCI bus space, but are used
400 * to indirectly access device's internal memory or registers that
401 * may be powered-down.
402 *
403 * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
404 * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
405 * to make sure the MAC (uCode processor, etc.) is powered up for accessing
406 * internal resources.
407 *
408 * Do not use iwl_write32()/iwl_read32() family to access these registers;
409 * these provide only simple PCI bus access, without waking up the MAC.
410 */
277#define HBUS_BASE (0x400) 411#define HBUS_BASE (0x400)
412
278/* 413/*
279 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM 414 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
280 * structures, error log, event log, verifying uCode load). 415 * structures, error log, event log, verifying uCode load).
@@ -289,6 +424,10 @@
289#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018) 424#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
290#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c) 425#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
291 426
427/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
428#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
429#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
430
292/* 431/*
293 * Registers for accessing device's internal peripheral registers 432 * Registers for accessing device's internal peripheral registers
294 * (e.g. SCD, BSM, etc.). First write to address register, 433 * (e.g. SCD, BSM, etc.). First write to address register,
@@ -303,16 +442,12 @@
303#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) 442#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
304 443
305/* 444/*
306 * Per-Tx-queue write pointer (index, really!) (3945 and 4965). 445 * Per-Tx-queue write pointer (index, really!)
307 * Indicates index to next TFD that driver will fill (1 past latest filled). 446 * Indicates index to next TFD that driver will fill (1 past latest filled).
308 * Bit usage: 447 * Bit usage:
309 * 0-7: queue write index 448 * 0-7: queue write index
310 * 11-8: queue selector 449 * 11-8: queue selector
311 */ 450 */
312#define HBUS_TARG_WRPTR (HBUS_BASE+0x060) 451#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
313#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
314
315#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
316
317 452
318#endif /* !__iwl_csr_h__ */ 453#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index cbc62904655d..d61293ab67c9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -84,9 +84,7 @@ struct iwl_debugfs {
84 struct dentry *file_interrupt; 84 struct dentry *file_interrupt;
85 struct dentry *file_qos; 85 struct dentry *file_qos;
86 struct dentry *file_thermal_throttling; 86 struct dentry *file_thermal_throttling;
87#ifdef CONFIG_IWLWIFI_LEDS
88 struct dentry *file_led; 87 struct dentry *file_led;
89#endif
90 struct dentry *file_disable_ht40; 88 struct dentry *file_disable_ht40;
91 struct dentry *file_sleep_level_override; 89 struct dentry *file_sleep_level_override;
92 struct dentry *file_current_sleep_command; 90 struct dentry *file_current_sleep_command;
@@ -108,6 +106,9 @@ struct iwl_debugfs {
108 struct dentry *file_sensitivity; 106 struct dentry *file_sensitivity;
109 struct dentry *file_chain_noise; 107 struct dentry *file_chain_noise;
110 struct dentry *file_tx_power; 108 struct dentry *file_tx_power;
109 struct dentry *file_power_save_status;
110 struct dentry *file_clear_ucode_statistics;
111 struct dentry *file_clear_traffic_statistics;
111 } dbgfs_debug_files; 112 } dbgfs_debug_files;
112 u32 sram_offset; 113 u32 sram_offset;
113 u32 sram_len; 114 u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index a198bcf61022..21e0f6699daf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -47,9 +47,9 @@
47 goto err; \ 47 goto err; \
48} while (0) 48} while (0)
49 49
50#define DEBUGFS_ADD_FILE(name, parent) do { \ 50#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
51 dbgfs->dbgfs_##parent##_files.file_##name = \ 51 dbgfs->dbgfs_##parent##_files.file_##name = \
52 debugfs_create_file(#name, S_IWUSR | S_IRUSR, \ 52 debugfs_create_file(#name, mode, \
53 dbgfs->dir_##parent, priv, \ 53 dbgfs->dir_##parent, priv, \
54 &iwl_dbgfs_##name##_ops); \ 54 &iwl_dbgfs_##name##_ops); \
55 if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \ 55 if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \
@@ -131,21 +131,22 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
131 131
132 int cnt; 132 int cnt;
133 ssize_t ret; 133 ssize_t ret;
134 const size_t bufsz = 100 + sizeof(char) * 24 * (MANAGEMENT_MAX + CONTROL_MAX); 134 const size_t bufsz = 100 +
135 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
135 buf = kzalloc(bufsz, GFP_KERNEL); 136 buf = kzalloc(bufsz, GFP_KERNEL);
136 if (!buf) 137 if (!buf)
137 return -ENOMEM; 138 return -ENOMEM;
138 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); 139 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
139 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { 140 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
140 pos += scnprintf(buf + pos, bufsz - pos, 141 pos += scnprintf(buf + pos, bufsz - pos,
141 "\t%s\t\t: %u\n", 142 "\t%25s\t\t: %u\n",
142 get_mgmt_string(cnt), 143 get_mgmt_string(cnt),
143 priv->tx_stats.mgmt[cnt]); 144 priv->tx_stats.mgmt[cnt]);
144 } 145 }
145 pos += scnprintf(buf + pos, bufsz - pos, "Control\n"); 146 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
146 for (cnt = 0; cnt < CONTROL_MAX; cnt++) { 147 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
147 pos += scnprintf(buf + pos, bufsz - pos, 148 pos += scnprintf(buf + pos, bufsz - pos,
148 "\t%s\t\t: %u\n", 149 "\t%25s\t\t: %u\n",
149 get_ctrl_string(cnt), 150 get_ctrl_string(cnt),
150 priv->tx_stats.ctrl[cnt]); 151 priv->tx_stats.ctrl[cnt]);
151 } 152 }
@@ -159,7 +160,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
159 return ret; 160 return ret;
160} 161}
161 162
162static ssize_t iwl_dbgfs_tx_statistics_write(struct file *file, 163static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
163 const char __user *user_buf, 164 const char __user *user_buf,
164 size_t count, loff_t *ppos) 165 size_t count, loff_t *ppos)
165{ 166{
@@ -174,8 +175,7 @@ static ssize_t iwl_dbgfs_tx_statistics_write(struct file *file,
174 return -EFAULT; 175 return -EFAULT;
175 if (sscanf(buf, "%x", &clear_flag) != 1) 176 if (sscanf(buf, "%x", &clear_flag) != 1)
176 return -EFAULT; 177 return -EFAULT;
177 if (clear_flag == 1) 178 iwl_clear_traffic_stats(priv);
178 iwl_clear_tx_stats(priv);
179 179
180 return count; 180 return count;
181} 181}
@@ -190,7 +190,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
190 int cnt; 190 int cnt;
191 ssize_t ret; 191 ssize_t ret;
192 const size_t bufsz = 100 + 192 const size_t bufsz = 100 +
193 sizeof(char) * 24 * (MANAGEMENT_MAX + CONTROL_MAX); 193 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
194 buf = kzalloc(bufsz, GFP_KERNEL); 194 buf = kzalloc(bufsz, GFP_KERNEL);
195 if (!buf) 195 if (!buf)
196 return -ENOMEM; 196 return -ENOMEM;
@@ -198,14 +198,14 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
198 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n"); 198 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
199 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) { 199 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
200 pos += scnprintf(buf + pos, bufsz - pos, 200 pos += scnprintf(buf + pos, bufsz - pos,
201 "\t%s\t\t: %u\n", 201 "\t%25s\t\t: %u\n",
202 get_mgmt_string(cnt), 202 get_mgmt_string(cnt),
203 priv->rx_stats.mgmt[cnt]); 203 priv->rx_stats.mgmt[cnt]);
204 } 204 }
205 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n"); 205 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
206 for (cnt = 0; cnt < CONTROL_MAX; cnt++) { 206 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
207 pos += scnprintf(buf + pos, bufsz - pos, 207 pos += scnprintf(buf + pos, bufsz - pos,
208 "\t%s\t\t: %u\n", 208 "\t%25s\t\t: %u\n",
209 get_ctrl_string(cnt), 209 get_ctrl_string(cnt),
210 priv->rx_stats.ctrl[cnt]); 210 priv->rx_stats.ctrl[cnt]);
211 } 211 }
@@ -220,26 +220,6 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
220 return ret; 220 return ret;
221} 221}
222 222
223static ssize_t iwl_dbgfs_rx_statistics_write(struct file *file,
224 const char __user *user_buf,
225 size_t count, loff_t *ppos)
226{
227 struct iwl_priv *priv = file->private_data;
228 u32 clear_flag;
229 char buf[8];
230 int buf_size;
231
232 memset(buf, 0, sizeof(buf));
233 buf_size = min(count, sizeof(buf) - 1);
234 if (copy_from_user(buf, user_buf, buf_size))
235 return -EFAULT;
236 if (sscanf(buf, "%x", &clear_flag) != 1)
237 return -EFAULT;
238 if (clear_flag == 1)
239 iwl_clear_rx_stats(priv);
240 return count;
241}
242
243#define BYTE1_MASK 0x000000ff; 223#define BYTE1_MASK 0x000000ff;
244#define BYTE2_MASK 0x0000ffff; 224#define BYTE2_MASK 0x0000ffff;
245#define BYTE3_MASK 0x00ffffff; 225#define BYTE3_MASK 0x00ffffff;
@@ -248,13 +228,29 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
248 size_t count, loff_t *ppos) 228 size_t count, loff_t *ppos)
249{ 229{
250 u32 val; 230 u32 val;
251 char buf[1024]; 231 char *buf;
252 ssize_t ret; 232 ssize_t ret;
253 int i; 233 int i;
254 int pos = 0; 234 int pos = 0;
255 struct iwl_priv *priv = (struct iwl_priv *)file->private_data; 235 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
256 const size_t bufsz = sizeof(buf); 236 size_t bufsz;
257 237
238 /* default is to dump the entire data segment */
239 if (!priv->dbgfs->sram_offset && !priv->dbgfs->sram_len) {
240 priv->dbgfs->sram_offset = 0x800000;
241 if (priv->ucode_type == UCODE_INIT)
242 priv->dbgfs->sram_len = priv->ucode_init_data.len;
243 else
244 priv->dbgfs->sram_len = priv->ucode_data.len;
245 }
246 bufsz = 30 + priv->dbgfs->sram_len * sizeof(char) * 10;
247 buf = kmalloc(bufsz, GFP_KERNEL);
248 if (!buf)
249 return -ENOMEM;
250 pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
251 priv->dbgfs->sram_len);
252 pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
253 priv->dbgfs->sram_offset);
258 for (i = priv->dbgfs->sram_len; i > 0; i -= 4) { 254 for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
259 val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \ 255 val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
260 priv->dbgfs->sram_len - i); 256 priv->dbgfs->sram_len - i);
@@ -271,11 +267,14 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
271 break; 267 break;
272 } 268 }
273 } 269 }
270 if (!(i % 16))
271 pos += scnprintf(buf + pos, bufsz - pos, "\n");
274 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val); 272 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
275 } 273 }
276 pos += scnprintf(buf + pos, bufsz - pos, "\n"); 274 pos += scnprintf(buf + pos, bufsz - pos, "\n");
277 275
278 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 276 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
277 kfree(buf);
279 return ret; 278 return ret;
280} 279}
281 280
@@ -335,8 +334,6 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
335 pos += scnprintf(buf + pos, bufsz - pos, 334 pos += scnprintf(buf + pos, bufsz - pos,
336 "flags: 0x%x\n", 335 "flags: 0x%x\n",
337 station->sta.station_flags_msk); 336 station->sta.station_flags_msk);
338 pos += scnprintf(buf + pos, bufsz - pos,
339 "ps_status: %u\n", station->ps_status);
340 pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n"); 337 pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n");
341 pos += scnprintf(buf + pos, bufsz - pos, 338 pos += scnprintf(buf + pos, bufsz - pos,
342 "seq_num\t\ttxq_id"); 339 "seq_num\t\ttxq_id");
@@ -383,6 +380,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
383 int pos = 0, ofs = 0, buf_size = 0; 380 int pos = 0, ofs = 0, buf_size = 0;
384 const u8 *ptr; 381 const u8 *ptr;
385 char *buf; 382 char *buf;
383 u16 eeprom_ver;
386 size_t eeprom_len = priv->cfg->eeprom_size; 384 size_t eeprom_len = priv->cfg->eeprom_size;
387 buf_size = 4 * eeprom_len + 256; 385 buf_size = 4 * eeprom_len + 256;
388 386
@@ -403,9 +401,11 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
403 IWL_ERR(priv, "Can not allocate Buffer\n"); 401 IWL_ERR(priv, "Can not allocate Buffer\n");
404 return -ENOMEM; 402 return -ENOMEM;
405 } 403 }
406 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", 404 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
405 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
406 "version: 0x%x\n",
407 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) 407 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
408 ? "OTP" : "EEPROM"); 408 ? "OTP" : "EEPROM", eeprom_ver);
409 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { 409 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
410 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 410 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
411 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, 411 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -436,7 +436,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
436 if (sscanf(buf, "%d", &event_log_flag) != 1) 436 if (sscanf(buf, "%d", &event_log_flag) != 1)
437 return -EFAULT; 437 return -EFAULT;
438 if (event_log_flag == 1) 438 if (event_log_flag == 1)
439 priv->cfg->ops->lib->dump_nic_event_log(priv); 439 priv->cfg->ops->lib->dump_nic_event_log(priv, true);
440 440
441 return count; 441 return count;
442} 442}
@@ -532,6 +532,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
532 test_bit(STATUS_INT_ENABLED, &priv->status)); 532 test_bit(STATUS_INT_ENABLED, &priv->status));
533 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", 533 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
534 test_bit(STATUS_RF_KILL_HW, &priv->status)); 534 test_bit(STATUS_RF_KILL_HW, &priv->status));
535 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
536 test_bit(STATUS_CT_KILL, &priv->status));
535 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", 537 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
536 test_bit(STATUS_INIT, &priv->status)); 538 test_bit(STATUS_INIT, &priv->status));
537 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", 539 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
@@ -672,7 +674,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
672 return ret; 674 return ret;
673} 675}
674 676
675#ifdef CONFIG_IWLWIFI_LEDS
676static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, 677static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
677 size_t count, loff_t *ppos) 678 size_t count, loff_t *ppos)
678{ 679{
@@ -697,7 +698,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
697 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 698 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
698 return ret; 699 return ret;
699} 700}
700#endif
701 701
702static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, 702static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
703 char __user *user_buf, 703 char __user *user_buf,
@@ -798,15 +798,20 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
798 * valid here. However, let's not confuse them and present 798 * valid here. However, let's not confuse them and present
799 * IWL_POWER_INDEX_1 as "1", not "0". 799 * IWL_POWER_INDEX_1 as "1", not "0".
800 */ 800 */
801 if (value > 0) 801 if (value == 0)
802 return -EINVAL;
803 else if (value > 0)
802 value -= 1; 804 value -= 1;
803 805
804 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) 806 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
805 return -EINVAL; 807 return -EINVAL;
806 808
809 if (!iwl_is_ready_rf(priv))
810 return -EAGAIN;
811
807 priv->power_data.debug_sleep_level_override = value; 812 priv->power_data.debug_sleep_level_override = value;
808 813
809 iwl_power_update_mode(priv, false); 814 iwl_power_update_mode(priv, true);
810 815
811 return count; 816 return count;
812} 817}
@@ -861,9 +866,7 @@ DEBUGFS_READ_FILE_OPS(channels);
861DEBUGFS_READ_FILE_OPS(status); 866DEBUGFS_READ_FILE_OPS(status);
862DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 867DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
863DEBUGFS_READ_FILE_OPS(qos); 868DEBUGFS_READ_FILE_OPS(qos);
864#ifdef CONFIG_IWLWIFI_LEDS
865DEBUGFS_READ_FILE_OPS(led); 869DEBUGFS_READ_FILE_OPS(led);
866#endif
867DEBUGFS_READ_FILE_OPS(thermal_throttling); 870DEBUGFS_READ_FILE_OPS(thermal_throttling);
868DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); 871DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
869DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); 872DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -881,10 +884,14 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
881 struct iwl_rx_queue *rxq = &priv->rxq; 884 struct iwl_rx_queue *rxq = &priv->rxq;
882 char *buf; 885 char *buf;
883 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + 886 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
884 (IWL_MAX_NUM_QUEUES * 32 * 8) + 400; 887 (priv->cfg->num_of_queues * 32 * 8) + 400;
885 const u8 *ptr; 888 const u8 *ptr;
886 ssize_t ret; 889 ssize_t ret;
887 890
891 if (!priv->txq) {
892 IWL_ERR(priv, "txq not ready\n");
893 return -EAGAIN;
894 }
888 buf = kzalloc(bufsz, GFP_KERNEL); 895 buf = kzalloc(bufsz, GFP_KERNEL);
889 if (!buf) { 896 if (!buf) {
890 IWL_ERR(priv, "Can not allocate buffer\n"); 897 IWL_ERR(priv, "Can not allocate buffer\n");
@@ -976,8 +983,12 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
976 int pos = 0; 983 int pos = 0;
977 int cnt; 984 int cnt;
978 int ret; 985 int ret;
979 const size_t bufsz = sizeof(char) * 60 * IWL_MAX_NUM_QUEUES; 986 const size_t bufsz = sizeof(char) * 64 * priv->cfg->num_of_queues;
980 987
988 if (!priv->txq) {
989 IWL_ERR(priv, "txq not ready\n");
990 return -EAGAIN;
991 }
981 buf = kzalloc(bufsz, GFP_KERNEL); 992 buf = kzalloc(bufsz, GFP_KERNEL);
982 if (!buf) 993 if (!buf)
983 return -ENOMEM; 994 return -ENOMEM;
@@ -1028,10 +1039,6 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1028 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1039 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1029} 1040}
1030 1041
1031#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
1032#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
1033#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
1034
1035static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, 1042static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
1036 int bufsz) 1043 int bufsz)
1037{ 1044{
@@ -1068,17 +1075,17 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1068 sizeof(struct statistics_rx_non_phy) * 20 + 1075 sizeof(struct statistics_rx_non_phy) * 20 +
1069 sizeof(struct statistics_rx_ht_phy) * 20 + 400; 1076 sizeof(struct statistics_rx_ht_phy) * 20 + 400;
1070 ssize_t ret; 1077 ssize_t ret;
1071 struct statistics_rx_phy *ofdm; 1078 struct statistics_rx_phy *ofdm, *accum_ofdm;
1072 struct statistics_rx_phy *cck; 1079 struct statistics_rx_phy *cck, *accum_cck;
1073 struct statistics_rx_non_phy *general; 1080 struct statistics_rx_non_phy *general, *accum_general;
1074 struct statistics_rx_ht_phy *ht; 1081 struct statistics_rx_ht_phy *ht, *accum_ht;
1075 1082
1076 if (!iwl_is_alive(priv)) 1083 if (!iwl_is_alive(priv))
1077 return -EAGAIN; 1084 return -EAGAIN;
1078 1085
1079 /* make request to uCode to retrieve statistics information */ 1086 /* make request to uCode to retrieve statistics information */
1080 mutex_lock(&priv->mutex); 1087 mutex_lock(&priv->mutex);
1081 ret = iwl_send_statistics_request(priv, 0); 1088 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1082 mutex_unlock(&priv->mutex); 1089 mutex_unlock(&priv->mutex);
1083 1090
1084 if (ret) { 1091 if (ret) {
@@ -1100,155 +1107,268 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1100 cck = &priv->statistics.rx.cck; 1107 cck = &priv->statistics.rx.cck;
1101 general = &priv->statistics.rx.general; 1108 general = &priv->statistics.rx.general;
1102 ht = &priv->statistics.rx.ofdm_ht; 1109 ht = &priv->statistics.rx.ofdm_ht;
1110 accum_ofdm = &priv->accum_statistics.rx.ofdm;
1111 accum_cck = &priv->accum_statistics.rx.cck;
1112 accum_general = &priv->accum_statistics.rx.general;
1113 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
1103 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1114 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1104 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n"); 1115 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
1105 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n", 1116 pos += scnprintf(buf + pos, bufsz - pos,
1106 le32_to_cpu(ofdm->ina_cnt)); 1117 "\t\t\tcurrent\t\t\taccumulative\n");
1107 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n", 1118 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
1108 le32_to_cpu(ofdm->fina_cnt)); 1119 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
1109 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1120 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
1110 le32_to_cpu(ofdm->plcp_err)); 1121 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
1111 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1122 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1112 le32_to_cpu(ofdm->crc32_err)); 1123 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
1113 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1124 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1114 le32_to_cpu(ofdm->overrun_err)); 1125 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
1115 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1126 pos += scnprintf(buf + pos, bufsz - pos,
1116 le32_to_cpu(ofdm->early_overrun_err)); 1127 "overrun_err:\t\t%u\t\t\t%u\n",
1117 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1128 le32_to_cpu(ofdm->overrun_err),
1118 le32_to_cpu(ofdm->crc32_good)); 1129 accum_ofdm->overrun_err);
1119 pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n", 1130 pos += scnprintf(buf + pos, bufsz - pos,
1120 le32_to_cpu(ofdm->false_alarm_cnt)); 1131 "early_overrun_err:\t%u\t\t\t%u\n",
1121 pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n", 1132 le32_to_cpu(ofdm->early_overrun_err),
1122 le32_to_cpu(ofdm->fina_sync_err_cnt)); 1133 accum_ofdm->early_overrun_err);
1123 pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n", 1134 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1124 le32_to_cpu(ofdm->sfd_timeout)); 1135 le32_to_cpu(ofdm->crc32_good),
1125 pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n", 1136 accum_ofdm->crc32_good);
1126 le32_to_cpu(ofdm->fina_timeout)); 1137 pos += scnprintf(buf + pos, bufsz - pos,
1127 pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n", 1138 "false_alarm_cnt:\t%u\t\t\t%u\n",
1128 le32_to_cpu(ofdm->unresponded_rts)); 1139 le32_to_cpu(ofdm->false_alarm_cnt),
1129 pos += scnprintf(buf + pos, bufsz - pos, 1140 accum_ofdm->false_alarm_cnt);
1130 "rxe_frame_limit_overrun: %u\n", 1141 pos += scnprintf(buf + pos, bufsz - pos,
1131 le32_to_cpu(ofdm->rxe_frame_limit_overrun)); 1142 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
1132 pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n", 1143 le32_to_cpu(ofdm->fina_sync_err_cnt),
1133 le32_to_cpu(ofdm->sent_ack_cnt)); 1144 accum_ofdm->fina_sync_err_cnt);
1134 pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n", 1145 pos += scnprintf(buf + pos, bufsz - pos,
1135 le32_to_cpu(ofdm->sent_cts_cnt)); 1146 "sfd_timeout:\t\t%u\t\t\t%u\n",
1136 pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n", 1147 le32_to_cpu(ofdm->sfd_timeout),
1137 le32_to_cpu(ofdm->sent_ba_rsp_cnt)); 1148 accum_ofdm->sfd_timeout);
1138 pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n", 1149 pos += scnprintf(buf + pos, bufsz - pos,
1139 le32_to_cpu(ofdm->dsp_self_kill)); 1150 "fina_timeout:\t\t%u\t\t\t%u\n",
1140 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1151 le32_to_cpu(ofdm->fina_timeout),
1141 le32_to_cpu(ofdm->mh_format_err)); 1152 accum_ofdm->fina_timeout);
1142 pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n", 1153 pos += scnprintf(buf + pos, bufsz - pos,
1143 le32_to_cpu(ofdm->re_acq_main_rssi_sum)); 1154 "unresponded_rts:\t%u\t\t\t%u\n",
1155 le32_to_cpu(ofdm->unresponded_rts),
1156 accum_ofdm->unresponded_rts);
1157 pos += scnprintf(buf + pos, bufsz - pos,
1158 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
1159 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1160 accum_ofdm->rxe_frame_limit_overrun);
1161 pos += scnprintf(buf + pos, bufsz - pos,
1162 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
1163 le32_to_cpu(ofdm->sent_ack_cnt),
1164 accum_ofdm->sent_ack_cnt);
1165 pos += scnprintf(buf + pos, bufsz - pos,
1166 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
1167 le32_to_cpu(ofdm->sent_cts_cnt),
1168 accum_ofdm->sent_cts_cnt);
1169 pos += scnprintf(buf + pos, bufsz - pos,
1170 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
1171 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1172 accum_ofdm->sent_ba_rsp_cnt);
1173 pos += scnprintf(buf + pos, bufsz - pos,
1174 "dsp_self_kill:\t\t%u\t\t\t%u\n",
1175 le32_to_cpu(ofdm->dsp_self_kill),
1176 accum_ofdm->dsp_self_kill);
1177 pos += scnprintf(buf + pos, bufsz - pos,
1178 "mh_format_err:\t\t%u\t\t\t%u\n",
1179 le32_to_cpu(ofdm->mh_format_err),
1180 accum_ofdm->mh_format_err);
1181 pos += scnprintf(buf + pos, bufsz - pos,
1182 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
1183 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1184 accum_ofdm->re_acq_main_rssi_sum);
1144 1185
1145 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n"); 1186 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
1146 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n", 1187 pos += scnprintf(buf + pos, bufsz - pos,
1147 le32_to_cpu(cck->ina_cnt)); 1188 "\t\t\tcurrent\t\t\taccumulative\n");
1148 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n", 1189 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
1149 le32_to_cpu(cck->fina_cnt)); 1190 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
1150 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1191 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
1151 le32_to_cpu(cck->plcp_err)); 1192 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
1152 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1193 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1153 le32_to_cpu(cck->crc32_err)); 1194 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
1154 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1195 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1155 le32_to_cpu(cck->overrun_err)); 1196 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
1156 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1197 pos += scnprintf(buf + pos, bufsz - pos,
1157 le32_to_cpu(cck->early_overrun_err)); 1198 "overrun_err:\t\t%u\t\t\t%u\n",
1158 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1199 le32_to_cpu(cck->overrun_err),
1159 le32_to_cpu(cck->crc32_good)); 1200 accum_cck->overrun_err);
1160 pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n", 1201 pos += scnprintf(buf + pos, bufsz - pos,
1161 le32_to_cpu(cck->false_alarm_cnt)); 1202 "early_overrun_err:\t%u\t\t\t%u\n",
1162 pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n", 1203 le32_to_cpu(cck->early_overrun_err),
1163 le32_to_cpu(cck->fina_sync_err_cnt)); 1204 accum_cck->early_overrun_err);
1164 pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n", 1205 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1165 le32_to_cpu(cck->sfd_timeout)); 1206 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
1166 pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n", 1207 pos += scnprintf(buf + pos, bufsz - pos,
1167 le32_to_cpu(cck->fina_timeout)); 1208 "false_alarm_cnt:\t%u\t\t\t%u\n",
1168 pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n", 1209 le32_to_cpu(cck->false_alarm_cnt),
1169 le32_to_cpu(cck->unresponded_rts)); 1210 accum_cck->false_alarm_cnt);
1170 pos += scnprintf(buf + pos, bufsz - pos, 1211 pos += scnprintf(buf + pos, bufsz - pos,
1171 "rxe_frame_limit_overrun: %u\n", 1212 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
1172 le32_to_cpu(cck->rxe_frame_limit_overrun)); 1213 le32_to_cpu(cck->fina_sync_err_cnt),
1173 pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n", 1214 accum_cck->fina_sync_err_cnt);
1174 le32_to_cpu(cck->sent_ack_cnt)); 1215 pos += scnprintf(buf + pos, bufsz - pos,
1175 pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n", 1216 "sfd_timeout:\t\t%u\t\t\t%u\n",
1176 le32_to_cpu(cck->sent_cts_cnt)); 1217 le32_to_cpu(cck->sfd_timeout),
1177 pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n", 1218 accum_cck->sfd_timeout);
1178 le32_to_cpu(cck->sent_ba_rsp_cnt)); 1219 pos += scnprintf(buf + pos, bufsz - pos,
1179 pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n", 1220 "fina_timeout:\t\t%u\t\t\t%u\n",
1180 le32_to_cpu(cck->dsp_self_kill)); 1221 le32_to_cpu(cck->fina_timeout),
1181 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1222 accum_cck->fina_timeout);
1182 le32_to_cpu(cck->mh_format_err)); 1223 pos += scnprintf(buf + pos, bufsz - pos,
1183 pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n", 1224 "unresponded_rts:\t%u\t\t\t%u\n",
1184 le32_to_cpu(cck->re_acq_main_rssi_sum)); 1225 le32_to_cpu(cck->unresponded_rts),
1226 accum_cck->unresponded_rts);
1227 pos += scnprintf(buf + pos, bufsz - pos,
1228 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
1229 le32_to_cpu(cck->rxe_frame_limit_overrun),
1230 accum_cck->rxe_frame_limit_overrun);
1231 pos += scnprintf(buf + pos, bufsz - pos,
1232 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
1233 le32_to_cpu(cck->sent_ack_cnt),
1234 accum_cck->sent_ack_cnt);
1235 pos += scnprintf(buf + pos, bufsz - pos,
1236 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
1237 le32_to_cpu(cck->sent_cts_cnt),
1238 accum_cck->sent_cts_cnt);
1239 pos += scnprintf(buf + pos, bufsz - pos,
1240 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
1241 le32_to_cpu(cck->sent_ba_rsp_cnt),
1242 accum_cck->sent_ba_rsp_cnt);
1243 pos += scnprintf(buf + pos, bufsz - pos,
1244 "dsp_self_kill:\t\t%u\t\t\t%u\n",
1245 le32_to_cpu(cck->dsp_self_kill),
1246 accum_cck->dsp_self_kill);
1247 pos += scnprintf(buf + pos, bufsz - pos,
1248 "mh_format_err:\t\t%u\t\t\t%u\n",
1249 le32_to_cpu(cck->mh_format_err),
1250 accum_cck->mh_format_err);
1251 pos += scnprintf(buf + pos, bufsz - pos,
1252 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
1253 le32_to_cpu(cck->re_acq_main_rssi_sum),
1254 accum_cck->re_acq_main_rssi_sum);
1185 1255
1186 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n"); 1256 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
1187 pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts: %u\n", 1257 pos += scnprintf(buf + pos, bufsz - pos,
1188 le32_to_cpu(general->bogus_cts)); 1258 "\t\t\tcurrent\t\t\taccumulative\n");
1189 pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack: %u\n", 1259 pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
1190 le32_to_cpu(general->bogus_ack)); 1260 le32_to_cpu(general->bogus_cts),
1191 pos += scnprintf(buf + pos, bufsz - pos, "non_bssid_frames: %u\n", 1261 accum_general->bogus_cts);
1192 le32_to_cpu(general->non_bssid_frames)); 1262 pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
1193 pos += scnprintf(buf + pos, bufsz - pos, "filtered_frames: %u\n", 1263 le32_to_cpu(general->bogus_ack),
1194 le32_to_cpu(general->filtered_frames)); 1264 accum_general->bogus_ack);
1195 pos += scnprintf(buf + pos, bufsz - pos, "non_channel_beacons: %u\n", 1265 pos += scnprintf(buf + pos, bufsz - pos,
1196 le32_to_cpu(general->non_channel_beacons)); 1266 "non_bssid_frames:\t%u\t\t\t%u\n",
1197 pos += scnprintf(buf + pos, bufsz - pos, "channel_beacons: %u\n", 1267 le32_to_cpu(general->non_bssid_frames),
1198 le32_to_cpu(general->channel_beacons)); 1268 accum_general->non_bssid_frames);
1199 pos += scnprintf(buf + pos, bufsz - pos, "num_missed_bcon: %u\n", 1269 pos += scnprintf(buf + pos, bufsz - pos,
1200 le32_to_cpu(general->num_missed_bcon)); 1270 "filtered_frames:\t%u\t\t\t%u\n",
1201 pos += scnprintf(buf + pos, bufsz - pos, 1271 le32_to_cpu(general->filtered_frames),
1202 "adc_rx_saturation_time: %u\n", 1272 accum_general->filtered_frames);
1203 le32_to_cpu(general->adc_rx_saturation_time)); 1273 pos += scnprintf(buf + pos, bufsz - pos,
1204 pos += scnprintf(buf + pos, bufsz - pos, 1274 "non_channel_beacons:\t%u\t\t\t%u\n",
1205 "ina_detection_search_time: %u\n", 1275 le32_to_cpu(general->non_channel_beacons),
1206 le32_to_cpu(general->ina_detection_search_time)); 1276 accum_general->non_channel_beacons);
1207 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_a: %u\n", 1277 pos += scnprintf(buf + pos, bufsz - pos,
1208 le32_to_cpu(general->beacon_silence_rssi_a)); 1278 "channel_beacons:\t%u\t\t\t%u\n",
1209 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_b: %u\n", 1279 le32_to_cpu(general->channel_beacons),
1210 le32_to_cpu(general->beacon_silence_rssi_b)); 1280 accum_general->channel_beacons);
1211 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_c: %u\n", 1281 pos += scnprintf(buf + pos, bufsz - pos,
1212 le32_to_cpu(general->beacon_silence_rssi_c)); 1282 "num_missed_bcon:\t%u\t\t\t%u\n",
1213 pos += scnprintf(buf + pos, bufsz - pos, 1283 le32_to_cpu(general->num_missed_bcon),
1214 "interference_data_flag: %u\n", 1284 accum_general->num_missed_bcon);
1215 le32_to_cpu(general->interference_data_flag)); 1285 pos += scnprintf(buf + pos, bufsz - pos,
1216 pos += scnprintf(buf + pos, bufsz - pos, "channel_load: %u\n", 1286 "adc_rx_saturation_time:\t%u\t\t\t%u\n",
1217 le32_to_cpu(general->channel_load)); 1287 le32_to_cpu(general->adc_rx_saturation_time),
1218 pos += scnprintf(buf + pos, bufsz - pos, "dsp_false_alarms: %u\n", 1288 accum_general->adc_rx_saturation_time);
1219 le32_to_cpu(general->dsp_false_alarms)); 1289 pos += scnprintf(buf + pos, bufsz - pos,
1220 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_a: %u\n", 1290 "ina_detect_search_tm:\t%u\t\t\t%u\n",
1221 le32_to_cpu(general->beacon_rssi_a)); 1291 le32_to_cpu(general->ina_detection_search_time),
1222 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_b: %u\n", 1292 accum_general->ina_detection_search_time);
1223 le32_to_cpu(general->beacon_rssi_b)); 1293 pos += scnprintf(buf + pos, bufsz - pos,
1224 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_c: %u\n", 1294 "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
1225 le32_to_cpu(general->beacon_rssi_c)); 1295 le32_to_cpu(general->beacon_silence_rssi_a),
1226 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_a: %u\n", 1296 accum_general->beacon_silence_rssi_a);
1227 le32_to_cpu(general->beacon_energy_a)); 1297 pos += scnprintf(buf + pos, bufsz - pos,
1228 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_b: %u\n", 1298 "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
1229 le32_to_cpu(general->beacon_energy_b)); 1299 le32_to_cpu(general->beacon_silence_rssi_b),
1230 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_c: %u\n", 1300 accum_general->beacon_silence_rssi_b);
1231 le32_to_cpu(general->beacon_energy_c)); 1301 pos += scnprintf(buf + pos, bufsz - pos,
1302 "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
1303 le32_to_cpu(general->beacon_silence_rssi_c),
1304 accum_general->beacon_silence_rssi_c);
1305 pos += scnprintf(buf + pos, bufsz - pos,
1306 "interference_data_flag:\t%u\t\t\t%u\n",
1307 le32_to_cpu(general->interference_data_flag),
1308 accum_general->interference_data_flag);
1309 pos += scnprintf(buf + pos, bufsz - pos,
1310 "channel_load:\t\t%u\t\t\t%u\n",
1311 le32_to_cpu(general->channel_load),
1312 accum_general->channel_load);
1313 pos += scnprintf(buf + pos, bufsz - pos,
1314 "dsp_false_alarms:\t%u\t\t\t%u\n",
1315 le32_to_cpu(general->dsp_false_alarms),
1316 accum_general->dsp_false_alarms);
1317 pos += scnprintf(buf + pos, bufsz - pos,
1318 "beacon_rssi_a:\t\t%u\t\t\t%u\n",
1319 le32_to_cpu(general->beacon_rssi_a),
1320 accum_general->beacon_rssi_a);
1321 pos += scnprintf(buf + pos, bufsz - pos,
1322 "beacon_rssi_b:\t\t%u\t\t\t%u\n",
1323 le32_to_cpu(general->beacon_rssi_b),
1324 accum_general->beacon_rssi_b);
1325 pos += scnprintf(buf + pos, bufsz - pos,
1326 "beacon_rssi_c:\t\t%u\t\t\t%u\n",
1327 le32_to_cpu(general->beacon_rssi_c),
1328 accum_general->beacon_rssi_c);
1329 pos += scnprintf(buf + pos, bufsz - pos,
1330 "beacon_energy_a:\t%u\t\t\t%u\n",
1331 le32_to_cpu(general->beacon_energy_a),
1332 accum_general->beacon_energy_a);
1333 pos += scnprintf(buf + pos, bufsz - pos,
1334 "beacon_energy_b:\t%u\t\t\t%u\n",
1335 le32_to_cpu(general->beacon_energy_b),
1336 accum_general->beacon_energy_b);
1337 pos += scnprintf(buf + pos, bufsz - pos,
1338 "beacon_energy_c:\t%u\t\t\t%u\n",
1339 le32_to_cpu(general->beacon_energy_c),
1340 accum_general->beacon_energy_c);
1232 1341
1233 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n"); 1342 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
1234 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1343 pos += scnprintf(buf + pos, bufsz - pos,
1235 le32_to_cpu(ht->plcp_err)); 1344 "\t\t\tcurrent\t\t\taccumulative\n");
1236 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1345 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1237 le32_to_cpu(ht->overrun_err)); 1346 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
1238 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1347 pos += scnprintf(buf + pos, bufsz - pos,
1239 le32_to_cpu(ht->early_overrun_err)); 1348 "overrun_err:\t\t%u\t\t\t%u\n",
1240 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1349 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
1241 le32_to_cpu(ht->crc32_good)); 1350 pos += scnprintf(buf + pos, bufsz - pos,
1242 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1351 "early_overrun_err:\t%u\t\t\t%u\n",
1243 le32_to_cpu(ht->crc32_err)); 1352 le32_to_cpu(ht->early_overrun_err),
1244 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1353 accum_ht->early_overrun_err);
1245 le32_to_cpu(ht->mh_format_err)); 1354 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1246 pos += scnprintf(buf + pos, bufsz - pos, "agg_crc32_good: %u\n", 1355 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
1247 le32_to_cpu(ht->agg_crc32_good)); 1356 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1248 pos += scnprintf(buf + pos, bufsz - pos, "agg_mpdu_cnt: %u\n", 1357 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
1249 le32_to_cpu(ht->agg_mpdu_cnt)); 1358 pos += scnprintf(buf + pos, bufsz - pos,
1250 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt: %u\n", 1359 "mh_format_err:\t\t%u\t\t\t%u\n",
1251 le32_to_cpu(ht->agg_cnt)); 1360 le32_to_cpu(ht->mh_format_err),
1361 accum_ht->mh_format_err);
1362 pos += scnprintf(buf + pos, bufsz - pos,
1363 "agg_crc32_good:\t\t%u\t\t\t%u\n",
1364 le32_to_cpu(ht->agg_crc32_good),
1365 accum_ht->agg_crc32_good);
1366 pos += scnprintf(buf + pos, bufsz - pos,
1367 "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
1368 le32_to_cpu(ht->agg_mpdu_cnt),
1369 accum_ht->agg_mpdu_cnt);
1370 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
1371 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
1252 1372
1253 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1373 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1254 kfree(buf); 1374 kfree(buf);
@@ -1264,14 +1384,14 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1264 char *buf; 1384 char *buf;
1265 int bufsz = (sizeof(struct statistics_tx) * 24) + 250; 1385 int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
1266 ssize_t ret; 1386 ssize_t ret;
1267 struct statistics_tx *tx; 1387 struct statistics_tx *tx, *accum_tx;
1268 1388
1269 if (!iwl_is_alive(priv)) 1389 if (!iwl_is_alive(priv))
1270 return -EAGAIN; 1390 return -EAGAIN;
1271 1391
1272 /* make request to uCode to retrieve statistics information */ 1392 /* make request to uCode to retrieve statistics information */
1273 mutex_lock(&priv->mutex); 1393 mutex_lock(&priv->mutex);
1274 ret = iwl_send_statistics_request(priv, 0); 1394 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1275 mutex_unlock(&priv->mutex); 1395 mutex_unlock(&priv->mutex);
1276 1396
1277 if (ret) { 1397 if (ret) {
@@ -1290,62 +1410,107 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1290 * might not reflect the current uCode activity 1410 * might not reflect the current uCode activity
1291 */ 1411 */
1292 tx = &priv->statistics.tx; 1412 tx = &priv->statistics.tx;
1413 accum_tx = &priv->accum_statistics.tx;
1293 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1414 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1294 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n"); 1415 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
1295 pos += scnprintf(buf + pos, bufsz - pos, "preamble: %u\n", 1416 pos += scnprintf(buf + pos, bufsz - pos,
1296 le32_to_cpu(tx->preamble_cnt)); 1417 "\t\t\tcurrent\t\t\taccumulative\n");
1297 pos += scnprintf(buf + pos, bufsz - pos, "rx_detected_cnt: %u\n", 1418 pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
1298 le32_to_cpu(tx->rx_detected_cnt)); 1419 le32_to_cpu(tx->preamble_cnt),
1299 pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_defer_cnt: %u\n", 1420 accum_tx->preamble_cnt);
1300 le32_to_cpu(tx->bt_prio_defer_cnt)); 1421 pos += scnprintf(buf + pos, bufsz - pos,
1301 pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_kill_cnt: %u\n", 1422 "rx_detected_cnt:\t\t%u\t\t\t%u\n",
1302 le32_to_cpu(tx->bt_prio_kill_cnt)); 1423 le32_to_cpu(tx->rx_detected_cnt),
1303 pos += scnprintf(buf + pos, bufsz - pos, "few_bytes_cnt: %u\n", 1424 accum_tx->rx_detected_cnt);
1304 le32_to_cpu(tx->few_bytes_cnt)); 1425 pos += scnprintf(buf + pos, bufsz - pos,
1305 pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout: %u\n", 1426 "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
1306 le32_to_cpu(tx->cts_timeout)); 1427 le32_to_cpu(tx->bt_prio_defer_cnt),
1307 pos += scnprintf(buf + pos, bufsz - pos, "ack_timeout: %u\n", 1428 accum_tx->bt_prio_defer_cnt);
1308 le32_to_cpu(tx->ack_timeout)); 1429 pos += scnprintf(buf + pos, bufsz - pos,
1309 pos += scnprintf(buf + pos, bufsz - pos, "expected_ack_cnt: %u\n", 1430 "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
1310 le32_to_cpu(tx->expected_ack_cnt)); 1431 le32_to_cpu(tx->bt_prio_kill_cnt),
1311 pos += scnprintf(buf + pos, bufsz - pos, "actual_ack_cnt: %u\n", 1432 accum_tx->bt_prio_kill_cnt);
1312 le32_to_cpu(tx->actual_ack_cnt)); 1433 pos += scnprintf(buf + pos, bufsz - pos,
1313 pos += scnprintf(buf + pos, bufsz - pos, "dump_msdu_cnt: %u\n", 1434 "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
1314 le32_to_cpu(tx->dump_msdu_cnt)); 1435 le32_to_cpu(tx->few_bytes_cnt),
1315 pos += scnprintf(buf + pos, bufsz - pos, 1436 accum_tx->few_bytes_cnt);
1316 "burst_abort_next_frame_mismatch_cnt: %u\n", 1437 pos += scnprintf(buf + pos, bufsz - pos,
1317 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt)); 1438 "cts_timeout:\t\t\t%u\t\t\t%u\n",
1318 pos += scnprintf(buf + pos, bufsz - pos, 1439 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
1319 "burst_abort_missing_next_frame_cnt: %u\n", 1440 pos += scnprintf(buf + pos, bufsz - pos,
1320 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt)); 1441 "ack_timeout:\t\t\t%u\t\t\t%u\n",
1321 pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout_collision: %u\n", 1442 le32_to_cpu(tx->ack_timeout),
1322 le32_to_cpu(tx->cts_timeout_collision)); 1443 accum_tx->ack_timeout);
1323 pos += scnprintf(buf + pos, bufsz - pos, 1444 pos += scnprintf(buf + pos, bufsz - pos,
1324 "ack_or_ba_timeout_collision: %u\n", 1445 "expected_ack_cnt:\t\t%u\t\t\t%u\n",
1325 le32_to_cpu(tx->ack_or_ba_timeout_collision)); 1446 le32_to_cpu(tx->expected_ack_cnt),
1326 pos += scnprintf(buf + pos, bufsz - pos, "agg ba_timeout: %u\n", 1447 accum_tx->expected_ack_cnt);
1327 le32_to_cpu(tx->agg.ba_timeout)); 1448 pos += scnprintf(buf + pos, bufsz - pos,
1328 pos += scnprintf(buf + pos, bufsz - pos, 1449 "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
1329 "agg ba_reschedule_frames: %u\n", 1450 le32_to_cpu(tx->actual_ack_cnt),
1330 le32_to_cpu(tx->agg.ba_reschedule_frames)); 1451 accum_tx->actual_ack_cnt);
1331 pos += scnprintf(buf + pos, bufsz - pos, 1452 pos += scnprintf(buf + pos, bufsz - pos,
1332 "agg scd_query_agg_frame_cnt: %u\n", 1453 "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
1333 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt)); 1454 le32_to_cpu(tx->dump_msdu_cnt),
1334 pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_no_agg: %u\n", 1455 accum_tx->dump_msdu_cnt);
1335 le32_to_cpu(tx->agg.scd_query_no_agg)); 1456 pos += scnprintf(buf + pos, bufsz - pos,
1336 pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_agg: %u\n", 1457 "abort_nxt_frame_mismatch:"
1337 le32_to_cpu(tx->agg.scd_query_agg)); 1458 "\t%u\t\t\t%u\n",
1338 pos += scnprintf(buf + pos, bufsz - pos, 1459 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1339 "agg scd_query_mismatch: %u\n", 1460 accum_tx->burst_abort_next_frame_mismatch_cnt);
1340 le32_to_cpu(tx->agg.scd_query_mismatch)); 1461 pos += scnprintf(buf + pos, bufsz - pos,
1341 pos += scnprintf(buf + pos, bufsz - pos, "agg frame_not_ready: %u\n", 1462 "abort_missing_nxt_frame:"
1342 le32_to_cpu(tx->agg.frame_not_ready)); 1463 "\t%u\t\t\t%u\n",
1343 pos += scnprintf(buf + pos, bufsz - pos, "agg underrun: %u\n", 1464 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1344 le32_to_cpu(tx->agg.underrun)); 1465 accum_tx->burst_abort_missing_next_frame_cnt);
1345 pos += scnprintf(buf + pos, bufsz - pos, "agg bt_prio_kill: %u\n", 1466 pos += scnprintf(buf + pos, bufsz - pos,
1346 le32_to_cpu(tx->agg.bt_prio_kill)); 1467 "cts_timeout_collision:\t\t%u\t\t\t%u\n",
1347 pos += scnprintf(buf + pos, bufsz - pos, "agg rx_ba_rsp_cnt: %u\n", 1468 le32_to_cpu(tx->cts_timeout_collision),
1348 le32_to_cpu(tx->agg.rx_ba_rsp_cnt)); 1469 accum_tx->cts_timeout_collision);
1470 pos += scnprintf(buf + pos, bufsz - pos,
1471 "ack_ba_timeout_collision:\t%u\t\t\t%u\n",
1472 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1473 accum_tx->ack_or_ba_timeout_collision);
1474 pos += scnprintf(buf + pos, bufsz - pos,
1475 "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
1476 le32_to_cpu(tx->agg.ba_timeout),
1477 accum_tx->agg.ba_timeout);
1478 pos += scnprintf(buf + pos, bufsz - pos,
1479 "agg ba_resched_frames:\t\t%u\t\t\t%u\n",
1480 le32_to_cpu(tx->agg.ba_reschedule_frames),
1481 accum_tx->agg.ba_reschedule_frames);
1482 pos += scnprintf(buf + pos, bufsz - pos,
1483 "agg scd_query_agg_frame:\t%u\t\t\t%u\n",
1484 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1485 accum_tx->agg.scd_query_agg_frame_cnt);
1486 pos += scnprintf(buf + pos, bufsz - pos,
1487 "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
1488 le32_to_cpu(tx->agg.scd_query_no_agg),
1489 accum_tx->agg.scd_query_no_agg);
1490 pos += scnprintf(buf + pos, bufsz - pos,
1491 "agg scd_query_agg:\t\t%u\t\t\t%u\n",
1492 le32_to_cpu(tx->agg.scd_query_agg),
1493 accum_tx->agg.scd_query_agg);
1494 pos += scnprintf(buf + pos, bufsz - pos,
1495 "agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
1496 le32_to_cpu(tx->agg.scd_query_mismatch),
1497 accum_tx->agg.scd_query_mismatch);
1498 pos += scnprintf(buf + pos, bufsz - pos,
1499 "agg frame_not_ready:\t\t%u\t\t\t%u\n",
1500 le32_to_cpu(tx->agg.frame_not_ready),
1501 accum_tx->agg.frame_not_ready);
1502 pos += scnprintf(buf + pos, bufsz - pos,
1503 "agg underrun:\t\t\t%u\t\t\t%u\n",
1504 le32_to_cpu(tx->agg.underrun),
1505 accum_tx->agg.underrun);
1506 pos += scnprintf(buf + pos, bufsz - pos,
1507 "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
1508 le32_to_cpu(tx->agg.bt_prio_kill),
1509 accum_tx->agg.bt_prio_kill);
1510 pos += scnprintf(buf + pos, bufsz - pos,
1511 "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
1512 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1513 accum_tx->agg.rx_ba_rsp_cnt);
1349 1514
1350 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1515 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1351 kfree(buf); 1516 kfree(buf);
@@ -1361,16 +1526,16 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1361 char *buf; 1526 char *buf;
1362 int bufsz = sizeof(struct statistics_general) * 4 + 250; 1527 int bufsz = sizeof(struct statistics_general) * 4 + 250;
1363 ssize_t ret; 1528 ssize_t ret;
1364 struct statistics_general *general; 1529 struct statistics_general *general, *accum_general;
1365 struct statistics_dbg *dbg; 1530 struct statistics_dbg *dbg, *accum_dbg;
1366 struct statistics_div *div; 1531 struct statistics_div *div, *accum_div;
1367 1532
1368 if (!iwl_is_alive(priv)) 1533 if (!iwl_is_alive(priv))
1369 return -EAGAIN; 1534 return -EAGAIN;
1370 1535
1371 /* make request to uCode to retrieve statistics information */ 1536 /* make request to uCode to retrieve statistics information */
1372 mutex_lock(&priv->mutex); 1537 mutex_lock(&priv->mutex);
1373 ret = iwl_send_statistics_request(priv, 0); 1538 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1374 mutex_unlock(&priv->mutex); 1539 mutex_unlock(&priv->mutex);
1375 1540
1376 if (ret) { 1541 if (ret) {
@@ -1391,34 +1556,53 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1391 general = &priv->statistics.general; 1556 general = &priv->statistics.general;
1392 dbg = &priv->statistics.general.dbg; 1557 dbg = &priv->statistics.general.dbg;
1393 div = &priv->statistics.general.div; 1558 div = &priv->statistics.general.div;
1559 accum_general = &priv->accum_statistics.general;
1560 accum_dbg = &priv->accum_statistics.general.dbg;
1561 accum_div = &priv->accum_statistics.general.div;
1394 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1562 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1395 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n"); 1563 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
1396 pos += scnprintf(buf + pos, bufsz - pos, "temperature: %u\n", 1564 pos += scnprintf(buf + pos, bufsz - pos,
1565 "\t\t\tcurrent\t\t\taccumulative\n");
1566 pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
1397 le32_to_cpu(general->temperature)); 1567 le32_to_cpu(general->temperature));
1398 pos += scnprintf(buf + pos, bufsz - pos, "temperature_m: %u\n", 1568 pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
1399 le32_to_cpu(general->temperature_m)); 1569 le32_to_cpu(general->temperature_m));
1400 pos += scnprintf(buf + pos, bufsz - pos, "burst_check: %u\n", 1570 pos += scnprintf(buf + pos, bufsz - pos,
1401 le32_to_cpu(dbg->burst_check)); 1571 "burst_check:\t\t\t%u\t\t\t%u\n",
1402 pos += scnprintf(buf + pos, bufsz - pos, "burst_count: %u\n", 1572 le32_to_cpu(dbg->burst_check),
1403 le32_to_cpu(dbg->burst_count)); 1573 accum_dbg->burst_check);
1404 pos += scnprintf(buf + pos, bufsz - pos, "sleep_time: %u\n", 1574 pos += scnprintf(buf + pos, bufsz - pos,
1405 le32_to_cpu(general->sleep_time)); 1575 "burst_count:\t\t\t%u\t\t\t%u\n",
1406 pos += scnprintf(buf + pos, bufsz - pos, "slots_out: %u\n", 1576 le32_to_cpu(dbg->burst_count),
1407 le32_to_cpu(general->slots_out)); 1577 accum_dbg->burst_count);
1408 pos += scnprintf(buf + pos, bufsz - pos, "slots_idle: %u\n", 1578 pos += scnprintf(buf + pos, bufsz - pos,
1409 le32_to_cpu(general->slots_idle)); 1579 "sleep_time:\t\t\t%u\t\t\t%u\n",
1410 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp: %u\n", 1580 le32_to_cpu(general->sleep_time),
1581 accum_general->sleep_time);
1582 pos += scnprintf(buf + pos, bufsz - pos,
1583 "slots_out:\t\t\t%u\t\t\t%u\n",
1584 le32_to_cpu(general->slots_out),
1585 accum_general->slots_out);
1586 pos += scnprintf(buf + pos, bufsz - pos,
1587 "slots_idle:\t\t\t%u\t\t\t%u\n",
1588 le32_to_cpu(general->slots_idle),
1589 accum_general->slots_idle);
1590 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
1411 le32_to_cpu(general->ttl_timestamp)); 1591 le32_to_cpu(general->ttl_timestamp));
1412 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a: %u\n", 1592 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
1413 le32_to_cpu(div->tx_on_a)); 1593 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
1414 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b: %u\n", 1594 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
1415 le32_to_cpu(div->tx_on_b)); 1595 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
1416 pos += scnprintf(buf + pos, bufsz - pos, "exec_time: %u\n", 1596 pos += scnprintf(buf + pos, bufsz - pos,
1417 le32_to_cpu(div->exec_time)); 1597 "exec_time:\t\t\t%u\t\t\t%u\n",
1418 pos += scnprintf(buf + pos, bufsz - pos, "probe_time: %u\n", 1598 le32_to_cpu(div->exec_time), accum_div->exec_time);
1419 le32_to_cpu(div->probe_time)); 1599 pos += scnprintf(buf + pos, bufsz - pos,
1420 pos += scnprintf(buf + pos, bufsz - pos, "rx_enable_counter: %u\n", 1600 "probe_time:\t\t\t%u\t\t\t%u\n",
1421 le32_to_cpu(general->rx_enable_counter)); 1601 le32_to_cpu(div->probe_time), accum_div->probe_time);
1602 pos += scnprintf(buf + pos, bufsz - pos,
1603 "rx_enable_counter:\t\t%u\t\t\t%u\n",
1604 le32_to_cpu(general->rx_enable_counter),
1605 accum_general->rx_enable_counter);
1422 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1606 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1423 kfree(buf); 1607 kfree(buf);
1424 return ret; 1608 return ret;
@@ -1579,7 +1763,7 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1579 else { 1763 else {
1580 /* make request to uCode to retrieve statistics information */ 1764 /* make request to uCode to retrieve statistics information */
1581 mutex_lock(&priv->mutex); 1765 mutex_lock(&priv->mutex);
1582 ret = iwl_send_statistics_request(priv, 0); 1766 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1583 mutex_unlock(&priv->mutex); 1767 mutex_unlock(&priv->mutex);
1584 1768
1585 if (ret) { 1769 if (ret) {
@@ -1614,8 +1798,55 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1614 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1798 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1615} 1799}
1616 1800
1617DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics); 1801static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1618DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics); 1802 char __user *user_buf,
1803 size_t count, loff_t *ppos)
1804{
1805 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1806 char buf[60];
1807 int pos = 0;
1808 const size_t bufsz = sizeof(buf);
1809 u32 pwrsave_status;
1810
1811 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1812 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1813
1814 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1815 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1816 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1817 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1818 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1819 "error");
1820
1821 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1822}
1823
1824static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
1825 const char __user *user_buf,
1826 size_t count, loff_t *ppos)
1827{
1828 struct iwl_priv *priv = file->private_data;
1829 char buf[8];
1830 int buf_size;
1831 int clear;
1832
1833 memset(buf, 0, sizeof(buf));
1834 buf_size = min(count, sizeof(buf) - 1);
1835 if (copy_from_user(buf, user_buf, buf_size))
1836 return -EFAULT;
1837 if (sscanf(buf, "%d", &clear) != 1)
1838 return -EFAULT;
1839
1840 /* make request to uCode to retrieve statistics information */
1841 mutex_lock(&priv->mutex);
1842 iwl_send_statistics_request(priv, CMD_SYNC, true);
1843 mutex_unlock(&priv->mutex);
1844
1845 return count;
1846}
1847
1848DEBUGFS_READ_FILE_OPS(rx_statistics);
1849DEBUGFS_READ_FILE_OPS(tx_statistics);
1619DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1850DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
1620DEBUGFS_READ_FILE_OPS(rx_queue); 1851DEBUGFS_READ_FILE_OPS(rx_queue);
1621DEBUGFS_READ_FILE_OPS(tx_queue); 1852DEBUGFS_READ_FILE_OPS(tx_queue);
@@ -1625,6 +1856,9 @@ DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1625DEBUGFS_READ_FILE_OPS(sensitivity); 1856DEBUGFS_READ_FILE_OPS(sensitivity);
1626DEBUGFS_READ_FILE_OPS(chain_noise); 1857DEBUGFS_READ_FILE_OPS(chain_noise);
1627DEBUGFS_READ_FILE_OPS(tx_power); 1858DEBUGFS_READ_FILE_OPS(tx_power);
1859DEBUGFS_READ_FILE_OPS(power_save_status);
1860DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
1861DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
1628 1862
1629/* 1863/*
1630 * Create the debugfs files and directories 1864 * Create the debugfs files and directories
@@ -1653,33 +1887,34 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1653 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv); 1887 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
1654 DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv); 1888 DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
1655 DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv); 1889 DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
1656 DEBUGFS_ADD_FILE(nvm, data); 1890 DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
1657 DEBUGFS_ADD_FILE(sram, data); 1891 DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
1658 DEBUGFS_ADD_FILE(log_event, data); 1892 DEBUGFS_ADD_FILE(log_event, data, S_IWUSR);
1659 DEBUGFS_ADD_FILE(stations, data); 1893 DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
1660 DEBUGFS_ADD_FILE(channels, data); 1894 DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
1661 DEBUGFS_ADD_FILE(status, data); 1895 DEBUGFS_ADD_FILE(status, data, S_IRUSR);
1662 DEBUGFS_ADD_FILE(interrupt, data); 1896 DEBUGFS_ADD_FILE(interrupt, data, S_IWUSR | S_IRUSR);
1663 DEBUGFS_ADD_FILE(qos, data); 1897 DEBUGFS_ADD_FILE(qos, data, S_IRUSR);
1664#ifdef CONFIG_IWLWIFI_LEDS 1898 DEBUGFS_ADD_FILE(led, data, S_IRUSR);
1665 DEBUGFS_ADD_FILE(led, data); 1899 DEBUGFS_ADD_FILE(sleep_level_override, data, S_IWUSR | S_IRUSR);
1666#endif 1900 DEBUGFS_ADD_FILE(current_sleep_command, data, S_IRUSR);
1667 DEBUGFS_ADD_FILE(sleep_level_override, data); 1901 DEBUGFS_ADD_FILE(thermal_throttling, data, S_IRUSR);
1668 DEBUGFS_ADD_FILE(current_sleep_command, data); 1902 DEBUGFS_ADD_FILE(disable_ht40, data, S_IWUSR | S_IRUSR);
1669 DEBUGFS_ADD_FILE(thermal_throttling, data); 1903 DEBUGFS_ADD_FILE(rx_statistics, debug, S_IRUSR);
1670 DEBUGFS_ADD_FILE(disable_ht40, data); 1904 DEBUGFS_ADD_FILE(tx_statistics, debug, S_IRUSR);
1671 DEBUGFS_ADD_FILE(rx_statistics, debug); 1905 DEBUGFS_ADD_FILE(traffic_log, debug, S_IWUSR | S_IRUSR);
1672 DEBUGFS_ADD_FILE(tx_statistics, debug); 1906 DEBUGFS_ADD_FILE(rx_queue, debug, S_IRUSR);
1673 DEBUGFS_ADD_FILE(traffic_log, debug); 1907 DEBUGFS_ADD_FILE(tx_queue, debug, S_IRUSR);
1674 DEBUGFS_ADD_FILE(rx_queue, debug); 1908 DEBUGFS_ADD_FILE(tx_power, debug, S_IRUSR);
1675 DEBUGFS_ADD_FILE(tx_queue, debug); 1909 DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
1676 DEBUGFS_ADD_FILE(tx_power, debug); 1910 DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
1911 DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
1677 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1912 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1678 DEBUGFS_ADD_FILE(ucode_rx_stats, debug); 1913 DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
1679 DEBUGFS_ADD_FILE(ucode_tx_stats, debug); 1914 DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
1680 DEBUGFS_ADD_FILE(ucode_general_stats, debug); 1915 DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
1681 DEBUGFS_ADD_FILE(sensitivity, debug); 1916 DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
1682 DEBUGFS_ADD_FILE(chain_noise, debug); 1917 DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
1683 } 1918 }
1684 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal); 1919 DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
1685 DEBUGFS_ADD_BOOL(disable_chain_noise, rf, 1920 DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
@@ -1716,9 +1951,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1716 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status); 1951 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
1717 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt); 1952 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
1718 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos); 1953 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
1719#ifdef CONFIG_IWLWIFI_LEDS
1720 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led); 1954 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
1721#endif
1722 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling); 1955 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
1723 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40); 1956 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
1724 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 1957 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
@@ -1728,6 +1961,11 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1728 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue); 1961 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
1729 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue); 1962 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
1730 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power); 1963 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
1964 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
1965 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1966 file_clear_ucode_statistics);
1967 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1968 file_clear_traffic_statistics);
1731 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1969 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1732 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 1970 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1733 file_ucode_rx_stats); 1971 file_ucode_rx_stats);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 028d50599550..2673e9a4db92 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,7 +43,6 @@
43#include "iwl-debug.h" 43#include "iwl-debug.h"
44#include "iwl-4965-hw.h" 44#include "iwl-4965-hw.h"
45#include "iwl-3945-hw.h" 45#include "iwl-3945-hw.h"
46#include "iwl-3945-led.h"
47#include "iwl-led.h" 46#include "iwl-led.h"
48#include "iwl-power.h" 47#include "iwl-power.h"
49#include "iwl-agn-rs.h" 48#include "iwl-agn-rs.h"
@@ -53,21 +52,23 @@ extern struct iwl_cfg iwl4965_agn_cfg;
53extern struct iwl_cfg iwl5300_agn_cfg; 52extern struct iwl_cfg iwl5300_agn_cfg;
54extern struct iwl_cfg iwl5100_agn_cfg; 53extern struct iwl_cfg iwl5100_agn_cfg;
55extern struct iwl_cfg iwl5350_agn_cfg; 54extern struct iwl_cfg iwl5350_agn_cfg;
56extern struct iwl_cfg iwl5100_bg_cfg; 55extern struct iwl_cfg iwl5100_bgn_cfg;
57extern struct iwl_cfg iwl5100_abg_cfg; 56extern struct iwl_cfg iwl5100_abg_cfg;
58extern struct iwl_cfg iwl5150_agn_cfg; 57extern struct iwl_cfg iwl5150_agn_cfg;
59extern struct iwl_cfg iwl6000h_2agn_cfg; 58extern struct iwl_cfg iwl5150_abg_cfg;
60extern struct iwl_cfg iwl6000i_2agn_cfg; 59extern struct iwl_cfg iwl6000i_2agn_cfg;
60extern struct iwl_cfg iwl6000i_2abg_cfg;
61extern struct iwl_cfg iwl6000i_2bg_cfg;
61extern struct iwl_cfg iwl6000_3agn_cfg; 62extern struct iwl_cfg iwl6000_3agn_cfg;
62extern struct iwl_cfg iwl6050_2agn_cfg; 63extern struct iwl_cfg iwl6050_2agn_cfg;
63extern struct iwl_cfg iwl6050_3agn_cfg; 64extern struct iwl_cfg iwl6050_2abg_cfg;
64extern struct iwl_cfg iwl1000_bgn_cfg; 65extern struct iwl_cfg iwl1000_bgn_cfg;
66extern struct iwl_cfg iwl1000_bg_cfg;
65 67
66struct iwl_tx_queue; 68struct iwl_tx_queue;
67 69
68/* shared structures from iwl-5000.c */ 70/* shared structures from iwl-5000.c */
69extern struct iwl_mod_params iwl50_mod_params; 71extern struct iwl_mod_params iwl50_mod_params;
70extern struct iwl_ops iwl5000_ops;
71extern struct iwl_ucode_ops iwl5000_ucode; 72extern struct iwl_ucode_ops iwl5000_ucode;
72extern struct iwl_lib_ops iwl5000_lib; 73extern struct iwl_lib_ops iwl5000_lib;
73extern struct iwl_hcmd_ops iwl5000_hcmd; 74extern struct iwl_hcmd_ops iwl5000_hcmd;
@@ -81,9 +82,6 @@ extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
81 __le32 *tx_flags); 82 __le32 *tx_flags);
82extern int iwl5000_calc_rssi(struct iwl_priv *priv, 83extern int iwl5000_calc_rssi(struct iwl_priv *priv,
83 struct iwl_rx_phy_res *rx_resp); 84 struct iwl_rx_phy_res *rx_resp);
84extern int iwl5000_apm_init(struct iwl_priv *priv);
85extern void iwl5000_apm_stop(struct iwl_priv *priv);
86extern int iwl5000_apm_reset(struct iwl_priv *priv);
87extern void iwl5000_nic_config(struct iwl_priv *priv); 85extern void iwl5000_nic_config(struct iwl_priv *priv);
88extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv); 86extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
89extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, 87extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -144,12 +142,13 @@ extern void iwl5000_temperature(struct iwl_priv *priv);
144#define DEFAULT_LONG_RETRY_LIMIT 4U 142#define DEFAULT_LONG_RETRY_LIMIT 4U
145 143
146struct iwl_rx_mem_buffer { 144struct iwl_rx_mem_buffer {
147 dma_addr_t real_dma_addr; 145 dma_addr_t page_dma;
148 dma_addr_t aligned_dma_addr; 146 struct page *page;
149 struct sk_buff *skb;
150 struct list_head list; 147 struct list_head list;
151}; 148};
152 149
150#define rxb_addr(r) page_address(r->page)
151
153/* defined below */ 152/* defined below */
154struct iwl_device_cmd; 153struct iwl_device_cmd;
155 154
@@ -165,7 +164,7 @@ struct iwl_cmd_meta {
165 */ 164 */
166 void (*callback)(struct iwl_priv *priv, 165 void (*callback)(struct iwl_priv *priv,
167 struct iwl_device_cmd *cmd, 166 struct iwl_device_cmd *cmd,
168 struct sk_buff *skb); 167 struct iwl_rx_packet *pkt);
169 168
170 /* The CMD_SIZE_HUGE flag bit indicates that the command 169 /* The CMD_SIZE_HUGE flag bit indicates that the command
171 * structure is stored at the end of the shared queue memory. */ 170 * structure is stored at the end of the shared queue memory. */
@@ -293,9 +292,6 @@ struct iwl_channel_info {
293 292
294 /* HT40 channel info */ 293 /* HT40 channel info */
295 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */ 294 s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
296 s8 ht40_curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) */
297 s8 ht40_min_power; /* always 0 */
298 s8 ht40_scan_power; /* (dBm) eeprom, direct scans, any rate */
299 u8 ht40_flags; /* flags copied from EEPROM */ 295 u8 ht40_flags; /* flags copied from EEPROM */
300 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */ 296 u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
301 297
@@ -321,6 +317,13 @@ struct iwl_channel_info {
321 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ 317 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
322#define IWL_MIN_NUM_QUEUES 10 318#define IWL_MIN_NUM_QUEUES 10
323 319
320/*
321 * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00,
322 * the driver maps it into the appropriate device FIFO for the
323 * uCode.
324 */
325#define IWL_CMD_QUEUE_NUM 4
326
324/* Power management (not Tx power) structures */ 327/* Power management (not Tx power) structures */
325 328
326enum iwl_pwr_src { 329enum iwl_pwr_src {
@@ -356,7 +359,14 @@ enum {
356 CMD_WANT_SKB = (1 << 2), 359 CMD_WANT_SKB = (1 << 2),
357}; 360};
358 361
359#define IWL_CMD_MAX_PAYLOAD 320 362#define DEF_CMD_PAYLOAD_SIZE 320
363
364/*
365 * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
366 * SNAP header and alignment. It should also be big enough for 802.11
367 * control frames.
368 */
369#define IWL_LINK_HDR_MAX 64
360 370
361/** 371/**
362 * struct iwl_device_cmd 372 * struct iwl_device_cmd
@@ -373,7 +383,8 @@ struct iwl_device_cmd {
373 u16 val16; 383 u16 val16;
374 u32 val32; 384 u32 val32;
375 struct iwl_tx_cmd tx; 385 struct iwl_tx_cmd tx;
376 u8 payload[IWL_CMD_MAX_PAYLOAD]; 386 struct iwl6000_channel_switch_cmd chswitch;
387 u8 payload[DEF_CMD_PAYLOAD_SIZE];
377 } __attribute__ ((packed)) cmd; 388 } __attribute__ ((packed)) cmd;
378} __attribute__ ((packed)); 389} __attribute__ ((packed));
379 390
@@ -382,21 +393,15 @@ struct iwl_device_cmd {
382 393
383struct iwl_host_cmd { 394struct iwl_host_cmd {
384 const void *data; 395 const void *data;
385 struct sk_buff *reply_skb; 396 unsigned long reply_page;
386 void (*callback)(struct iwl_priv *priv, 397 void (*callback)(struct iwl_priv *priv,
387 struct iwl_device_cmd *cmd, 398 struct iwl_device_cmd *cmd,
388 struct sk_buff *skb); 399 struct iwl_rx_packet *pkt);
389 u32 flags; 400 u32 flags;
390 u16 len; 401 u16 len;
391 u8 id; 402 u8 id;
392}; 403};
393 404
394/*
395 * RX related structures and functions
396 */
397#define RX_FREE_BUFFERS 64
398#define RX_LOW_WATERMARK 8
399
400#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 405#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
401#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 406#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
402#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 407#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
@@ -502,12 +507,11 @@ union iwl_ht_rate_supp {
502#define CFG_HT_MPDU_DENSITY_4USEC (0x5) 507#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
503#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC 508#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
504 509
505struct iwl_ht_info { 510struct iwl_ht_config {
506 /* self configuration data */ 511 /* self configuration data */
507 u8 is_ht; 512 bool is_ht;
508 u8 supported_chan_width; 513 bool is_40mhz;
509 u8 sm_ps; 514 bool single_chain_sufficient;
510 struct ieee80211_mcs_info mcs;
511 /* BSS related data */ 515 /* BSS related data */
512 u8 extension_chan_offset; 516 u8 extension_chan_offset;
513 u8 ht_protection; 517 u8 ht_protection;
@@ -541,26 +545,27 @@ struct iwl_qos_info {
541 struct iwl_qosparam_cmd def_qos_parm; 545 struct iwl_qosparam_cmd def_qos_parm;
542}; 546};
543 547
544#define STA_PS_STATUS_WAKE 0
545#define STA_PS_STATUS_SLEEP 1
546
547
548struct iwl3945_station_entry {
549 struct iwl3945_addsta_cmd sta;
550 struct iwl_tid_data tid[MAX_TID_COUNT];
551 u8 used;
552 u8 ps_status;
553 struct iwl_hw_key keyinfo;
554};
555
556struct iwl_station_entry { 548struct iwl_station_entry {
557 struct iwl_addsta_cmd sta; 549 struct iwl_addsta_cmd sta;
558 struct iwl_tid_data tid[MAX_TID_COUNT]; 550 struct iwl_tid_data tid[MAX_TID_COUNT];
559 u8 used; 551 u8 used;
560 u8 ps_status;
561 struct iwl_hw_key keyinfo; 552 struct iwl_hw_key keyinfo;
562}; 553};
563 554
555/*
556 * iwl_station_priv: Driver's private station information
557 *
558 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
559 * in the structure for use by driver. This structure is places in that
560 * space.
561 */
562struct iwl_station_priv {
563 struct iwl_lq_sta lq_sta;
564 atomic_t pending_frames;
565 bool client;
566 bool asleep;
567};
568
564/* one for each uCode image (inst/data, boot/init/runtime) */ 569/* one for each uCode image (inst/data, boot/init/runtime) */
565struct fw_desc { 570struct fw_desc {
566 void *v_addr; /* access by driver */ 571 void *v_addr; /* access by driver */
@@ -622,6 +627,10 @@ struct iwl_sensitivity_ranges {
622 u16 auto_corr_max_cck_mrc; 627 u16 auto_corr_max_cck_mrc;
623 u16 auto_corr_min_cck; 628 u16 auto_corr_min_cck;
624 u16 auto_corr_min_cck_mrc; 629 u16 auto_corr_min_cck_mrc;
630
631 u16 barker_corr_th_min;
632 u16 barker_corr_th_min_mrc;
633 u16 nrg_th_cca;
625}; 634};
626 635
627 636
@@ -639,7 +648,7 @@ struct iwl_sensitivity_ranges {
639 * @valid_tx/rx_ant: usable antennas 648 * @valid_tx/rx_ant: usable antennas
640 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 649 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
641 * @max_rxq_log: Log-base-2 of max_rxq_size 650 * @max_rxq_log: Log-base-2 of max_rxq_size
642 * @rx_buf_size: Rx buffer size 651 * @rx_page_order: Rx buffer page order
643 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR 652 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
644 * @max_stations: 653 * @max_stations:
645 * @bcast_sta_id: 654 * @bcast_sta_id:
@@ -662,9 +671,8 @@ struct iwl_hw_params {
662 u8 valid_rx_ant; 671 u8 valid_rx_ant;
663 u16 max_rxq_size; 672 u16 max_rxq_size;
664 u16 max_rxq_log; 673 u16 max_rxq_log;
665 u32 rx_buf_size; 674 u32 rx_page_order;
666 u32 rx_wrt_ptr_reg; 675 u32 rx_wrt_ptr_reg;
667 u32 max_pkt_size;
668 u8 max_stations; 676 u8 max_stations;
669 u8 bcast_sta_id; 677 u8 bcast_sta_id;
670 u8 ht40_channel; 678 u8 ht40_channel;
@@ -711,7 +719,11 @@ static inline int iwl_queue_used(const struct iwl_queue *q, int i)
711 719
712static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) 720static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
713{ 721{
714 /* This is for scan command, the big buffer at end of command array */ 722 /*
723 * This is for init calibration result and scan command which
724 * required buffer > TFD_MAX_PAYLOAD_SIZE,
725 * the big buffer at end of command array
726 */
715 if (is_huge) 727 if (is_huge)
716 return q->n_window; /* must be power of 2 */ 728 return q->n_window; /* must be power of 2 */
717 729
@@ -726,9 +738,6 @@ struct iwl_dma_ptr {
726 size_t size; 738 size_t size;
727}; 739};
728 740
729#define IWL_CHANNEL_WIDTH_20MHZ 0
730#define IWL_CHANNEL_WIDTH_40MHZ 1
731
732#define IWL_OPERATION_MODE_AUTO 0 741#define IWL_OPERATION_MODE_AUTO 0
733#define IWL_OPERATION_MODE_HT_ONLY 1 742#define IWL_OPERATION_MODE_HT_ONLY 1
734#define IWL_OPERATION_MODE_MIXED 2 743#define IWL_OPERATION_MODE_MIXED 2
@@ -741,7 +750,8 @@ struct iwl_dma_ptr {
741 750
742/* Sensitivity and chain noise calibration */ 751/* Sensitivity and chain noise calibration */
743#define INITIALIZATION_VALUE 0xFFFF 752#define INITIALIZATION_VALUE 0xFFFF
744#define CAL_NUM_OF_BEACONS 20 753#define IWL4965_CAL_NUM_BEACONS 20
754#define IWL_CAL_NUM_BEACONS 16
745#define MAXIMUM_ALLOWED_PATHLOSS 15 755#define MAXIMUM_ALLOWED_PATHLOSS 15
746 756
747#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 757#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
@@ -845,6 +855,10 @@ struct iwl_sensitivity_data {
845 s32 nrg_auto_corr_silence_diff; 855 s32 nrg_auto_corr_silence_diff;
846 u32 num_in_cck_no_fa; 856 u32 num_in_cck_no_fa;
847 u32 nrg_th_ofdm; 857 u32 nrg_th_ofdm;
858
859 u16 barker_corr_th_min;
860 u16 barker_corr_th_min_mrc;
861 u16 nrg_th_cca;
848}; 862};
849 863
850/* Chain noise (differential Rx gain) calib data */ 864/* Chain noise (differential Rx gain) calib data */
@@ -894,13 +908,11 @@ enum iwl_access_mode {
894/** 908/**
895 * enum iwl_pa_type - Power Amplifier type 909 * enum iwl_pa_type - Power Amplifier type
896 * @IWL_PA_SYSTEM: based on uCode configuration 910 * @IWL_PA_SYSTEM: based on uCode configuration
897 * @IWL_PA_HYBRID: use both Internal and external PA
898 * @IWL_PA_INTERNAL: use Internal only 911 * @IWL_PA_INTERNAL: use Internal only
899 */ 912 */
900enum iwl_pa_type { 913enum iwl_pa_type {
901 IWL_PA_SYSTEM = 0, 914 IWL_PA_SYSTEM = 0,
902 IWL_PA_HYBRID = 1, 915 IWL_PA_INTERNAL = 1,
903 IWL_PA_INTERNAL = 2,
904}; 916};
905 917
906/* interrupt statistics */ 918/* interrupt statistics */
@@ -961,7 +973,16 @@ struct traffic_stats {
961}; 973};
962#endif 974#endif
963 975
964#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */ 976/*
977 * iwl_switch_rxon: "channel switch" structure
978 *
979 * @ switch_in_progress: channel switch in progress
980 * @ channel: new channel
981 */
982struct iwl_switch_rxon {
983 bool switch_in_progress;
984 __le16 channel;
985};
965 986
966struct iwl_priv { 987struct iwl_priv {
967 988
@@ -976,7 +997,7 @@ struct iwl_priv {
976 int frames_count; 997 int frames_count;
977 998
978 enum ieee80211_band band; 999 enum ieee80211_band band;
979 int alloc_rxb_skb; 1000 int alloc_rxb_page;
980 1001
981 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 1002 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
982 struct iwl_rx_mem_buffer *rxb); 1003 struct iwl_rx_mem_buffer *rxb);
@@ -1056,21 +1077,18 @@ struct iwl_priv {
1056 const struct iwl_rxon_cmd active_rxon; 1077 const struct iwl_rxon_cmd active_rxon;
1057 struct iwl_rxon_cmd staging_rxon; 1078 struct iwl_rxon_cmd staging_rxon;
1058 1079
1059 struct iwl_rxon_cmd recovery_rxon; 1080 struct iwl_switch_rxon switch_rxon;
1060 1081
1061 /* 1st responses from initialize and runtime uCode images. 1082 /* 1st responses from initialize and runtime uCode images.
1062 * 4965's initialize alive response contains some calibration data. */ 1083 * 4965's initialize alive response contains some calibration data. */
1063 struct iwl_init_alive_resp card_alive_init; 1084 struct iwl_init_alive_resp card_alive_init;
1064 struct iwl_alive_resp card_alive; 1085 struct iwl_alive_resp card_alive;
1065 1086
1066#ifdef CONFIG_IWLWIFI_LEDS
1067 unsigned long last_blink_time; 1087 unsigned long last_blink_time;
1068 u8 last_blink_rate; 1088 u8 last_blink_rate;
1069 u8 allow_blinking; 1089 u8 allow_blinking;
1070 u64 led_tpt; 1090 u64 led_tpt;
1071 struct iwl_led led[IWL_LED_TRG_MAX]; 1091
1072 unsigned int rxtxpackets;
1073#endif
1074 u16 active_rate; 1092 u16 active_rate;
1075 u16 active_rate_basic; 1093 u16 active_rate_basic;
1076 1094
@@ -1080,11 +1098,10 @@ struct iwl_priv {
1080 struct iwl_chain_noise_data chain_noise_data; 1098 struct iwl_chain_noise_data chain_noise_data;
1081 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 1099 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1082 1100
1083 struct iwl_ht_info current_ht_config; 1101 struct iwl_ht_config current_ht_config;
1084 u8 last_phy_res[100]; 1102 u8 last_phy_res[100];
1085 1103
1086 /* Rate scaling data */ 1104 /* Rate scaling data */
1087 s8 data_retry_limit;
1088 u8 retry_rate; 1105 u8 retry_rate;
1089 1106
1090 wait_queue_head_t wait_command_queue; 1107 wait_queue_head_t wait_command_queue;
@@ -1093,7 +1110,7 @@ struct iwl_priv {
1093 1110
1094 /* Rx and Tx DMA processing queues */ 1111 /* Rx and Tx DMA processing queues */
1095 struct iwl_rx_queue rxq; 1112 struct iwl_rx_queue rxq;
1096 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES]; 1113 struct iwl_tx_queue *txq;
1097 unsigned long txq_ctx_active_msk; 1114 unsigned long txq_ctx_active_msk;
1098 struct iwl_dma_ptr kw; /* keep warm address */ 1115 struct iwl_dma_ptr kw; /* keep warm address */
1099 struct iwl_dma_ptr scd_bc_tbls; 1116 struct iwl_dma_ptr scd_bc_tbls;
@@ -1116,7 +1133,9 @@ struct iwl_priv {
1116 struct iwl_tt_mgmt thermal_throttle; 1133 struct iwl_tt_mgmt thermal_throttle;
1117 1134
1118 struct iwl_notif_statistics statistics; 1135 struct iwl_notif_statistics statistics;
1119 unsigned long last_statistics_time; 1136#ifdef CONFIG_IWLWIFI_DEBUG
1137 struct iwl_notif_statistics accum_statistics;
1138#endif
1120 1139
1121 /* context information */ 1140 /* context information */
1122 u16 rates_mask; 1141 u16 rates_mask;
@@ -1216,6 +1235,7 @@ struct iwl_priv {
1216 /* TX Power */ 1235 /* TX Power */
1217 s8 tx_power_user_lmt; 1236 s8 tx_power_user_lmt;
1218 s8 tx_power_device_lmt; 1237 s8 tx_power_device_lmt;
1238 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
1219 1239
1220 1240
1221#ifdef CONFIG_IWLWIFI_DEBUG 1241#ifdef CONFIG_IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
new file mode 100644
index 000000000000..e7d88d1da15d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -0,0 +1,14 @@
1#include <linux/module.h>
2
3/* sparse doesn't like tracepoint macros */
4#ifndef __CHECKER__
5#define CREATE_TRACE_POINTS
6#include "iwl-devtrace.h"
7
8EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
9EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
10EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
11EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
12EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
13EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
14#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
new file mode 100644
index 000000000000..21361968ab7e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -0,0 +1,197 @@
1#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
2#define __IWLWIFI_DEVICE_TRACE
3
4#include <linux/tracepoint.h>
5#include "iwl-dev.h"
6
7#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
8#undef TRACE_EVENT
9#define TRACE_EVENT(name, proto, ...) \
10static inline void trace_ ## name(proto) {}
11#endif
12
13#define PRIV_ENTRY __field(struct iwl_priv *, priv)
14#define PRIV_ASSIGN __entry->priv = priv
15
16#undef TRACE_SYSTEM
17#define TRACE_SYSTEM iwlwifi_io
18
19TRACE_EVENT(iwlwifi_dev_ioread32,
20 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
21 TP_ARGS(priv, offs, val),
22 TP_STRUCT__entry(
23 PRIV_ENTRY
24 __field(u32, offs)
25 __field(u32, val)
26 ),
27 TP_fast_assign(
28 PRIV_ASSIGN;
29 __entry->offs = offs;
30 __entry->val = val;
31 ),
32 TP_printk("[%p] read io[%#x] = %#x", __entry->priv, __entry->offs, __entry->val)
33);
34
35TRACE_EVENT(iwlwifi_dev_iowrite8,
36 TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
37 TP_ARGS(priv, offs, val),
38 TP_STRUCT__entry(
39 PRIV_ENTRY
40 __field(u32, offs)
41 __field(u8, val)
42 ),
43 TP_fast_assign(
44 PRIV_ASSIGN;
45 __entry->offs = offs;
46 __entry->val = val;
47 ),
48 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
49);
50
51TRACE_EVENT(iwlwifi_dev_iowrite32,
52 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
53 TP_ARGS(priv, offs, val),
54 TP_STRUCT__entry(
55 PRIV_ENTRY
56 __field(u32, offs)
57 __field(u32, val)
58 ),
59 TP_fast_assign(
60 PRIV_ASSIGN;
61 __entry->offs = offs;
62 __entry->val = val;
63 ),
64 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
65);
66
67#undef TRACE_SYSTEM
68#define TRACE_SYSTEM iwlwifi
69
70TRACE_EVENT(iwlwifi_dev_hcmd,
71 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
72 TP_ARGS(priv, hcmd, len, flags),
73 TP_STRUCT__entry(
74 PRIV_ENTRY
75 __dynamic_array(u8, hcmd, len)
76 __field(u32, flags)
77 ),
78 TP_fast_assign(
79 PRIV_ASSIGN;
80 memcpy(__get_dynamic_array(hcmd), hcmd, len);
81 __entry->flags = flags;
82 ),
83 TP_printk("[%p] hcmd %#.2x (%ssync)",
84 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
85 __entry->flags & CMD_ASYNC ? "a" : "")
86);
87
88TRACE_EVENT(iwlwifi_dev_rx,
89 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
90 TP_ARGS(priv, rxbuf, len),
91 TP_STRUCT__entry(
92 PRIV_ENTRY
93 __dynamic_array(u8, rxbuf, len)
94 ),
95 TP_fast_assign(
96 PRIV_ASSIGN;
97 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
98 ),
99 TP_printk("[%p] RX cmd %#.2x",
100 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
101);
102
103TRACE_EVENT(iwlwifi_dev_tx,
104 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
105 void *buf0, size_t buf0_len,
106 void *buf1, size_t buf1_len),
107 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
108 TP_STRUCT__entry(
109 PRIV_ENTRY
110
111 __field(size_t, framelen)
112 __dynamic_array(u8, tfd, tfdlen)
113
114 /*
115 * Do not insert between or below these items,
116 * we want to keep the frame together (except
117 * for the possible padding).
118 */
119 __dynamic_array(u8, buf0, buf0_len)
120 __dynamic_array(u8, buf1, buf1_len)
121 ),
122 TP_fast_assign(
123 PRIV_ASSIGN;
124 __entry->framelen = buf0_len + buf1_len;
125 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
126 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
127 memcpy(__get_dynamic_array(buf1), buf1, buf0_len);
128 ),
129 TP_printk("[%p] TX %.2x (%zu bytes)",
130 __entry->priv,
131 ((u8 *)__get_dynamic_array(buf0))[0],
132 __entry->framelen)
133);
134
135TRACE_EVENT(iwlwifi_dev_ucode_error,
136 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
137 u32 data1, u32 data2, u32 line, u32 blink1,
138 u32 blink2, u32 ilink1, u32 ilink2),
139 TP_ARGS(priv, desc, time, data1, data2, line,
140 blink1, blink2, ilink1, ilink2),
141 TP_STRUCT__entry(
142 PRIV_ENTRY
143 __field(u32, desc)
144 __field(u32, time)
145 __field(u32, data1)
146 __field(u32, data2)
147 __field(u32, line)
148 __field(u32, blink1)
149 __field(u32, blink2)
150 __field(u32, ilink1)
151 __field(u32, ilink2)
152 ),
153 TP_fast_assign(
154 PRIV_ASSIGN;
155 __entry->desc = desc;
156 __entry->time = time;
157 __entry->data1 = data1;
158 __entry->data2 = data2;
159 __entry->line = line;
160 __entry->blink1 = blink1;
161 __entry->blink2 = blink2;
162 __entry->ilink1 = ilink1;
163 __entry->ilink2 = ilink2;
164 ),
165 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
166 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
167 __entry->priv, __entry->desc, __entry->time, __entry->data1,
168 __entry->data2, __entry->line, __entry->blink1,
169 __entry->blink2, __entry->ilink1, __entry->ilink2)
170);
171
172TRACE_EVENT(iwlwifi_dev_ucode_event,
173 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
174 TP_ARGS(priv, time, data, ev),
175 TP_STRUCT__entry(
176 PRIV_ENTRY
177
178 __field(u32, time)
179 __field(u32, data)
180 __field(u32, ev)
181 ),
182 TP_fast_assign(
183 PRIV_ASSIGN;
184 __entry->time = time;
185 __entry->data = data;
186 __entry->ev = ev;
187 ),
188 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
189 __entry->priv, __entry->time, __entry->data, __entry->ev)
190);
191#endif /* __IWLWIFI_DEVICE_TRACE */
192
193#undef TRACE_INCLUDE_PATH
194#define TRACE_INCLUDE_PATH .
195#undef TRACE_INCLUDE_FILE
196#define TRACE_INCLUDE_FILE iwl-devtrace
197#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index e14c9952a935..3946e5c03f81 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -215,12 +215,35 @@ static const struct iwl_txpwr_section enhinfo[] = {
215 215
216int iwlcore_eeprom_verify_signature(struct iwl_priv *priv) 216int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
217{ 217{
218 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 218 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
219 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 219 int ret = 0;
220 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); 220
221 return -ENOENT; 221 IWL_DEBUG_INFO(priv, "EEPROM signature=0x%08x\n", gp);
222 switch (gp) {
223 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
224 if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
225 IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
226 gp);
227 ret = -ENOENT;
228 }
229 break;
230 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
231 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
232 if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
233 IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
234 ret = -ENOENT;
235 }
236 break;
237 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
238 default:
239 IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
240 "EEPROM_GP=0x%08x\n",
241 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
242 ? "OTP" : "EEPROM", gp);
243 ret = -ENOENT;
244 break;
222 } 245 }
223 return 0; 246 return ret;
224} 247}
225EXPORT_SYMBOL(iwlcore_eeprom_verify_signature); 248EXPORT_SYMBOL(iwlcore_eeprom_verify_signature);
226 249
@@ -283,7 +306,8 @@ int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
283 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); 306 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
284 307
285 /* See if we got it */ 308 /* See if we got it */
286 ret = iwl_poll_direct_bit(priv, CSR_HW_IF_CONFIG_REG, 309 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
310 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
287 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 311 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
288 EEPROM_SEM_TIMEOUT); 312 EEPROM_SEM_TIMEOUT);
289 if (ret >= 0) { 313 if (ret >= 0) {
@@ -322,7 +346,8 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
322 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 346 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
323 347
324 /* wait for clock to be ready */ 348 /* wait for clock to be ready */
325 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL, 349 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
350 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
326 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 351 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
327 25000); 352 25000);
328 if (ret < 0) 353 if (ret < 0)
@@ -333,6 +358,14 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
333 udelay(5); 358 udelay(5);
334 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, 359 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
335 APMG_PS_CTRL_VAL_RESET_REQ); 360 APMG_PS_CTRL_VAL_RESET_REQ);
361
362 /*
363 * CSR auto clock gate disable bit -
364 * this is only applicable for HW with OTP shadow RAM
365 */
366 if (priv->cfg->shadow_ram_support)
367 iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
368 CSR_RESET_LINK_PWR_MGMT_DISABLED);
336 } 369 }
337 return ret; 370 return ret;
338} 371}
@@ -345,7 +378,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
345 378
346 _iwl_write32(priv, CSR_EEPROM_REG, 379 _iwl_write32(priv, CSR_EEPROM_REG,
347 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 380 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
348 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG, 381 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
382 CSR_EEPROM_REG_READ_VALID_MSK,
349 CSR_EEPROM_REG_READ_VALID_MSK, 383 CSR_EEPROM_REG_READ_VALID_MSK,
350 IWL_EEPROM_ACCESS_TIMEOUT); 384 IWL_EEPROM_ACCESS_TIMEOUT);
351 if (ret < 0) { 385 if (ret < 0) {
@@ -484,6 +518,11 @@ int iwl_eeprom_init(struct iwl_priv *priv)
484 } 518 }
485 e = (u16 *)priv->eeprom; 519 e = (u16 *)priv->eeprom;
486 520
521 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
522 /* OTP reads require powered-up chip */
523 priv->cfg->ops->lib->apm_ops.init(priv);
524 }
525
487 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv); 526 ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
488 if (ret < 0) { 527 if (ret < 0) {
489 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); 528 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
@@ -498,7 +537,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
498 ret = -ENOENT; 537 ret = -ENOENT;
499 goto err; 538 goto err;
500 } 539 }
540
501 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { 541 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
542
502 ret = iwl_init_otp_access(priv); 543 ret = iwl_init_otp_access(priv);
503 if (ret) { 544 if (ret) {
504 IWL_ERR(priv, "Failed to initialize OTP access.\n"); 545 IWL_ERR(priv, "Failed to initialize OTP access.\n");
@@ -529,6 +570,13 @@ int iwl_eeprom_init(struct iwl_priv *priv)
529 e[cache_addr / 2] = eeprom_data; 570 e[cache_addr / 2] = eeprom_data;
530 cache_addr += sizeof(u16); 571 cache_addr += sizeof(u16);
531 } 572 }
573
574 /*
575 * Now that OTP reads are complete, reset chip to save
576 * power until we load uCode during "up".
577 */
578 priv->cfg->ops->lib->apm_ops.stop(priv);
579
532 } else { 580 } else {
533 /* eeprom is an array of 16bit values */ 581 /* eeprom is an array of 16bit values */
534 for (addr = 0; addr < sz; addr += sizeof(u16)) { 582 for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -537,7 +585,8 @@ int iwl_eeprom_init(struct iwl_priv *priv)
537 _iwl_write32(priv, CSR_EEPROM_REG, 585 _iwl_write32(priv, CSR_EEPROM_REG,
538 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 586 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
539 587
540 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG, 588 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
589 CSR_EEPROM_REG_READ_VALID_MSK,
541 CSR_EEPROM_REG_READ_VALID_MSK, 590 CSR_EEPROM_REG_READ_VALID_MSK,
542 IWL_EEPROM_ACCESS_TIMEOUT); 591 IWL_EEPROM_ACCESS_TIMEOUT);
543 if (ret < 0) { 592 if (ret < 0) {
@@ -705,9 +754,6 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
705 754
706 ch_info->ht40_eeprom = *eeprom_ch; 755 ch_info->ht40_eeprom = *eeprom_ch;
707 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg; 756 ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
708 ch_info->ht40_curr_txpow = eeprom_ch->max_power_avg;
709 ch_info->ht40_min_power = 0;
710 ch_info->ht40_scan_power = eeprom_ch->max_power_avg;
711 ch_info->ht40_flags = eeprom_ch->flags; 757 ch_info->ht40_flags = eeprom_ch->flags;
712 ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel; 758 ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
713 759
@@ -719,7 +765,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
719 * find the highest tx power from all chains for the channel 765 * find the highest tx power from all chains for the channel
720 */ 766 */
721static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv, 767static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
722 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, int element) 768 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
769 int element, s8 *max_txpower_in_half_dbm)
723{ 770{
724 s8 max_txpower_avg = 0; /* (dBm) */ 771 s8 max_txpower_avg = 0; /* (dBm) */
725 772
@@ -751,10 +798,14 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
751 (enhanced_txpower[element].mimo3_max > max_txpower_avg)) 798 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
752 max_txpower_avg = enhanced_txpower[element].mimo3_max; 799 max_txpower_avg = enhanced_txpower[element].mimo3_max;
753 800
754 /* max. tx power in EEPROM is in 1/2 dBm format 801 /*
755 * convert from 1/2 dBm to dBm 802 * max. tx power in EEPROM is in 1/2 dBm format
803 * convert from 1/2 dBm to dBm (round-up convert)
804 * but we also do not want to loss 1/2 dBm resolution which
805 * will impact performance
756 */ 806 */
757 return max_txpower_avg >> 1; 807 *max_txpower_in_half_dbm = max_txpower_avg;
808 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
758} 809}
759 810
760/** 811/**
@@ -763,7 +814,7 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
763 */ 814 */
764static s8 iwl_update_common_txpower(struct iwl_priv *priv, 815static s8 iwl_update_common_txpower(struct iwl_priv *priv,
765 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, 816 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
766 int section, int element) 817 int section, int element, s8 *max_txpower_in_half_dbm)
767{ 818{
768 struct iwl_channel_info *ch_info; 819 struct iwl_channel_info *ch_info;
769 int ch; 820 int ch;
@@ -777,25 +828,25 @@ static s8 iwl_update_common_txpower(struct iwl_priv *priv,
777 if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX) 828 if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX)
778 is_ht40 = true; 829 is_ht40 = true;
779 max_txpower_avg = 830 max_txpower_avg =
780 iwl_get_max_txpower_avg(priv, enhanced_txpower, element); 831 iwl_get_max_txpower_avg(priv, enhanced_txpower,
832 element, max_txpower_in_half_dbm);
833
781 ch_info = priv->channel_info; 834 ch_info = priv->channel_info;
782 835
783 for (ch = 0; ch < priv->channel_count; ch++) { 836 for (ch = 0; ch < priv->channel_count; ch++) {
784 /* find matching band and update tx power if needed */ 837 /* find matching band and update tx power if needed */
785 if ((ch_info->band == enhinfo[section].band) && 838 if ((ch_info->band == enhinfo[section].band) &&
786 (ch_info->max_power_avg < max_txpower_avg) && (!is_ht40)) { 839 (ch_info->max_power_avg < max_txpower_avg) &&
840 (!is_ht40)) {
787 /* Update regulatory-based run-time data */ 841 /* Update regulatory-based run-time data */
788 ch_info->max_power_avg = ch_info->curr_txpow = 842 ch_info->max_power_avg = ch_info->curr_txpow =
789 max_txpower_avg; 843 max_txpower_avg;
790 ch_info->scan_power = max_txpower_avg; 844 ch_info->scan_power = max_txpower_avg;
791 } 845 }
792 if ((ch_info->band == enhinfo[section].band) && is_ht40 && 846 if ((ch_info->band == enhinfo[section].band) && is_ht40 &&
793 ch_info->ht40_max_power_avg &&
794 (ch_info->ht40_max_power_avg < max_txpower_avg)) { 847 (ch_info->ht40_max_power_avg < max_txpower_avg)) {
795 /* Update regulatory-based run-time data */ 848 /* Update regulatory-based run-time data */
796 ch_info->ht40_max_power_avg = max_txpower_avg; 849 ch_info->ht40_max_power_avg = max_txpower_avg;
797 ch_info->ht40_curr_txpow = max_txpower_avg;
798 ch_info->ht40_scan_power = max_txpower_avg;
799 } 850 }
800 ch_info++; 851 ch_info++;
801 } 852 }
@@ -808,7 +859,7 @@ static s8 iwl_update_common_txpower(struct iwl_priv *priv,
808 */ 859 */
809static s8 iwl_update_channel_txpower(struct iwl_priv *priv, 860static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
810 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, 861 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
811 int section, int element) 862 int section, int element, s8 *max_txpower_in_half_dbm)
812{ 863{
813 struct iwl_channel_info *ch_info; 864 struct iwl_channel_info *ch_info;
814 int ch; 865 int ch;
@@ -817,7 +868,8 @@ static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
817 868
818 channel = enhinfo[section].iwl_eeprom_section_channel[element]; 869 channel = enhinfo[section].iwl_eeprom_section_channel[element];
819 max_txpower_avg = 870 max_txpower_avg =
820 iwl_get_max_txpower_avg(priv, enhanced_txpower, element); 871 iwl_get_max_txpower_avg(priv, enhanced_txpower,
872 element, max_txpower_in_half_dbm);
821 873
822 ch_info = priv->channel_info; 874 ch_info = priv->channel_info;
823 for (ch = 0; ch < priv->channel_count; ch++) { 875 for (ch = 0; ch < priv->channel_count; ch++) {
@@ -831,12 +883,9 @@ static s8 iwl_update_channel_txpower(struct iwl_priv *priv,
831 ch_info->scan_power = max_txpower_avg; 883 ch_info->scan_power = max_txpower_avg;
832 } 884 }
833 if ((enhinfo[section].is_ht40) && 885 if ((enhinfo[section].is_ht40) &&
834 (ch_info->ht40_max_power_avg) &&
835 (ch_info->ht40_max_power_avg < max_txpower_avg)) { 886 (ch_info->ht40_max_power_avg < max_txpower_avg)) {
836 /* Update regulatory-based run-time data */ 887 /* Update regulatory-based run-time data */
837 ch_info->ht40_max_power_avg = max_txpower_avg; 888 ch_info->ht40_max_power_avg = max_txpower_avg;
838 ch_info->ht40_curr_txpow = max_txpower_avg;
839 ch_info->ht40_scan_power = max_txpower_avg;
840 } 889 }
841 break; 890 break;
842 } 891 }
@@ -855,6 +904,7 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
855 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower; 904 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower;
856 u32 offset; 905 u32 offset;
857 s8 max_txpower_avg; /* (dBm) */ 906 s8 max_txpower_avg; /* (dBm) */
907 s8 max_txpower_in_half_dbm; /* (half-dBm) */
858 908
859 /* Loop through all the sections 909 /* Loop through all the sections
860 * adjust bands and channel's max tx power 910 * adjust bands and channel's max tx power
@@ -867,20 +917,43 @@ void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
867 enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *) 917 enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *)
868 iwl_eeprom_query_addr(priv, offset); 918 iwl_eeprom_query_addr(priv, offset);
869 919
920 /*
921 * check for valid entry -
922 * different version of EEPROM might contain different set
923 * of enhanced tx power table
924 * always check for valid entry before process
925 * the information
926 */
927 if (!enhanced_txpower->common || enhanced_txpower->reserved)
928 continue;
929
870 for (element = 0; element < eeprom_section_count; element++) { 930 for (element = 0; element < eeprom_section_count; element++) {
871 if (enhinfo[section].is_common) 931 if (enhinfo[section].is_common)
872 max_txpower_avg = 932 max_txpower_avg =
873 iwl_update_common_txpower(priv, 933 iwl_update_common_txpower(priv,
874 enhanced_txpower, section, element); 934 enhanced_txpower, section,
935 element,
936 &max_txpower_in_half_dbm);
875 else 937 else
876 max_txpower_avg = 938 max_txpower_avg =
877 iwl_update_channel_txpower(priv, 939 iwl_update_channel_txpower(priv,
878 enhanced_txpower, section, element); 940 enhanced_txpower, section,
941 element,
942 &max_txpower_in_half_dbm);
879 943
880 /* Update the tx_power_user_lmt to the highest power 944 /* Update the tx_power_user_lmt to the highest power
881 * supported by any channel */ 945 * supported by any channel */
882 if (max_txpower_avg > priv->tx_power_user_lmt) 946 if (max_txpower_avg > priv->tx_power_user_lmt)
883 priv->tx_power_user_lmt = max_txpower_avg; 947 priv->tx_power_user_lmt = max_txpower_avg;
948
949 /*
950 * Update the tx_power_lmt_in_half_dbm to
951 * the highest power supported by any channel
952 */
953 if (max_txpower_in_half_dbm >
954 priv->tx_power_lmt_in_half_dbm)
955 priv->tx_power_lmt_in_half_dbm =
956 max_txpower_in_half_dbm;
884 } 957 }
885 } 958 }
886} 959}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 80b9e45d9b9c..5cd2b66bbe45 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -63,6 +63,8 @@
63#ifndef __iwl_eeprom_h__ 63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__ 64#define __iwl_eeprom_h__
65 65
66#include <net/mac80211.h>
67
66struct iwl_priv; 68struct iwl_priv;
67 69
68/* 70/*
@@ -125,19 +127,21 @@ struct iwl_eeprom_channel {
125 * Enhanced regulatory tx power portion of eeprom image can be broken down 127 * Enhanced regulatory tx power portion of eeprom image can be broken down
126 * into individual structures; each one is 8 bytes in size and contain the 128 * into individual structures; each one is 8 bytes in size and contain the
127 * following information 129 * following information
130 * @common: (desc + channel) not used by driver, should _NOT_ be "zero"
128 * @chain_a_max_pwr: chain a max power in 1/2 dBm 131 * @chain_a_max_pwr: chain a max power in 1/2 dBm
129 * @chain_b_max_pwr: chain b max power in 1/2 dBm 132 * @chain_b_max_pwr: chain b max power in 1/2 dBm
130 * @chain_c_max_pwr: chain c max power in 1/2 dBm 133 * @chain_c_max_pwr: chain c max power in 1/2 dBm
134 * @reserved: not used, should be "zero"
131 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm 135 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
132 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm 136 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
133 * 137 *
134 */ 138 */
135struct iwl_eeprom_enhanced_txpwr { 139struct iwl_eeprom_enhanced_txpwr {
136 u16 reserved; 140 u16 common;
137 s8 chain_a_max; 141 s8 chain_a_max;
138 s8 chain_b_max; 142 s8 chain_b_max;
139 s8 chain_c_max; 143 s8 chain_c_max;
140 s8 reserved1; 144 s8 reserved;
141 s8 mimo2_max; 145 s8 mimo2_max;
142 s8 mimo3_max; 146 s8 mimo3_max;
143} __attribute__ ((packed)); 147} __attribute__ ((packed));
@@ -256,6 +260,15 @@ struct iwl_eeprom_enhanced_txpwr {
256#define EEPROM_5050_TX_POWER_VERSION (4) 260#define EEPROM_5050_TX_POWER_VERSION (4)
257#define EEPROM_5050_EEPROM_VERSION (0x21E) 261#define EEPROM_5050_EEPROM_VERSION (0x21E)
258 262
263/* 1000 Specific */
264#define EEPROM_1000_EEPROM_VERSION (0x15C)
265
266/* 6x00 Specific */
267#define EEPROM_6000_EEPROM_VERSION (0x434)
268
269/* 6x50 Specific */
270#define EEPROM_6050_EEPROM_VERSION (0x532)
271
259/* OTP */ 272/* OTP */
260/* lower blocks contain EEPROM image and calibration data */ 273/* lower blocks contain EEPROM image and calibration data */
261#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ 274#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
@@ -370,12 +383,10 @@ struct iwl_eeprom_calib_info {
370#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ 383#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
371#define EEPROM_VERSION (2*0x44) /* 2 bytes */ 384#define EEPROM_VERSION (2*0x44) /* 2 bytes */
372#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */ 385#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
373#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */
374#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ 386#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
375#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ 387#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
376#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ 388#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
377#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */ 389#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */
378#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */
379 390
380/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ 391/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
381#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ 392#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
@@ -387,7 +398,12 @@ struct iwl_eeprom_calib_info {
387 398
388#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 399#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
389#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 400#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
390#define EEPROM_5000_RF_CFG_TYPE_MAX 0x3 401
402/* Radio Config for 5000 and up */
403#define EEPROM_RF_CONFIG_TYPE_R3x3 0x0
404#define EEPROM_RF_CONFIG_TYPE_R2x2 0x1
405#define EEPROM_RF_CONFIG_TYPE_R1x2 0x2
406#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
391 407
392/* 408/*
393 * Per-channel regulatory data. 409 * Per-channel regulatory data.
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index a6856daf14cb..a23165948202 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -56,6 +56,8 @@ const char *get_cmd_string(u8 cmd)
56 IWL_CMD(REPLY_LEDS_CMD); 56 IWL_CMD(REPLY_LEDS_CMD);
57 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 57 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
58 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 58 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
59 IWL_CMD(COEX_MEDIUM_NOTIFICATION);
60 IWL_CMD(COEX_EVENT_CMD);
59 IWL_CMD(RADAR_NOTIFICATION); 61 IWL_CMD(RADAR_NOTIFICATION);
60 IWL_CMD(REPLY_QUIET_CMD); 62 IWL_CMD(REPLY_QUIET_CMD);
61 IWL_CMD(REPLY_CHANNEL_SWITCH); 63 IWL_CMD(REPLY_CHANNEL_SWITCH);
@@ -93,6 +95,8 @@ const char *get_cmd_string(u8 cmd)
93 IWL_CMD(CALIBRATION_RES_NOTIFICATION); 95 IWL_CMD(CALIBRATION_RES_NOTIFICATION);
94 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); 96 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
95 IWL_CMD(REPLY_TX_POWER_DBM_CMD); 97 IWL_CMD(REPLY_TX_POWER_DBM_CMD);
98 IWL_CMD(TEMPERATURE_NOTIFICATION);
99 IWL_CMD(TX_ANT_CONFIGURATION_CMD);
96 default: 100 default:
97 return "UNKNOWN"; 101 return "UNKNOWN";
98 102
@@ -104,17 +108,8 @@ EXPORT_SYMBOL(get_cmd_string);
104 108
105static void iwl_generic_cmd_callback(struct iwl_priv *priv, 109static void iwl_generic_cmd_callback(struct iwl_priv *priv,
106 struct iwl_device_cmd *cmd, 110 struct iwl_device_cmd *cmd,
107 struct sk_buff *skb) 111 struct iwl_rx_packet *pkt)
108{ 112{
109 struct iwl_rx_packet *pkt = NULL;
110
111 if (!skb) {
112 IWL_ERR(priv, "Error: Response NULL in %s.\n",
113 get_cmd_string(cmd->hdr.cmd));
114 return;
115 }
116
117 pkt = (struct iwl_rx_packet *)skb->data;
118 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 113 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
119 IWL_ERR(priv, "Bad return from %s (0x%08X)\n", 114 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
120 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 115 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -205,18 +200,18 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
205 } 200 }
206 201
207 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { 202 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
208 IWL_DEBUG_INFO(priv, "Command %s aborted: RF KILL Switch\n", 203 IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
209 get_cmd_string(cmd->id)); 204 get_cmd_string(cmd->id));
210 ret = -ECANCELED; 205 ret = -ECANCELED;
211 goto fail; 206 goto fail;
212 } 207 }
213 if (test_bit(STATUS_FW_ERROR, &priv->status)) { 208 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
214 IWL_DEBUG_INFO(priv, "Command %s failed: FW Error\n", 209 IWL_ERR(priv, "Command %s failed: FW Error\n",
215 get_cmd_string(cmd->id)); 210 get_cmd_string(cmd->id));
216 ret = -EIO; 211 ret = -EIO;
217 goto fail; 212 goto fail;
218 } 213 }
219 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_skb) { 214 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
220 IWL_ERR(priv, "Error: Response NULL in '%s'\n", 215 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
221 get_cmd_string(cmd->id)); 216 get_cmd_string(cmd->id));
222 ret = -EIO; 217 ret = -EIO;
@@ -238,9 +233,9 @@ cancel:
238 ~CMD_WANT_SKB; 233 ~CMD_WANT_SKB;
239 } 234 }
240fail: 235fail:
241 if (cmd->reply_skb) { 236 if (cmd->reply_page) {
242 dev_kfree_skb_any(cmd->reply_skb); 237 free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
243 cmd->reply_skb = NULL; 238 cmd->reply_page = 0;
244 } 239 }
245out: 240out:
246 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status); 241 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
@@ -273,7 +268,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
273 u8 id, u16 len, const void *data, 268 u8 id, u16 len, const void *data,
274 void (*callback)(struct iwl_priv *priv, 269 void (*callback)(struct iwl_priv *priv,
275 struct iwl_device_cmd *cmd, 270 struct iwl_device_cmd *cmd,
276 struct sk_buff *skb)) 271 struct iwl_rx_packet *pkt))
277{ 272{
278 struct iwl_host_cmd cmd = { 273 struct iwl_host_cmd cmd = {
279 .id = id, 274 .id = id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index d30cb0275d19..e552d4c4bdbe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -32,6 +32,7 @@
32#include <linux/io.h> 32#include <linux/io.h>
33 33
34#include "iwl-debug.h" 34#include "iwl-debug.h"
35#include "iwl-devtrace.h"
35 36
36/* 37/*
37 * IO, register, and NIC memory access functions 38 * IO, register, and NIC memory access functions
@@ -61,7 +62,32 @@
61 * 62 *
62 */ 63 */
63 64
64#define _iwl_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs)) 65static inline void _iwl_write8(struct iwl_priv *priv, u32 ofs, u8 val)
66{
67 trace_iwlwifi_dev_iowrite8(priv, ofs, val);
68 iowrite8(val, priv->hw_base + ofs);
69}
70
71#ifdef CONFIG_IWLWIFI_DEBUG
72static inline void __iwl_write8(const char *f, u32 l, struct iwl_priv *priv,
73 u32 ofs, u8 val)
74{
75 IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
76 _iwl_write8(priv, ofs, val);
77}
78#define iwl_write8(priv, ofs, val) \
79 __iwl_write8(__FILE__, __LINE__, priv, ofs, val)
80#else
81#define iwl_write8(priv, ofs, val) _iwl_write8(priv, ofs, val)
82#endif
83
84
85static inline void _iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
86{
87 trace_iwlwifi_dev_iowrite32(priv, ofs, val);
88 iowrite32(val, priv->hw_base + ofs);
89}
90
65#ifdef CONFIG_IWLWIFI_DEBUG 91#ifdef CONFIG_IWLWIFI_DEBUG
66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv, 92static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
67 u32 ofs, u32 val) 93 u32 ofs, u32 val)
@@ -75,7 +101,13 @@ static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
75#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val) 101#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
76#endif 102#endif
77 103
78#define _iwl_read32(priv, ofs) ioread32((priv)->hw_base + (ofs)) 104static inline u32 _iwl_read32(struct iwl_priv *priv, u32 ofs)
105{
106 u32 val = ioread32(priv->hw_base + ofs);
107 trace_iwlwifi_dev_ioread32(priv, ofs, val);
108 return val;
109}
110
79#ifdef CONFIG_IWLWIFI_DEBUG 111#ifdef CONFIG_IWLWIFI_DEBUG
80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) 112static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
81{ 113{
@@ -188,6 +220,26 @@ static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
188 220
189 /* this bit wakes up the NIC */ 221 /* this bit wakes up the NIC */
190 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 222 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
223
224 /*
225 * These bits say the device is running, and should keep running for
226 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
227 * but they do not indicate that embedded SRAM is restored yet;
228 * 3945 and 4965 have volatile SRAM, and must save/restore contents
229 * to/from host DRAM when sleeping/waking for power-saving.
230 * Each direction takes approximately 1/4 millisecond; with this
231 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
232 * series of register accesses are expected (e.g. reading Event Log),
233 * to keep device from sleeping.
234 *
235 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
236 * SRAM is okay/restored. We don't check that here because this call
237 * is just for hardware register access; but GP1 MAC_SLEEP check is a
238 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
239 *
240 * 5000 series and later (including 1000 series) have non-volatile SRAM,
241 * and do not save/restore SRAM when power cycling.
242 */
191 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL, 243 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
192 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 244 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
193 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 245 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index f420c99e7240..46c7a95b88f0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -42,15 +42,11 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44 44
45#ifdef CONFIG_IWLWIFI_DEBUG 45/* default: IWL_LED_BLINK(0) using blinking index table */
46static const char *led_type_str[] = { 46static int led_mode;
47 __stringify(IWL_LED_TRG_TX), 47module_param(led_mode, int, S_IRUGO);
48 __stringify(IWL_LED_TRG_RX), 48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
49 __stringify(IWL_LED_TRG_ASSOC), 49 "(default 0)\n");
50 __stringify(IWL_LED_TRG_RADIO),
51 NULL
52};
53#endif /* CONFIG_IWLWIFI_DEBUG */
54 50
55 51
56static const struct { 52static const struct {
@@ -65,11 +61,11 @@ static const struct {
65 {70, 65, 65}, 61 {70, 65, 65},
66 {50, 75, 75}, 62 {50, 75, 75},
67 {20, 85, 85}, 63 {20, 85, 85},
68 {15, 95, 95 }, 64 {10, 95, 95},
69 {10, 110, 110}, 65 {5, 110, 110},
70 {5, 130, 130}, 66 {1, 130, 130},
71 {0, 167, 167}, 67 {0, 167, 167},
72/* SOLID_ON */ 68 /* SOLID_ON */
73 {-1, IWL_LED_SOLID, 0} 69 {-1, IWL_LED_SOLID, 0}
74}; 70};
75 71
@@ -78,191 +74,74 @@ static const struct {
78#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */ 74#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
79#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1) 75#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
80 76
81/* [0-256] -> [0..8] FIXME: we need [0..10] */ 77/*
82static inline int iwl_brightness_to_idx(enum led_brightness brightness) 78 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
83{ 79 * Led blink rate analysis showed an average deviation of 0% on 3945,
84 return fls(0x000000FF & (u32)brightness); 80 * 5% on 4965 HW and 20% on 5000 series and up.
85} 81 * Need to compensate on the led on/off time per HW according to the deviation
86 82 * to achieve the desired led frequency
87/* Send led command */ 83 * The calculation is: (100-averageDeviation)/100 * blinkTime
88static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) 84 * For code efficiency the calculation will be:
85 * compensation = (100 - averageDeviation) * 64 / 100
86 * NewBlinkTime = (compensation * BlinkTime) / 64
87 */
88static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
89 u8 time, u16 compensation)
89{ 90{
90 struct iwl_host_cmd cmd = { 91 if (!compensation) {
91 .id = REPLY_LEDS_CMD, 92 IWL_ERR(priv, "undefined blink compensation: "
92 .len = sizeof(struct iwl_led_cmd), 93 "use pre-defined blinking time\n");
93 .data = led_cmd, 94 return time;
94 .flags = CMD_ASYNC, 95 }
95 .callback = NULL,
96 };
97 u32 reg;
98
99 reg = iwl_read32(priv, CSR_LED_REG);
100 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
101 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
102 96
103 return iwl_send_cmd(priv, &cmd); 97 return (u8)((time * compensation) >> 6);
104} 98}
105 99
106/* Set led pattern command */ 100/* Set led pattern command */
107static int iwl_led_pattern(struct iwl_priv *priv, int led_id, 101static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
108 unsigned int idx)
109{ 102{
110 struct iwl_led_cmd led_cmd = { 103 struct iwl_led_cmd led_cmd = {
111 .id = led_id, 104 .id = IWL_LED_LINK,
112 .interval = IWL_DEF_LED_INTRVL 105 .interval = IWL_DEF_LED_INTRVL
113 }; 106 };
114 107
115 BUG_ON(idx > IWL_MAX_BLINK_TBL); 108 BUG_ON(idx > IWL_MAX_BLINK_TBL);
116 109
117 led_cmd.on = blink_tbl[idx].on_time; 110 IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
118 led_cmd.off = blink_tbl[idx].off_time; 111 priv->cfg->led_compensation);
119 112 led_cmd.on =
120 return iwl_send_led_cmd(priv, &led_cmd); 113 iwl_blink_compensation(priv, blink_tbl[idx].on_time,
121} 114 priv->cfg->led_compensation);
122 115 led_cmd.off =
123/* Set led register off */ 116 iwl_blink_compensation(priv, blink_tbl[idx].off_time,
124static int iwl_led_on_reg(struct iwl_priv *priv, int led_id) 117 priv->cfg->led_compensation);
125{
126 IWL_DEBUG_LED(priv, "led on %d\n", led_id);
127 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
128 return 0;
129}
130 118
131#if 0 119 return priv->cfg->ops->led->cmd(priv, &led_cmd);
132/* Set led on command */
133static int iwl_led_on(struct iwl_priv *priv, int led_id)
134{
135 struct iwl_led_cmd led_cmd = {
136 .id = led_id,
137 .on = IWL_LED_SOLID,
138 .off = 0,
139 .interval = IWL_DEF_LED_INTRVL
140 };
141 return iwl_send_led_cmd(priv, &led_cmd);
142} 120}
143 121
144/* Set led off command */ 122int iwl_led_start(struct iwl_priv *priv)
145int iwl_led_off(struct iwl_priv *priv, int led_id)
146{ 123{
147 struct iwl_led_cmd led_cmd = { 124 return priv->cfg->ops->led->on(priv);
148 .id = led_id,
149 .on = 0,
150 .off = 0,
151 .interval = IWL_DEF_LED_INTRVL
152 };
153 IWL_DEBUG_LED(priv, "led off %d\n", led_id);
154 return iwl_send_led_cmd(priv, &led_cmd);
155} 125}
156#endif 126EXPORT_SYMBOL(iwl_led_start);
157
158 127
159/* Set led register off */ 128int iwl_led_associate(struct iwl_priv *priv)
160static int iwl_led_off_reg(struct iwl_priv *priv, int led_id)
161{
162 IWL_DEBUG_LED(priv, "LED Reg off\n");
163 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
164 return 0;
165}
166
167/*
168 * Set led register in case of disassociation according to rfkill state
169 */
170static int iwl_led_associate(struct iwl_priv *priv, int led_id)
171{ 129{
172 IWL_DEBUG_LED(priv, "Associated\n"); 130 IWL_DEBUG_LED(priv, "Associated\n");
173 priv->allow_blinking = 1; 131 if (led_mode == IWL_LED_BLINK)
174 return iwl_led_on_reg(priv, led_id); 132 priv->allow_blinking = 1;
175} 133 priv->last_blink_time = jiffies;
176static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
177{
178 priv->allow_blinking = 0;
179
180 return 0;
181}
182
183/*
184 * brightness call back function for Tx/Rx LED
185 */
186static int iwl_led_associated(struct iwl_priv *priv, int led_id)
187{
188 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
189 !test_bit(STATUS_READY, &priv->status))
190 return 0;
191
192 134
193 /* start counting Tx/Rx bytes */
194 if (!priv->last_blink_time && priv->allow_blinking)
195 priv->last_blink_time = jiffies;
196 return 0; 135 return 0;
197} 136}
198 137
199/* 138int iwl_led_disassociate(struct iwl_priv *priv)
200 * brightness call back for association and radio
201 */
202static void iwl_led_brightness_set(struct led_classdev *led_cdev,
203 enum led_brightness brightness)
204{ 139{
205 struct iwl_led *led = container_of(led_cdev, struct iwl_led, led_dev); 140 priv->allow_blinking = 0;
206 struct iwl_priv *priv = led->priv;
207
208 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
209 return;
210
211
212 IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
213 led_type_str[led->type], brightness);
214 switch (brightness) {
215 case LED_FULL:
216 if (led->led_on)
217 led->led_on(priv, IWL_LED_LINK);
218 break;
219 case LED_OFF:
220 if (led->led_off)
221 led->led_off(priv, IWL_LED_LINK);
222 break;
223 default:
224 if (led->led_pattern) {
225 int idx = iwl_brightness_to_idx(brightness);
226 led->led_pattern(priv, IWL_LED_LINK, idx);
227 }
228 break;
229 }
230}
231
232
233
234/*
235 * Register led class with the system
236 */
237static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
238 enum led_type type, u8 set_led,
239 char *trigger)
240{
241 struct device *device = wiphy_dev(priv->hw->wiphy);
242 int ret;
243
244 led->led_dev.name = led->name;
245 led->led_dev.brightness_set = iwl_led_brightness_set;
246 led->led_dev.default_trigger = trigger;
247
248 led->priv = priv;
249 led->type = type;
250
251 ret = led_classdev_register(device, &led->led_dev);
252 if (ret) {
253 IWL_ERR(priv, "Error: failed to register led handler.\n");
254 return ret;
255 }
256
257 led->registered = 1;
258
259 if (set_led && led->led_on)
260 led->led_on(priv, IWL_LED_LINK);
261 141
262 return 0; 142 return 0;
263} 143}
264 144
265
266/* 145/*
267 * calculate blink rate according to last second Tx/Rx activities 146 * calculate blink rate according to last second Tx/Rx activities
268 */ 147 */
@@ -288,7 +167,7 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
288 i = IWL_MAX_BLINK_TBL; 167 i = IWL_MAX_BLINK_TBL;
289 else 168 else
290 for (i = 0; i < IWL_MAX_BLINK_TBL; i++) 169 for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
291 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) 170 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
292 break; 171 break;
293 172
294 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i); 173 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
@@ -317,8 +196,7 @@ void iwl_leds_background(struct iwl_priv *priv)
317 priv->last_blink_time = 0; 196 priv->last_blink_time = 0;
318 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) { 197 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
319 priv->last_blink_rate = IWL_SOLID_BLINK_IDX; 198 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
320 iwl_led_pattern(priv, IWL_LED_LINK, 199 iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
321 IWL_SOLID_BLINK_IDX);
322 } 200 }
323 return; 201 return;
324 } 202 }
@@ -331,111 +209,17 @@ void iwl_leds_background(struct iwl_priv *priv)
331 209
332 /* call only if blink rate change */ 210 /* call only if blink rate change */
333 if (blink_idx != priv->last_blink_rate) 211 if (blink_idx != priv->last_blink_rate)
334 iwl_led_pattern(priv, IWL_LED_LINK, blink_idx); 212 iwl_led_pattern(priv, blink_idx);
335 213
336 priv->last_blink_time = jiffies; 214 priv->last_blink_time = jiffies;
337 priv->last_blink_rate = blink_idx; 215 priv->last_blink_rate = blink_idx;
338} 216}
217EXPORT_SYMBOL(iwl_leds_background);
339 218
340/* Register all led handler */ 219void iwl_leds_init(struct iwl_priv *priv)
341int iwl_leds_register(struct iwl_priv *priv)
342{ 220{
343 char *trigger;
344 int ret;
345
346 priv->last_blink_rate = 0; 221 priv->last_blink_rate = 0;
347 priv->led_tpt = 0;
348 priv->last_blink_time = 0; 222 priv->last_blink_time = 0;
349 priv->allow_blinking = 0; 223 priv->allow_blinking = 0;
350
351 trigger = ieee80211_get_radio_led_name(priv->hw);
352 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
353 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
354 wiphy_name(priv->hw->wiphy));
355
356 priv->led[IWL_LED_TRG_RADIO].led_on = iwl_led_on_reg;
357 priv->led[IWL_LED_TRG_RADIO].led_off = iwl_led_off_reg;
358 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
359
360 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO],
361 IWL_LED_TRG_RADIO, 1, trigger);
362 if (ret)
363 goto exit_fail;
364
365 trigger = ieee80211_get_assoc_led_name(priv->hw);
366 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
367 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
368 wiphy_name(priv->hw->wiphy));
369
370 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
371 IWL_LED_TRG_ASSOC, 0, trigger);
372
373 /* for assoc always turn led on */
374 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl_led_associate;
375 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl_led_disassociate;
376 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
377
378 if (ret)
379 goto exit_fail;
380
381 trigger = ieee80211_get_rx_led_name(priv->hw);
382 snprintf(priv->led[IWL_LED_TRG_RX].name,
383 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
384 wiphy_name(priv->hw->wiphy));
385
386 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
387 IWL_LED_TRG_RX, 0, trigger);
388
389 priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated;
390 priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated;
391 priv->led[IWL_LED_TRG_RX].led_pattern = iwl_led_pattern;
392
393 if (ret)
394 goto exit_fail;
395
396 trigger = ieee80211_get_tx_led_name(priv->hw);
397 snprintf(priv->led[IWL_LED_TRG_TX].name,
398 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
399 wiphy_name(priv->hw->wiphy));
400
401 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
402 IWL_LED_TRG_TX, 0, trigger);
403
404 priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated;
405 priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated;
406 priv->led[IWL_LED_TRG_TX].led_pattern = iwl_led_pattern;
407
408 if (ret)
409 goto exit_fail;
410
411 return 0;
412
413exit_fail:
414 iwl_leds_unregister(priv);
415 return ret;
416} 224}
417EXPORT_SYMBOL(iwl_leds_register); 225EXPORT_SYMBOL(iwl_leds_init);
418
419/* unregister led class */
420static void iwl_leds_unregister_led(struct iwl_led *led, u8 set_led)
421{
422 if (!led->registered)
423 return;
424
425 led_classdev_unregister(&led->led_dev);
426
427 if (set_led)
428 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
429 led->registered = 0;
430}
431
432/* Unregister all led handlers */
433void iwl_leds_unregister(struct iwl_priv *priv)
434{
435 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
436 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
437 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
438 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
439}
440EXPORT_SYMBOL(iwl_leds_unregister);
441
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index ef9b174c37ff..f47f053f02ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -30,9 +30,6 @@
30 30
31struct iwl_priv; 31struct iwl_priv;
32 32
33#ifdef CONFIG_IWLWIFI_LEDS
34#include <linux/leds.h>
35
36#define IWL_LED_SOLID 11 33#define IWL_LED_SOLID 11
37#define IWL_LED_NAME_LEN 31 34#define IWL_LED_NAME_LEN 31
38#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) 35#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
@@ -47,38 +44,23 @@ enum led_type {
47 IWL_LED_TRG_RADIO, 44 IWL_LED_TRG_RADIO,
48 IWL_LED_TRG_MAX, 45 IWL_LED_TRG_MAX,
49}; 46};
50#endif
51
52#ifdef CONFIG_IWLWIFI_LEDS
53
54struct iwl_led {
55 struct iwl_priv *priv;
56 struct led_classdev led_dev;
57 char name[32];
58 47
59 int (*led_on) (struct iwl_priv *priv, int led_id); 48/*
60 int (*led_off) (struct iwl_priv *priv, int led_id); 49 * LED mode
61 int (*led_pattern) (struct iwl_priv *priv, int led_id, unsigned int idx); 50 * IWL_LED_BLINK: adjust led blink rate based on blink table
62 51 * IWL_LED_RF_STATE: turn LED on/off based on RF state
63 enum led_type type; 52 * LED ON = RF ON
64 unsigned int registered; 53 * LED OFF = RF OFF
54 */
55enum iwl_led_mode {
56 IWL_LED_BLINK,
57 IWL_LED_RF_STATE,
65}; 58};
66 59
67int iwl_leds_register(struct iwl_priv *priv); 60void iwl_leds_init(struct iwl_priv *priv);
68void iwl_leds_unregister(struct iwl_priv *priv);
69void iwl_leds_background(struct iwl_priv *priv); 61void iwl_leds_background(struct iwl_priv *priv);
62int iwl_led_start(struct iwl_priv *priv);
63int iwl_led_associate(struct iwl_priv *priv);
64int iwl_led_disassociate(struct iwl_priv *priv);
70 65
71#else
72static inline int iwl_leds_register(struct iwl_priv *priv)
73{
74 return 0;
75}
76static inline void iwl_leds_unregister(struct iwl_priv *priv)
77{
78}
79static inline void iwl_leds_background(struct iwl_priv *priv)
80{
81}
82
83#endif /* CONFIG_IWLWIFI_LEDS */
84#endif /* __iwl_leds_h__ */ 66#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 60be976afff8..8ccc0bb1d9ed 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(no_sleep_autoadjust,
66 66
67struct iwl_power_vec_entry { 67struct iwl_power_vec_entry {
68 struct iwl_powertable_cmd cmd; 68 struct iwl_powertable_cmd cmd;
69 u8 no_dtim; 69 u8 no_dtim; /* number of skip dtim */
70}; 70};
71 71
72#define IWL_DTIM_RANGE_0_MAX 2 72#define IWL_DTIM_RANGE_0_MAX 2
@@ -83,8 +83,9 @@ struct iwl_power_vec_entry {
83 cpu_to_le32(X4)} 83 cpu_to_le32(X4)}
84/* default power management (not Tx power) table values */ 84/* default power management (not Tx power) table values */
85/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ 85/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
86/* DTIM 0 - 2 */
86static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { 87static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 88 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 89 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
89 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, 90 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
90 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, 91 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
@@ -93,15 +94,17 @@ static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
93 94
94 95
95/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ 96/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
97/* DTIM 3 - 10 */
96static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { 98static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
97 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, 99 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, 100 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
99 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, 101 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
100 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, 102 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
101 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2} 103 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
102}; 104};
103 105
104/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ 106/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
107/* DTIM 11 - */
105static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { 108static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
106 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, 109 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
107 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, 110 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -115,13 +118,15 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
115 enum iwl_power_level lvl, int period) 118 enum iwl_power_level lvl, int period)
116{ 119{
117 const struct iwl_power_vec_entry *table; 120 const struct iwl_power_vec_entry *table;
118 int max_sleep, i; 121 int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
119 bool skip; 122 int i;
123 u8 skip;
124 u32 slp_itrvl;
120 125
121 table = range_2; 126 table = range_2;
122 if (period < IWL_DTIM_RANGE_1_MAX) 127 if (period <= IWL_DTIM_RANGE_1_MAX)
123 table = range_1; 128 table = range_1;
124 if (period < IWL_DTIM_RANGE_0_MAX) 129 if (period <= IWL_DTIM_RANGE_0_MAX)
125 table = range_0; 130 table = range_0;
126 131
127 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM); 132 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
@@ -129,34 +134,60 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
129 *cmd = table[lvl].cmd; 134 *cmd = table[lvl].cmd;
130 135
131 if (period == 0) { 136 if (period == 0) {
132 skip = false; 137 skip = 0;
133 period = 1; 138 period = 1;
139 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
140 max_sleep[i] = 1;
141
134 } else { 142 } else {
135 skip = !!table[lvl].no_dtim; 143 skip = table[lvl].no_dtim;
144 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
145 max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
146 max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
136 } 147 }
137 148
138 if (skip) { 149 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
139 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; 150 /* figure out the listen interval based on dtim period and skip */
140 max_sleep = le32_to_cpu(slp_itrvl); 151 if (slp_itrvl == 0xFF)
141 if (max_sleep == 0xFF) 152 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
142 max_sleep = period * (skip + 1); 153 cpu_to_le32(period * (skip + 1));
143 else if (max_sleep > period) 154
144 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; 155 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
156 if (slp_itrvl > period)
157 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
158 cpu_to_le32((slp_itrvl / period) * period);
159
160 if (skip)
145 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; 161 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
146 } else { 162 else
147 max_sleep = period;
148 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 163 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
149 }
150 164
151 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) 165 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
152 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) 166 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
153 cmd->sleep_interval[i] = cpu_to_le32(max_sleep); 167 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
168 cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);
169
170 /* enforce max sleep interval */
171 for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
172 if (le32_to_cpu(cmd->sleep_interval[i]) >
173 (max_sleep[i] * period))
174 cmd->sleep_interval[i] =
175 cpu_to_le32(max_sleep[i] * period);
176 if (i != (IWL_POWER_VEC_SIZE - 1)) {
177 if (le32_to_cpu(cmd->sleep_interval[i]) >
178 le32_to_cpu(cmd->sleep_interval[i+1]))
179 cmd->sleep_interval[i] =
180 cmd->sleep_interval[i+1];
181 }
182 }
154 183
155 if (priv->power_data.pci_pm) 184 if (priv->power_data.pci_pm)
156 cmd->flags |= IWL_POWER_PCI_PM_MSK; 185 cmd->flags |= IWL_POWER_PCI_PM_MSK;
157 else 186 else
158 cmd->flags &= ~IWL_POWER_PCI_PM_MSK; 187 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
159 188
189 IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
190 skip, period);
160 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); 191 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
161} 192}
162 193
@@ -165,26 +196,26 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
165 *============================================================================= 196 *=============================================================================
166 * Condition Nxt State Condition Nxt State Condition Nxt State 197 * Condition Nxt State Condition Nxt State Condition Nxt State
167 *----------------------------------------------------------------------------- 198 *-----------------------------------------------------------------------------
168 * IWL_TI_0 T >= 115 CT_KILL 115>T>=105 TI_1 N/A N/A 199 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
169 * IWL_TI_1 T >= 115 CT_KILL 115>T>=110 TI_2 T<=95 TI_0 200 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
170 * IWL_TI_2 T >= 115 CT_KILL T<=100 TI_1 201 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
171 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 202 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
172 *============================================================================= 203 *=============================================================================
173 */ 204 */
174static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = { 205static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
175 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104}, 206 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
176 {IWL_TI_1, 105, CT_KILL_THRESHOLD}, 207 {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
177 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX} 208 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
178}; 209};
179static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = { 210static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
180 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95}, 211 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
181 {IWL_TI_2, 110, CT_KILL_THRESHOLD}, 212 {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
182 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX} 213 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
183}; 214};
184static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = { 215static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
185 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100}, 216 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
186 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX}, 217 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
187 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX} 218 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
188}; 219};
189static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = { 220static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
190 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD}, 221 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
@@ -294,6 +325,9 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
294 325
295 if (priv->cfg->broken_powersave) 326 if (priv->cfg->broken_powersave)
296 iwl_power_sleep_cam_cmd(priv, &cmd); 327 iwl_power_sleep_cam_cmd(priv, &cmd);
328 else if (priv->cfg->supports_idle &&
329 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
330 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
297 else if (tt->state >= IWL_TI_1) 331 else if (tt->state >= IWL_TI_1)
298 iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper); 332 iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
299 else if (!enabled) 333 else if (!enabled)
@@ -348,6 +382,23 @@ bool iwl_ht_enabled(struct iwl_priv *priv)
348} 382}
349EXPORT_SYMBOL(iwl_ht_enabled); 383EXPORT_SYMBOL(iwl_ht_enabled);
350 384
385bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
386{
387 s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
388 bool within_margin = false;
389
390 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
391 temp = KELVIN_TO_CELSIUS(priv->temperature);
392
393 if (!priv->thermal_throttle.advanced_tt)
394 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
395 CT_KILL_THRESHOLD_LEGACY) ? true : false;
396 else
397 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
398 CT_KILL_THRESHOLD) ? true : false;
399 return within_margin;
400}
401
351enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv) 402enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
352{ 403{
353 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 404 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
@@ -372,6 +423,7 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
372} 423}
373 424
374#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */ 425#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
426#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
375 427
376/* 428/*
377 * toggle the bit to wake up uCode and check the temperature 429 * toggle the bit to wake up uCode and check the temperature
@@ -409,6 +461,7 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
409 /* Reschedule the ct_kill timer to occur in 461 /* Reschedule the ct_kill timer to occur in
410 * CT_KILL_EXIT_DURATION seconds to ensure we get a 462 * CT_KILL_EXIT_DURATION seconds to ensure we get a
411 * thermal update */ 463 * thermal update */
464 IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
412 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies + 465 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
413 CT_KILL_EXIT_DURATION * HZ); 466 CT_KILL_EXIT_DURATION * HZ);
414 } 467 }
@@ -432,6 +485,33 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
432 } 485 }
433} 486}
434 487
488static void iwl_tt_ready_for_ct_kill(unsigned long data)
489{
490 struct iwl_priv *priv = (struct iwl_priv *)data;
491 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
492
493 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
494 return;
495
496 /* temperature timer expired, ready to go into CT_KILL state */
497 if (tt->state != IWL_TI_CT_KILL) {
498 IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n");
499 tt->state = IWL_TI_CT_KILL;
500 set_bit(STATUS_CT_KILL, &priv->status);
501 iwl_perform_ct_kill_task(priv, true);
502 }
503}
504
505static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
506{
507 IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
508 /* make request to retrieve statistics information */
509 iwl_send_statistics_request(priv, CMD_SYNC, false);
510 /* Reschedule the ct_kill wait timer */
511 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
512 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
513}
514
435#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY) 515#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
436#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100) 516#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
437#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90) 517#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
@@ -445,7 +525,7 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
445 * Throttle early enough to lower the power consumption before 525 * Throttle early enough to lower the power consumption before
446 * drastic steps are needed 526 * drastic steps are needed
447 */ 527 */
448static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp) 528static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
449{ 529{
450 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 530 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
451 enum iwl_tt_state old_state; 531 enum iwl_tt_state old_state;
@@ -474,6 +554,8 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
474#ifdef CONFIG_IWLWIFI_DEBUG 554#ifdef CONFIG_IWLWIFI_DEBUG
475 tt->tt_previous_temp = temp; 555 tt->tt_previous_temp = temp;
476#endif 556#endif
557 /* stop ct_kill_waiting_tm timer */
558 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
477 if (tt->state != old_state) { 559 if (tt->state != old_state) {
478 switch (tt->state) { 560 switch (tt->state) {
479 case IWL_TI_0: 561 case IWL_TI_0:
@@ -494,17 +576,28 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
494 break; 576 break;
495 } 577 }
496 mutex_lock(&priv->mutex); 578 mutex_lock(&priv->mutex);
497 if (iwl_power_update_mode(priv, true)) { 579 if (old_state == IWL_TI_CT_KILL)
580 clear_bit(STATUS_CT_KILL, &priv->status);
581 if (tt->state != IWL_TI_CT_KILL &&
582 iwl_power_update_mode(priv, true)) {
498 /* TT state not updated 583 /* TT state not updated
499 * try again during next temperature read 584 * try again during next temperature read
500 */ 585 */
586 if (old_state == IWL_TI_CT_KILL)
587 set_bit(STATUS_CT_KILL, &priv->status);
501 tt->state = old_state; 588 tt->state = old_state;
502 IWL_ERR(priv, "Cannot update power mode, " 589 IWL_ERR(priv, "Cannot update power mode, "
503 "TT state not updated\n"); 590 "TT state not updated\n");
504 } else { 591 } else {
505 if (tt->state == IWL_TI_CT_KILL) 592 if (tt->state == IWL_TI_CT_KILL) {
506 iwl_perform_ct_kill_task(priv, true); 593 if (force) {
507 else if (old_state == IWL_TI_CT_KILL && 594 set_bit(STATUS_CT_KILL, &priv->status);
595 iwl_perform_ct_kill_task(priv, true);
596 } else {
597 iwl_prepare_ct_kill_task(priv);
598 tt->state = old_state;
599 }
600 } else if (old_state == IWL_TI_CT_KILL &&
508 tt->state != IWL_TI_CT_KILL) 601 tt->state != IWL_TI_CT_KILL)
509 iwl_perform_ct_kill_task(priv, false); 602 iwl_perform_ct_kill_task(priv, false);
510 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n", 603 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
@@ -531,13 +624,13 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
531 *============================================================================= 624 *=============================================================================
532 * Condition Nxt State Condition Nxt State Condition Nxt State 625 * Condition Nxt State Condition Nxt State Condition Nxt State
533 *----------------------------------------------------------------------------- 626 *-----------------------------------------------------------------------------
534 * IWL_TI_0 T >= 115 CT_KILL 115>T>=105 TI_1 N/A N/A 627 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
535 * IWL_TI_1 T >= 115 CT_KILL 115>T>=110 TI_2 T<=95 TI_0 628 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
536 * IWL_TI_2 T >= 115 CT_KILL T<=100 TI_1 629 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
537 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 630 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
538 *============================================================================= 631 *=============================================================================
539 */ 632 */
540static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp) 633static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
541{ 634{
542 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 635 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
543 int i; 636 int i;
@@ -582,6 +675,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
582 break; 675 break;
583 } 676 }
584 } 677 }
678 /* stop ct_kill_waiting_tm timer */
679 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
585 if (changed) { 680 if (changed) {
586 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 681 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
587 682
@@ -613,12 +708,17 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
613 iwl_set_rxon_ht(priv, &priv->current_ht_config); 708 iwl_set_rxon_ht(priv, &priv->current_ht_config);
614 } 709 }
615 mutex_lock(&priv->mutex); 710 mutex_lock(&priv->mutex);
616 if (iwl_power_update_mode(priv, true)) { 711 if (old_state == IWL_TI_CT_KILL)
712 clear_bit(STATUS_CT_KILL, &priv->status);
713 if (tt->state != IWL_TI_CT_KILL &&
714 iwl_power_update_mode(priv, true)) {
617 /* TT state not updated 715 /* TT state not updated
618 * try again during next temperature read 716 * try again during next temperature read
619 */ 717 */
620 IWL_ERR(priv, "Cannot update power mode, " 718 IWL_ERR(priv, "Cannot update power mode, "
621 "TT state not updated\n"); 719 "TT state not updated\n");
720 if (old_state == IWL_TI_CT_KILL)
721 set_bit(STATUS_CT_KILL, &priv->status);
622 tt->state = old_state; 722 tt->state = old_state;
623 } else { 723 } else {
624 IWL_DEBUG_POWER(priv, 724 IWL_DEBUG_POWER(priv,
@@ -626,9 +726,15 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
626 tt->state); 726 tt->state);
627 if (old_state != IWL_TI_CT_KILL && 727 if (old_state != IWL_TI_CT_KILL &&
628 tt->state == IWL_TI_CT_KILL) { 728 tt->state == IWL_TI_CT_KILL) {
629 IWL_DEBUG_POWER(priv, "Enter IWL_TI_CT_KILL\n"); 729 if (force) {
630 iwl_perform_ct_kill_task(priv, true); 730 IWL_DEBUG_POWER(priv,
631 731 "Enter IWL_TI_CT_KILL\n");
732 set_bit(STATUS_CT_KILL, &priv->status);
733 iwl_perform_ct_kill_task(priv, true);
734 } else {
735 iwl_prepare_ct_kill_task(priv);
736 tt->state = old_state;
737 }
632 } else if (old_state == IWL_TI_CT_KILL && 738 } else if (old_state == IWL_TI_CT_KILL &&
633 tt->state != IWL_TI_CT_KILL) { 739 tt->state != IWL_TI_CT_KILL) {
634 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n"); 740 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
@@ -665,10 +771,11 @@ static void iwl_bg_ct_enter(struct work_struct *work)
665 "- ucode going to sleep!\n"); 771 "- ucode going to sleep!\n");
666 if (!priv->thermal_throttle.advanced_tt) 772 if (!priv->thermal_throttle.advanced_tt)
667 iwl_legacy_tt_handler(priv, 773 iwl_legacy_tt_handler(priv,
668 IWL_MINIMAL_POWER_THRESHOLD); 774 IWL_MINIMAL_POWER_THRESHOLD,
775 true);
669 else 776 else
670 iwl_advance_tt_handler(priv, 777 iwl_advance_tt_handler(priv,
671 CT_KILL_THRESHOLD + 1); 778 CT_KILL_THRESHOLD + 1, true);
672 } 779 }
673} 780}
674 781
@@ -695,11 +802,18 @@ static void iwl_bg_ct_exit(struct work_struct *work)
695 IWL_ERR(priv, 802 IWL_ERR(priv,
696 "Device temperature below critical" 803 "Device temperature below critical"
697 "- ucode awake!\n"); 804 "- ucode awake!\n");
805 /*
806 * exit from CT_KILL state
807 * reset the current temperature reading
808 */
809 priv->temperature = 0;
698 if (!priv->thermal_throttle.advanced_tt) 810 if (!priv->thermal_throttle.advanced_tt)
699 iwl_legacy_tt_handler(priv, 811 iwl_legacy_tt_handler(priv,
700 IWL_REDUCED_PERFORMANCE_THRESHOLD_2); 812 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
813 true);
701 else 814 else
702 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD); 815 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
816 true);
703 } 817 }
704} 818}
705 819
@@ -735,9 +849,9 @@ static void iwl_bg_tt_work(struct work_struct *work)
735 temp = KELVIN_TO_CELSIUS(priv->temperature); 849 temp = KELVIN_TO_CELSIUS(priv->temperature);
736 850
737 if (!priv->thermal_throttle.advanced_tt) 851 if (!priv->thermal_throttle.advanced_tt)
738 iwl_legacy_tt_handler(priv, temp); 852 iwl_legacy_tt_handler(priv, temp, false);
739 else 853 else
740 iwl_advance_tt_handler(priv, temp); 854 iwl_advance_tt_handler(priv, temp, false);
741} 855}
742 856
743void iwl_tt_handler(struct iwl_priv *priv) 857void iwl_tt_handler(struct iwl_priv *priv)
@@ -768,16 +882,18 @@ void iwl_tt_initialize(struct iwl_priv *priv)
768 tt->state = IWL_TI_0; 882 tt->state = IWL_TI_0;
769 init_timer(&priv->thermal_throttle.ct_kill_exit_tm); 883 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
770 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv; 884 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
771 priv->thermal_throttle.ct_kill_exit_tm.function = iwl_tt_check_exit_ct_kill; 885 priv->thermal_throttle.ct_kill_exit_tm.function =
772 886 iwl_tt_check_exit_ct_kill;
887 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
888 priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
889 priv->thermal_throttle.ct_kill_waiting_tm.function =
890 iwl_tt_ready_for_ct_kill;
773 /* setup deferred ct kill work */ 891 /* setup deferred ct kill work */
774 INIT_WORK(&priv->tt_work, iwl_bg_tt_work); 892 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
775 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 893 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
776 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); 894 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
777 895
778 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 896 if (priv->cfg->adv_thermal_throttle) {
779 case CSR_HW_REV_TYPE_6x00:
780 case CSR_HW_REV_TYPE_6x50:
781 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n"); 897 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
782 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) * 898 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
783 IWL_TI_STATE_MAX, GFP_KERNEL); 899 IWL_TI_STATE_MAX, GFP_KERNEL);
@@ -810,11 +926,9 @@ void iwl_tt_initialize(struct iwl_priv *priv)
810 &restriction_range[0], size); 926 &restriction_range[0], size);
811 priv->thermal_throttle.advanced_tt = true; 927 priv->thermal_throttle.advanced_tt = true;
812 } 928 }
813 break; 929 } else {
814 default:
815 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n"); 930 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
816 priv->thermal_throttle.advanced_tt = false; 931 priv->thermal_throttle.advanced_tt = false;
817 break;
818 } 932 }
819} 933}
820EXPORT_SYMBOL(iwl_tt_initialize); 934EXPORT_SYMBOL(iwl_tt_initialize);
@@ -826,6 +940,8 @@ void iwl_tt_exit(struct iwl_priv *priv)
826 940
827 /* stop ct_kill_exit_tm timer if activated */ 941 /* stop ct_kill_exit_tm timer if activated */
828 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); 942 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
943 /* stop ct_kill_waiting_tm timer if activated */
944 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
829 cancel_work_sync(&priv->tt_work); 945 cancel_work_sync(&priv->tt_work);
830 cancel_work_sync(&priv->ct_enter); 946 cancel_work_sync(&priv->ct_enter);
831 cancel_work_sync(&priv->ct_exit); 947 cancel_work_sync(&priv->ct_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index df6f6a49712b..310c32e8f698 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -33,6 +33,7 @@
33#define IWL_ABSOLUTE_ZERO 0 33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF 34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5 35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
36 37
37enum iwl_antenna_ok { 38enum iwl_antenna_ok {
38 IWL_ANT_OK_NONE, 39 IWL_ANT_OK_NONE,
@@ -110,6 +111,7 @@ struct iwl_tt_mgmt {
110 struct iwl_tt_restriction *restriction; 111 struct iwl_tt_restriction *restriction;
111 struct iwl_tt_trans *transaction; 112 struct iwl_tt_trans *transaction;
112 struct timer_list ct_kill_exit_tm; 113 struct timer_list ct_kill_exit_tm;
114 struct timer_list ct_kill_waiting_tm;
113}; 115};
114 116
115enum iwl_power_level { 117enum iwl_power_level {
@@ -129,6 +131,7 @@ struct iwl_power_mgr {
129 131
130int iwl_power_update_mode(struct iwl_priv *priv, bool force); 132int iwl_power_update_mode(struct iwl_priv *priv, bool force);
131bool iwl_ht_enabled(struct iwl_priv *priv); 133bool iwl_ht_enabled(struct iwl_priv *priv);
134bool iwl_within_ct_kill_margin(struct iwl_priv *priv);
132enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv); 135enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
133enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv); 136enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
134void iwl_tt_enter_ct_kill(struct iwl_priv *priv); 137void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index d393e8f02102..6d95832db06d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -254,7 +254,8 @@
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel, 254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues. 255 * but one DMA channel may take input from several queues.
256 * 256 *
257 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows: 257 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
258 * 259 *
259 * 0 -- EDCA BK (background) frames, lowest priority 260 * 0 -- EDCA BK (background) frames, lowest priority
260 * 1 -- EDCA BE (best effort) frames, normal priority 261 * 1 -- EDCA BE (best effort) frames, normal priority
@@ -265,9 +266,21 @@
265 * 6 -- HCCA long frames 266 * 6 -- HCCA long frames
266 * 7 -- not used by driver (device-internal only) 267 * 7 -- not used by driver (device-internal only)
267 * 268 *
269 * For 5000 series and up, they are used slightly differently
270 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
271 *
272 * 0 -- EDCA BK (background) frames, lowest priority
273 * 1 -- EDCA BE (best effort) frames, normal priority
274 * 2 -- EDCA VI (video) frames, higher priority
275 * 3 -- EDCA VO (voice) and management frames, highest priority
276 * 4 -- (TBD)
277 * 5 -- HCCA short frames
278 * 6 -- HCCA long frames
279 * 7 -- Commands
280 *
268 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. 281 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
269 * In addition, driver can map queues 7-15 to Tx DMA/FIFO channels 0-3 to 282 * In addition, driver can map the remaining queues to Tx DMA/FIFO
270 * support 11n aggregation via EDCA DMA channels. 283 * channels 0-3 to support 11n aggregation via EDCA DMA channels.
271 * 284 *
272 * The driver sets up each queue to work in one of two modes: 285 * The driver sets up each queue to work in one of two modes:
273 * 286 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626bcd3ec..6090bc15a6d5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -140,6 +140,8 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
140 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); 140 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
141 141
142 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 142 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
143 IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n",
144 reg);
143 iwl_set_bit(priv, CSR_GP_CNTRL, 145 iwl_set_bit(priv, CSR_GP_CNTRL,
144 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 146 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
145 goto exit_unlock; 147 goto exit_unlock;
@@ -200,7 +202,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
200 list_del(element); 202 list_del(element);
201 203
202 /* Point to Rx buffer via next RBD in circular buffer */ 204 /* Point to Rx buffer via next RBD in circular buffer */
203 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr); 205 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
204 rxq->queue[rxq->write] = rxb; 206 rxq->queue[rxq->write] = rxb;
205 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 207 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
206 rxq->free_count--; 208 rxq->free_count--;
@@ -239,8 +241,9 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
239 struct iwl_rx_queue *rxq = &priv->rxq; 241 struct iwl_rx_queue *rxq = &priv->rxq;
240 struct list_head *element; 242 struct list_head *element;
241 struct iwl_rx_mem_buffer *rxb; 243 struct iwl_rx_mem_buffer *rxb;
242 struct sk_buff *skb; 244 struct page *page;
243 unsigned long flags; 245 unsigned long flags;
246 gfp_t gfp_mask = priority;
244 247
245 while (1) { 248 while (1) {
246 spin_lock_irqsave(&rxq->lock, flags); 249 spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +254,35 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
251 spin_unlock_irqrestore(&rxq->lock, flags); 254 spin_unlock_irqrestore(&rxq->lock, flags);
252 255
253 if (rxq->free_count > RX_LOW_WATERMARK) 256 if (rxq->free_count > RX_LOW_WATERMARK)
254 priority |= __GFP_NOWARN; 257 gfp_mask |= __GFP_NOWARN;
255 /* Alloc a new receive buffer */ 258
256 skb = alloc_skb(priv->hw_params.rx_buf_size + 256, 259 if (priv->hw_params.rx_page_order > 0)
257 priority); 260 gfp_mask |= __GFP_COMP;
258 261
259 if (!skb) { 262 /* Alloc a new receive buffer */
263 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
264 if (!page) {
260 if (net_ratelimit()) 265 if (net_ratelimit())
261 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 266 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
267 "order: %d\n",
268 priv->hw_params.rx_page_order);
269
262 if ((rxq->free_count <= RX_LOW_WATERMARK) && 270 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
263 net_ratelimit()) 271 net_ratelimit())
264 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", 272 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
265 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", 273 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
266 rxq->free_count); 274 rxq->free_count);
267 /* We don't reschedule replenish work here -- we will 275 /* We don't reschedule replenish work here -- we will
268 * call the restock method and if it still needs 276 * call the restock method and if it still needs
269 * more buffers it will schedule replenish */ 277 * more buffers it will schedule replenish */
270 break; 278 return;
271 } 279 }
272 280
273 spin_lock_irqsave(&rxq->lock, flags); 281 spin_lock_irqsave(&rxq->lock, flags);
274 282
275 if (list_empty(&rxq->rx_used)) { 283 if (list_empty(&rxq->rx_used)) {
276 spin_unlock_irqrestore(&rxq->lock, flags); 284 spin_unlock_irqrestore(&rxq->lock, flags);
277 dev_kfree_skb_any(skb); 285 __free_pages(page, priv->hw_params.rx_page_order);
278 return; 286 return;
279 } 287 }
280 element = rxq->rx_used.next; 288 element = rxq->rx_used.next;
@@ -283,24 +291,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
283 291
284 spin_unlock_irqrestore(&rxq->lock, flags); 292 spin_unlock_irqrestore(&rxq->lock, flags);
285 293
286 rxb->skb = skb; 294 rxb->page = page;
287 /* Get physical address of RB/SKB */ 295 /* Get physical address of the RB */
288 rxb->real_dma_addr = pci_map_single( 296 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
289 priv->pci_dev, 297 PAGE_SIZE << priv->hw_params.rx_page_order,
290 rxb->skb->data, 298 PCI_DMA_FROMDEVICE);
291 priv->hw_params.rx_buf_size + 256,
292 PCI_DMA_FROMDEVICE);
293 /* dma address must be no more than 36 bits */ 299 /* dma address must be no more than 36 bits */
294 BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36)); 300 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
295 /* and also 256 byte aligned! */ 301 /* and also 256 byte aligned! */
296 rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256); 302 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
297 skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
298 303
299 spin_lock_irqsave(&rxq->lock, flags); 304 spin_lock_irqsave(&rxq->lock, flags);
300 305
301 list_add_tail(&rxb->list, &rxq->rx_free); 306 list_add_tail(&rxb->list, &rxq->rx_free);
302 rxq->free_count++; 307 rxq->free_count++;
303 priv->alloc_rxb_skb++; 308 priv->alloc_rxb_page++;
304 309
305 spin_unlock_irqrestore(&rxq->lock, flags); 310 spin_unlock_irqrestore(&rxq->lock, flags);
306 } 311 }
@@ -336,12 +341,14 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
336{ 341{
337 int i; 342 int i;
338 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 343 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
339 if (rxq->pool[i].skb != NULL) { 344 if (rxq->pool[i].page != NULL) {
340 pci_unmap_single(priv->pci_dev, 345 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
341 rxq->pool[i].real_dma_addr, 346 PAGE_SIZE << priv->hw_params.rx_page_order,
342 priv->hw_params.rx_buf_size + 256, 347 PCI_DMA_FROMDEVICE);
343 PCI_DMA_FROMDEVICE); 348 __free_pages(rxq->pool[i].page,
344 dev_kfree_skb(rxq->pool[i].skb); 349 priv->hw_params.rx_page_order);
350 rxq->pool[i].page = NULL;
351 priv->alloc_rxb_page--;
345 } 352 }
346 } 353 }
347 354
@@ -405,14 +412,14 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
405 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 412 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
406 /* In the reset function, these buffers may have been allocated 413 /* In the reset function, these buffers may have been allocated
407 * to an SKB, so we need to unmap and free potential storage */ 414 * to an SKB, so we need to unmap and free potential storage */
408 if (rxq->pool[i].skb != NULL) { 415 if (rxq->pool[i].page != NULL) {
409 pci_unmap_single(priv->pci_dev, 416 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
410 rxq->pool[i].real_dma_addr, 417 PAGE_SIZE << priv->hw_params.rx_page_order,
411 priv->hw_params.rx_buf_size + 256, 418 PCI_DMA_FROMDEVICE);
412 PCI_DMA_FROMDEVICE); 419 priv->alloc_rxb_page--;
413 priv->alloc_rxb_skb--; 420 __free_pages(rxq->pool[i].page,
414 dev_kfree_skb(rxq->pool[i].skb); 421 priv->hw_params.rx_page_order);
415 rxq->pool[i].skb = NULL; 422 rxq->pool[i].page = NULL;
416 } 423 }
417 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 424 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
418 } 425 }
@@ -470,7 +477,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
470 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| 477 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
471 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 478 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
472 479
473 iwl_write32(priv, CSR_INT_COALESCING, 0x40); 480 /* Set interrupt coalescing timer to 64 x 32 = 2048 usecs */
481 iwl_write8(priv, CSR_INT_COALESCING, 0x40);
474 482
475 return 0; 483 return 0;
476} 484}
@@ -491,7 +499,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
491 struct iwl_rx_mem_buffer *rxb) 499 struct iwl_rx_mem_buffer *rxb)
492 500
493{ 501{
494 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 502 struct iwl_rx_packet *pkt = rxb_addr(rxb);
495 struct iwl_missed_beacon_notif *missed_beacon; 503 struct iwl_missed_beacon_notif *missed_beacon;
496 504
497 missed_beacon = &pkt->u.missed_beacon; 505 missed_beacon = &pkt->u.missed_beacon;
@@ -548,13 +556,51 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
548 priv->last_rx_noise); 556 priv->last_rx_noise);
549} 557}
550 558
559#ifdef CONFIG_IWLWIFI_DEBUG
560/*
561 * based on the assumption of all statistics counter are in DWORD
562 * FIXME: This function is for debugging, do not deal with
563 * the case of counters roll-over.
564 */
565static void iwl_accumulative_statistics(struct iwl_priv *priv,
566 __le32 *stats)
567{
568 int i;
569 __le32 *prev_stats;
570 u32 *accum_stats;
571
572 prev_stats = (__le32 *)&priv->statistics;
573 accum_stats = (u32 *)&priv->accum_statistics;
574
575 for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
576 i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
577 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
578 *accum_stats += (le32_to_cpu(*stats) -
579 le32_to_cpu(*prev_stats));
580
581 /* reset accumulative statistics for "no-counter" type statistics */
582 priv->accum_statistics.general.temperature =
583 priv->statistics.general.temperature;
584 priv->accum_statistics.general.temperature_m =
585 priv->statistics.general.temperature_m;
586 priv->accum_statistics.general.ttl_timestamp =
587 priv->statistics.general.ttl_timestamp;
588 priv->accum_statistics.tx.tx_power.ant_a =
589 priv->statistics.tx.tx_power.ant_a;
590 priv->accum_statistics.tx.tx_power.ant_b =
591 priv->statistics.tx.tx_power.ant_b;
592 priv->accum_statistics.tx.tx_power.ant_c =
593 priv->statistics.tx.tx_power.ant_c;
594}
595#endif
596
551#define REG_RECALIB_PERIOD (60) 597#define REG_RECALIB_PERIOD (60)
552 598
553void iwl_rx_statistics(struct iwl_priv *priv, 599void iwl_rx_statistics(struct iwl_priv *priv,
554 struct iwl_rx_mem_buffer *rxb) 600 struct iwl_rx_mem_buffer *rxb)
555{ 601{
556 int change; 602 int change;
557 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 603 struct iwl_rx_packet *pkt = rxb_addr(rxb);
558 604
559 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 605 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
560 (int)sizeof(priv->statistics), 606 (int)sizeof(priv->statistics),
@@ -566,6 +612,9 @@ void iwl_rx_statistics(struct iwl_priv *priv,
566 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 612 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
567 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 613 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
568 614
615#ifdef CONFIG_IWLWIFI_DEBUG
616 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
617#endif
569 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); 618 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
570 619
571 set_bit(STATUS_STATISTICS, &priv->status); 620 set_bit(STATUS_STATISTICS, &priv->status);
@@ -582,14 +631,29 @@ void iwl_rx_statistics(struct iwl_priv *priv,
582 iwl_rx_calc_noise(priv); 631 iwl_rx_calc_noise(priv);
583 queue_work(priv->workqueue, &priv->run_time_calib_work); 632 queue_work(priv->workqueue, &priv->run_time_calib_work);
584 } 633 }
585
586 iwl_leds_background(priv);
587
588 if (priv->cfg->ops->lib->temp_ops.temperature && change) 634 if (priv->cfg->ops->lib->temp_ops.temperature && change)
589 priv->cfg->ops->lib->temp_ops.temperature(priv); 635 priv->cfg->ops->lib->temp_ops.temperature(priv);
590} 636}
591EXPORT_SYMBOL(iwl_rx_statistics); 637EXPORT_SYMBOL(iwl_rx_statistics);
592 638
639void iwl_reply_statistics(struct iwl_priv *priv,
640 struct iwl_rx_mem_buffer *rxb)
641{
642 struct iwl_rx_packet *pkt = rxb_addr(rxb);
643
644 if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
645 memset(&priv->statistics, 0,
646 sizeof(struct iwl_notif_statistics));
647#ifdef CONFIG_IWLWIFI_DEBUG
648 memset(&priv->accum_statistics, 0,
649 sizeof(struct iwl_notif_statistics));
650#endif
651 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
652 }
653 iwl_rx_statistics(priv, rxb);
654}
655EXPORT_SYMBOL(iwl_reply_statistics);
656
593#define PERFECT_RSSI (-20) /* dBm */ 657#define PERFECT_RSSI (-20) /* dBm */
594#define WORST_RSSI (-95) /* dBm */ 658#define WORST_RSSI (-95) /* dBm */
595#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) 659#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
@@ -878,6 +942,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
878 struct iwl_rx_mem_buffer *rxb, 942 struct iwl_rx_mem_buffer *rxb,
879 struct ieee80211_rx_status *stats) 943 struct ieee80211_rx_status *stats)
880{ 944{
945 struct sk_buff *skb;
946 int ret = 0;
947 __le16 fc = hdr->frame_control;
948
881 /* We only process data packets if the interface is open */ 949 /* We only process data packets if the interface is open */
882 if (unlikely(!priv->is_open)) { 950 if (unlikely(!priv->is_open)) {
883 IWL_DEBUG_DROP_LIMIT(priv, 951 IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +958,44 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
890 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 958 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
891 return; 959 return;
892 960
893 /* Resize SKB from mac header to end of packet */ 961 skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
894 skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data); 962 if (!skb) {
895 skb_put(rxb->skb, len); 963 IWL_ERR(priv, "alloc_skb failed\n");
964 return;
965 }
966
967 skb_reserve(skb, IWL_LINK_HDR_MAX);
968 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
969
970 /* mac80211 currently doesn't support paged SKB. Convert it to
971 * linear SKB for management frame and data frame requires
 972 * software decryption or software defragmentation. */
973 if (ieee80211_is_mgmt(fc) ||
974 ieee80211_has_protected(fc) ||
975 ieee80211_has_morefrags(fc) ||
976 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
977 ret = skb_linearize(skb);
978 else
979 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
980 0 : -ENOMEM;
981
982 if (ret) {
983 kfree_skb(skb);
984 goto out;
985 }
986
987 /*
988 * XXX: We cannot touch the page and its virtual memory (hdr) after
989 * here. It might have already been freed by the above skb change.
990 */
896 991
897 iwl_update_stats(priv, false, hdr->frame_control, len); 992 iwl_update_stats(priv, false, fc, len);
898 memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats)); 993 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
899 ieee80211_rx_irqsafe(priv->hw, rxb->skb); 994
900 priv->alloc_rxb_skb--; 995 ieee80211_rx(priv->hw, skb);
901 rxb->skb = NULL; 996 out:
997 priv->alloc_rxb_page--;
998 rxb->page = NULL;
902} 999}
903 1000
904/* This is necessary only for a number of statistics, see the caller. */ 1001/* This is necessary only for a number of statistics, see the caller. */
@@ -926,13 +1023,12 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
926{ 1023{
927 struct ieee80211_hdr *header; 1024 struct ieee80211_hdr *header;
928 struct ieee80211_rx_status rx_status; 1025 struct ieee80211_rx_status rx_status;
929 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1026 struct iwl_rx_packet *pkt = rxb_addr(rxb);
930 struct iwl_rx_phy_res *phy_res; 1027 struct iwl_rx_phy_res *phy_res;
931 __le32 rx_pkt_status; 1028 __le32 rx_pkt_status;
932 struct iwl4965_rx_mpdu_res_start *amsdu; 1029 struct iwl4965_rx_mpdu_res_start *amsdu;
933 u32 len; 1030 u32 len;
934 u32 ampdu_status; 1031 u32 ampdu_status;
935 u16 fc;
936 u32 rate_n_flags; 1032 u32 rate_n_flags;
937 1033
938 /** 1034 /**
@@ -1065,20 +1161,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
1065 priv->last_tsf = le64_to_cpu(phy_res->timestamp); 1161 priv->last_tsf = le64_to_cpu(phy_res->timestamp);
1066 } 1162 }
1067 1163
1068 fc = le16_to_cpu(header->frame_control); 1164 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1069 switch (fc & IEEE80211_FCTL_FTYPE) { 1165 rxb, &rx_status);
1070 case IEEE80211_FTYPE_MGMT:
1071 case IEEE80211_FTYPE_DATA:
1072 if (priv->iw_mode == NL80211_IFTYPE_AP)
1073 iwl_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
1074 header->addr2);
1075 /* fall through */
1076 default:
1077 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1078 rxb, &rx_status);
1079 break;
1080
1081 }
1082} 1166}
1083EXPORT_SYMBOL(iwl_rx_reply_rx); 1167EXPORT_SYMBOL(iwl_rx_reply_rx);
1084 1168
@@ -1087,7 +1171,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
1087void iwl_rx_reply_rx_phy(struct iwl_priv *priv, 1171void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1088 struct iwl_rx_mem_buffer *rxb) 1172 struct iwl_rx_mem_buffer *rxb)
1089{ 1173{
1090 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1174 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1091 priv->last_phy_res[0] = 1; 1175 priv->last_phy_res[0] = 1;
1092 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), 1176 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1093 sizeof(struct iwl_rx_phy_res)); 1177 sizeof(struct iwl_rx_phy_res));
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 4f3a108fa990..a2b2b8315ff9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -27,7 +27,6 @@
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <net/lib80211.h>
31#include <net/mac80211.h> 30#include <net/mac80211.h>
32 31
33#include "iwl-eeprom.h" 32#include "iwl-eeprom.h"
@@ -112,7 +111,7 @@ EXPORT_SYMBOL(iwl_scan_cancel_timeout);
112static int iwl_send_scan_abort(struct iwl_priv *priv) 111static int iwl_send_scan_abort(struct iwl_priv *priv)
113{ 112{
114 int ret = 0; 113 int ret = 0;
115 struct iwl_rx_packet *res; 114 struct iwl_rx_packet *pkt;
116 struct iwl_host_cmd cmd = { 115 struct iwl_host_cmd cmd = {
117 .id = REPLY_SCAN_ABORT_CMD, 116 .id = REPLY_SCAN_ABORT_CMD,
118 .flags = CMD_WANT_SKB, 117 .flags = CMD_WANT_SKB,
@@ -132,21 +131,21 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
132 return ret; 131 return ret;
133 } 132 }
134 133
135 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 134 pkt = (struct iwl_rx_packet *)cmd.reply_page;
136 if (res->u.status != CAN_ABORT_STATUS) { 135 if (pkt->u.status != CAN_ABORT_STATUS) {
137 /* The scan abort will return 1 for success or 136 /* The scan abort will return 1 for success or
138 * 2 for "failure". A failure condition can be 137 * 2 for "failure". A failure condition can be
139 * due to simply not being in an active scan which 138 * due to simply not being in an active scan which
140 * can occur if we send the scan abort before we 139 * can occur if we send the scan abort before we
141 * the microcode has notified us that a scan is 140 * the microcode has notified us that a scan is
142 * completed. */ 141 * completed. */
143 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", res->u.status); 142 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status);
144 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 143 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
145 clear_bit(STATUS_SCAN_HW, &priv->status); 144 clear_bit(STATUS_SCAN_HW, &priv->status);
146 } 145 }
147 146
148 priv->alloc_rxb_skb--; 147 priv->alloc_rxb_page--;
149 dev_kfree_skb_any(cmd.reply_skb); 148 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
150 149
151 return ret; 150 return ret;
152} 151}
@@ -156,7 +155,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
156 struct iwl_rx_mem_buffer *rxb) 155 struct iwl_rx_mem_buffer *rxb)
157{ 156{
158#ifdef CONFIG_IWLWIFI_DEBUG 157#ifdef CONFIG_IWLWIFI_DEBUG
159 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 158 struct iwl_rx_packet *pkt = rxb_addr(rxb);
160 struct iwl_scanreq_notification *notif = 159 struct iwl_scanreq_notification *notif =
161 (struct iwl_scanreq_notification *)pkt->u.raw; 160 (struct iwl_scanreq_notification *)pkt->u.raw;
162 161
@@ -168,7 +167,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
168static void iwl_rx_scan_start_notif(struct iwl_priv *priv, 167static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
169 struct iwl_rx_mem_buffer *rxb) 168 struct iwl_rx_mem_buffer *rxb)
170{ 169{
171 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 170 struct iwl_rx_packet *pkt = rxb_addr(rxb);
172 struct iwl_scanstart_notification *notif = 171 struct iwl_scanstart_notification *notif =
173 (struct iwl_scanstart_notification *)pkt->u.raw; 172 (struct iwl_scanstart_notification *)pkt->u.raw;
174 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); 173 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
@@ -187,7 +186,7 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
187 struct iwl_rx_mem_buffer *rxb) 186 struct iwl_rx_mem_buffer *rxb)
188{ 187{
189#ifdef CONFIG_IWLWIFI_DEBUG 188#ifdef CONFIG_IWLWIFI_DEBUG
190 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 189 struct iwl_rx_packet *pkt = rxb_addr(rxb);
191 struct iwl_scanresults_notification *notif = 190 struct iwl_scanresults_notification *notif =
192 (struct iwl_scanresults_notification *)pkt->u.raw; 191 (struct iwl_scanresults_notification *)pkt->u.raw;
193 192
@@ -214,7 +213,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb) 213 struct iwl_rx_mem_buffer *rxb)
215{ 214{
216#ifdef CONFIG_IWLWIFI_DEBUG 215#ifdef CONFIG_IWLWIFI_DEBUG
217 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
218 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 217 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
219 218
220 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 219 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
@@ -402,6 +401,7 @@ void iwl_init_scan_params(struct iwl_priv *priv)
402 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 401 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
403 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; 402 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
404} 403}
404EXPORT_SYMBOL(iwl_init_scan_params);
405 405
406static int iwl_scan_initiate(struct iwl_priv *priv) 406static int iwl_scan_initiate(struct iwl_priv *priv)
407{ 407{
@@ -581,6 +581,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
581 u8 rate; 581 u8 rate;
582 bool is_active = false; 582 bool is_active = false;
583 int chan_mod; 583 int chan_mod;
584 u8 active_chains;
584 585
585 conf = ieee80211_get_hw_conf(priv->hw); 586 conf = ieee80211_get_hw_conf(priv->hw);
586 587
@@ -734,9 +735,22 @@ static void iwl_bg_request_scan(struct work_struct *data)
734 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); 735 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
735 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); 736 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
736 737
738 /* In power save mode use one chain, otherwise use all chains */
739 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
740 /* rx_ant has been set to all valid chains previously */
741 active_chains = rx_ant &
742 ((u8)(priv->chain_noise_data.active_chains));
743 if (!active_chains)
744 active_chains = rx_ant;
745
746 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
747 priv->chain_noise_data.active_chains);
748
749 rx_ant = first_antenna(active_chains);
750 }
737 /* MIMO is not used here, but value is required */ 751 /* MIMO is not used here, but value is required */
738 rx_chain |= ANT_ABC << RXON_RX_CHAIN_VALID_POS; 752 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
739 rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 753 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
740 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 754 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
741 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 755 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
742 scan->rx_chain = cpu_to_le16(rx_chain); 756 scan->rx_chain = cpu_to_le16(rx_chain);
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
index 022bcf115731..1ea5cd345fe8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
@@ -177,7 +177,7 @@ static int iwl_get_measurement(struct iwl_priv *priv,
177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb) 178 struct iwl_rx_mem_buffer *rxb)
179{ 179{
180 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 180 struct iwl_rx_packet *pkt = rxb_addr(rxb);
181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); 181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
182 182
183 if (!report->state) { 183 if (!report->state) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c6633fec8216..cd6a6901216e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -99,32 +99,25 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
99 99
100static void iwl_add_sta_callback(struct iwl_priv *priv, 100static void iwl_add_sta_callback(struct iwl_priv *priv,
101 struct iwl_device_cmd *cmd, 101 struct iwl_device_cmd *cmd,
102 struct sk_buff *skb) 102 struct iwl_rx_packet *pkt)
103{ 103{
104 struct iwl_rx_packet *res = NULL;
105 struct iwl_addsta_cmd *addsta = 104 struct iwl_addsta_cmd *addsta =
106 (struct iwl_addsta_cmd *)cmd->cmd.payload; 105 (struct iwl_addsta_cmd *)cmd->cmd.payload;
107 u8 sta_id = addsta->sta.sta_id; 106 u8 sta_id = addsta->sta.sta_id;
108 107
109 if (!skb) { 108 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
110 IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
111 return;
112 }
113
114 res = (struct iwl_rx_packet *)skb->data;
115 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
116 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 109 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
117 res->hdr.flags); 110 pkt->hdr.flags);
118 return; 111 return;
119 } 112 }
120 113
121 switch (res->u.add_sta.status) { 114 switch (pkt->u.add_sta.status) {
122 case ADD_STA_SUCCESS_MSK: 115 case ADD_STA_SUCCESS_MSK:
123 iwl_sta_ucode_activate(priv, sta_id); 116 iwl_sta_ucode_activate(priv, sta_id);
124 /* fall through */ 117 /* fall through */
125 default: 118 default:
126 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", 119 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
127 res->u.add_sta.status); 120 pkt->u.add_sta.status);
128 break; 121 break;
129 } 122 }
130} 123}
@@ -132,7 +125,7 @@ static void iwl_add_sta_callback(struct iwl_priv *priv,
132int iwl_send_add_sta(struct iwl_priv *priv, 125int iwl_send_add_sta(struct iwl_priv *priv,
133 struct iwl_addsta_cmd *sta, u8 flags) 126 struct iwl_addsta_cmd *sta, u8 flags)
134{ 127{
135 struct iwl_rx_packet *res = NULL; 128 struct iwl_rx_packet *pkt = NULL;
136 int ret = 0; 129 int ret = 0;
137 u8 data[sizeof(*sta)]; 130 u8 data[sizeof(*sta)];
138 struct iwl_host_cmd cmd = { 131 struct iwl_host_cmd cmd = {
@@ -152,15 +145,15 @@ int iwl_send_add_sta(struct iwl_priv *priv,
152 if (ret || (flags & CMD_ASYNC)) 145 if (ret || (flags & CMD_ASYNC))
153 return ret; 146 return ret;
154 147
155 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 148 pkt = (struct iwl_rx_packet *)cmd.reply_page;
156 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 149 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
157 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 150 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
158 res->hdr.flags); 151 pkt->hdr.flags);
159 ret = -EIO; 152 ret = -EIO;
160 } 153 }
161 154
162 if (ret == 0) { 155 if (ret == 0) {
163 switch (res->u.add_sta.status) { 156 switch (pkt->u.add_sta.status) {
164 case ADD_STA_SUCCESS_MSK: 157 case ADD_STA_SUCCESS_MSK:
165 iwl_sta_ucode_activate(priv, sta->sta.sta_id); 158 iwl_sta_ucode_activate(priv, sta->sta.sta_id);
166 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); 159 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
@@ -172,8 +165,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
172 } 165 }
173 } 166 }
174 167
175 priv->alloc_rxb_skb--; 168 priv->alloc_rxb_page--;
176 dev_kfree_skb_any(cmd.reply_skb); 169 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
177 170
178 return ret; 171 return ret;
179} 172}
@@ -189,6 +182,11 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
189 goto done; 182 goto done;
190 183
191 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 184 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
185 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
186 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
187 "static" :
188 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
189 "dynamic" : "disabled");
192 190
193 sta_flags = priv->stations[index].sta.station_flags; 191 sta_flags = priv->stations[index].sta.station_flags;
194 192
@@ -324,26 +322,19 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
324 322
325static void iwl_remove_sta_callback(struct iwl_priv *priv, 323static void iwl_remove_sta_callback(struct iwl_priv *priv,
326 struct iwl_device_cmd *cmd, 324 struct iwl_device_cmd *cmd,
327 struct sk_buff *skb) 325 struct iwl_rx_packet *pkt)
328{ 326{
329 struct iwl_rx_packet *res = NULL;
330 struct iwl_rem_sta_cmd *rm_sta = 327 struct iwl_rem_sta_cmd *rm_sta =
331 (struct iwl_rem_sta_cmd *)cmd->cmd.payload; 328 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
332 const char *addr = rm_sta->addr; 329 const char *addr = rm_sta->addr;
333 330
334 if (!skb) { 331 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
335 IWL_ERR(priv, "Error: Response NULL in REPLY_REMOVE_STA.\n");
336 return;
337 }
338
339 res = (struct iwl_rx_packet *)skb->data;
340 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
341 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 332 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
342 res->hdr.flags); 333 pkt->hdr.flags);
343 return; 334 return;
344 } 335 }
345 336
346 switch (res->u.rem_sta.status) { 337 switch (pkt->u.rem_sta.status) {
347 case REM_STA_SUCCESS_MSK: 338 case REM_STA_SUCCESS_MSK:
348 iwl_sta_ucode_deactivate(priv, addr); 339 iwl_sta_ucode_deactivate(priv, addr);
349 break; 340 break;
@@ -356,7 +347,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
356static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr, 347static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
357 u8 flags) 348 u8 flags)
358{ 349{
359 struct iwl_rx_packet *res = NULL; 350 struct iwl_rx_packet *pkt;
360 int ret; 351 int ret;
361 352
362 struct iwl_rem_sta_cmd rm_sta_cmd; 353 struct iwl_rem_sta_cmd rm_sta_cmd;
@@ -381,15 +372,15 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
381 if (ret || (flags & CMD_ASYNC)) 372 if (ret || (flags & CMD_ASYNC))
382 return ret; 373 return ret;
383 374
384 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 375 pkt = (struct iwl_rx_packet *)cmd.reply_page;
385 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 376 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
386 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 377 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
387 res->hdr.flags); 378 pkt->hdr.flags);
388 ret = -EIO; 379 ret = -EIO;
389 } 380 }
390 381
391 if (!ret) { 382 if (!ret) {
392 switch (res->u.rem_sta.status) { 383 switch (pkt->u.rem_sta.status) {
393 case REM_STA_SUCCESS_MSK: 384 case REM_STA_SUCCESS_MSK:
394 iwl_sta_ucode_deactivate(priv, addr); 385 iwl_sta_ucode_deactivate(priv, addr);
395 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 386 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
@@ -401,8 +392,8 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
401 } 392 }
402 } 393 }
403 394
404 priv->alloc_rxb_skb--; 395 priv->alloc_rxb_page--;
405 dev_kfree_skb_any(cmd.reply_skb); 396 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
406 397
407 return ret; 398 return ret;
408} 399}
@@ -1026,7 +1017,7 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
1026 */ 1017 */
1027 if (priv->current_ht_config.is_ht) { 1018 if (priv->current_ht_config.is_ht) {
1028 rcu_read_lock(); 1019 rcu_read_lock();
1029 sta = ieee80211_find_sta(priv->hw, addr); 1020 sta = ieee80211_find_sta(priv->vif, addr);
1030 if (sta) { 1021 if (sta) {
1031 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config)); 1022 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
1032 cur_ht_config = &ht_config; 1023 cur_ht_config = &ht_config;
@@ -1044,6 +1035,68 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
1044EXPORT_SYMBOL(iwl_rxon_add_station); 1035EXPORT_SYMBOL(iwl_rxon_add_station);
1045 1036
1046/** 1037/**
1038 * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table
1039 *
1040 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
1041 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
1042 * which requires station table entry to exist).
1043 */
1044static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
1045{
1046 int i, r;
1047 struct iwl_link_quality_cmd link_cmd = {
1048 .reserved1 = 0,
1049 };
1050 u32 rate_flags;
1051
1052 /* Set up the rate scaling to start at selected rate, fall back
1053 * all the way down to 1M in IEEE order, and then spin on 1M */
1054 if (priv->band == IEEE80211_BAND_5GHZ)
1055 r = IWL_RATE_6M_INDEX;
1056 else
1057 r = IWL_RATE_1M_INDEX;
1058
1059 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
1060 rate_flags = 0;
1061 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
1062 rate_flags |= RATE_MCS_CCK_MSK;
1063
1064 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
1065 RATE_MCS_ANT_POS;
1066
1067 link_cmd.rs_table[i].rate_n_flags =
1068 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
1069 r = iwl_get_prev_ieee_rate(r);
1070 }
1071
1072 link_cmd.general_params.single_stream_ant_msk =
1073 first_antenna(priv->hw_params.valid_tx_ant);
1074 link_cmd.general_params.dual_stream_ant_msk = 3;
1075 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
1076 link_cmd.agg_params.agg_time_limit =
1077 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
1078
1079 /* Update the rate scaling for control frame Tx to AP */
1080 link_cmd.sta_id = priv->hw_params.bcast_sta_id;
1081
1082 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
1083 sizeof(link_cmd), &link_cmd, NULL);
1084}
1085
1086
1087/**
1088 * iwl_add_bcast_station - add broadcast station into station table.
1089 */
1090void iwl_add_bcast_station(struct iwl_priv *priv)
1091{
1092 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
1093
1094 /* Set up default rate scaling table in device's station table */
1095 iwl_sta_init_bcast_lq(priv);
1096}
1097EXPORT_SYMBOL(iwl_add_bcast_station);
1098
1099/**
1047 * iwl_get_sta_id - Find station's index within station table 1100 * iwl_get_sta_id - Find station's index within station table
1048 * 1101 *
1049 * If new IBSS station, create new entry in station table 1102 * If new IBSS station, create new entry in station table
@@ -1163,7 +1216,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid)
1163} 1216}
1164EXPORT_SYMBOL(iwl_sta_rx_agg_stop); 1217EXPORT_SYMBOL(iwl_sta_rx_agg_stop);
1165 1218
1166static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) 1219void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1167{ 1220{
1168 unsigned long flags; 1221 unsigned long flags;
1169 1222
@@ -1171,27 +1224,26 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1171 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK; 1224 priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
1172 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK; 1225 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
1173 priv->stations[sta_id].sta.sta.modify_mask = 0; 1226 priv->stations[sta_id].sta.sta.modify_mask = 0;
1227 priv->stations[sta_id].sta.sleep_tx_count = 0;
1174 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1228 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1175 spin_unlock_irqrestore(&priv->sta_lock, flags); 1229 spin_unlock_irqrestore(&priv->sta_lock, flags);
1176 1230
1177 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 1231 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1178} 1232}
1233EXPORT_SYMBOL(iwl_sta_modify_ps_wake);
1179 1234
1180void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr) 1235void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
1181{ 1236{
1182 /* FIXME: need locking over ps_status ??? */ 1237 unsigned long flags;
1183 u8 sta_id = iwl_find_station(priv, addr);
1184 1238
1185 if (sta_id != IWL_INVALID_STATION) { 1239 spin_lock_irqsave(&priv->sta_lock, flags);
1186 u8 sta_awake = priv->stations[sta_id]. 1240 priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
1187 ps_status == STA_PS_STATUS_WAKE; 1241 priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
1242 priv->stations[sta_id].sta.sta.modify_mask =
1243 STA_MODIFY_SLEEP_TX_COUNT_MSK;
1244 priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
1245 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1246 spin_unlock_irqrestore(&priv->sta_lock, flags);
1188 1247
1189 if (sta_awake && ps_bit) 1248 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1190 priv->stations[sta_id].ps_status = STA_PS_STATUS_SLEEP;
1191 else if (!sta_awake && !ps_bit) {
1192 iwl_sta_modify_ps_wake(priv, sta_id);
1193 priv->stations[sta_id].ps_status = STA_PS_STATUS_WAKE;
1194 }
1195 }
1196} 1249}
1197
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 6deebade6361..8d052de2d405 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -52,6 +52,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
52 const u8 *addr, u32 iv32, u16 *phase1key); 52 const u8 *addr, u32 iv32, u16 *phase1key);
53 53
54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
55void iwl_add_bcast_station(struct iwl_priv *priv);
55int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 56int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
56void iwl_clear_stations_table(struct iwl_priv *priv); 57void iwl_clear_stations_table(struct iwl_priv *priv);
57int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 58int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
@@ -65,5 +66,6 @@ void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
65int iwl_sta_rx_agg_start(struct iwl_priv *priv, 66int iwl_sta_rx_agg_start(struct iwl_priv *priv,
66 const u8 *addr, int tid, u16 ssn); 67 const u8 *addr, int tid, u16 ssn);
67int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid); 68int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid);
68void iwl_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr); 69void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
70void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
69#endif /* __iwl_sta_h__ */ 71#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index fb9bcfa6d947..888a8e9fe9ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -97,7 +97,8 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
97 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); 97 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
98 98
99 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 99 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
100 IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg); 100 IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
101 txq_id, reg);
101 iwl_set_bit(priv, CSR_GP_CNTRL, 102 iwl_set_bit(priv, CSR_GP_CNTRL,
102 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 103 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
103 return ret; 104 return ret;
@@ -132,7 +133,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
132 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 133 struct iwl_tx_queue *txq = &priv->txq[txq_id];
133 struct iwl_queue *q = &txq->q; 134 struct iwl_queue *q = &txq->q;
134 struct pci_dev *dev = priv->pci_dev; 135 struct pci_dev *dev = priv->pci_dev;
135 int i, len; 136 int i;
136 137
137 if (q->n_bd == 0) 138 if (q->n_bd == 0)
138 return; 139 return;
@@ -142,8 +143,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
142 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) 143 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
143 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 144 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
144 145
145 len = sizeof(struct iwl_device_cmd) * q->n_window;
146
147 /* De-alloc array of command/tx buffers */ 146 /* De-alloc array of command/tx buffers */
148 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 147 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
149 kfree(txq->cmd[i]); 148 kfree(txq->cmd[i]);
@@ -181,14 +180,11 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
181 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 180 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
182 struct iwl_queue *q = &txq->q; 181 struct iwl_queue *q = &txq->q;
183 struct pci_dev *dev = priv->pci_dev; 182 struct pci_dev *dev = priv->pci_dev;
184 int i, len; 183 int i;
185 184
186 if (q->n_bd == 0) 185 if (q->n_bd == 0)
187 return; 186 return;
188 187
189 len = sizeof(struct iwl_device_cmd) * q->n_window;
190 len += IWL_MAX_SCAN_SIZE;
191
192 /* De-alloc array of command/tx buffers */ 188 /* De-alloc array of command/tx buffers */
193 for (i = 0; i <= TFD_CMD_SLOTS; i++) 189 for (i = 0; i <= TFD_CMD_SLOTS; i++)
194 kfree(txq->cmd[i]); 190 kfree(txq->cmd[i]);
@@ -370,8 +366,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
370 366
371 txq->need_update = 0; 367 txq->need_update = 0;
372 368
373 /* aggregation TX queues will get their ID when aggregation begins */ 369 /*
374 if (txq_id <= IWL_TX_FIFO_AC3) 370 * Aggregation TX queues will get their ID when aggregation begins;
371 * they overwrite the setting done here. The command FIFO doesn't
372 * need an swq_id so don't set one to catch errors, all others can
373 * be set up to the identity mapping.
374 */
375 if (txq_id != IWL_CMD_QUEUE_NUM)
375 txq->swq_id = txq_id; 376 txq->swq_id = txq_id;
376 377
377 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 378 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -406,15 +407,19 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
406 int txq_id; 407 int txq_id;
407 408
408 /* Tx queues */ 409 /* Tx queues */
409 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 410 if (priv->txq)
410 if (txq_id == IWL_CMD_QUEUE_NUM) 411 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
411 iwl_cmd_queue_free(priv); 412 txq_id++)
412 else 413 if (txq_id == IWL_CMD_QUEUE_NUM)
413 iwl_tx_queue_free(priv, txq_id); 414 iwl_cmd_queue_free(priv);
414 415 else
416 iwl_tx_queue_free(priv, txq_id);
415 iwl_free_dma_ptr(priv, &priv->kw); 417 iwl_free_dma_ptr(priv, &priv->kw);
416 418
417 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); 419 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
420
421 /* free tx queue structure */
422 iwl_free_txq_mem(priv);
418} 423}
419EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 424EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
420 425
@@ -446,6 +451,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
446 IWL_ERR(priv, "Keep Warm allocation failed\n"); 451 IWL_ERR(priv, "Keep Warm allocation failed\n");
447 goto error_kw; 452 goto error_kw;
448 } 453 }
454
455 /* allocate tx queue structure */
456 ret = iwl_alloc_txq_mem(priv);
457 if (ret)
458 goto error;
459
449 spin_lock_irqsave(&priv->lock, flags); 460 spin_lock_irqsave(&priv->lock, flags);
450 461
451 /* Turn off all Tx DMA fifos */ 462 /* Turn off all Tx DMA fifos */
@@ -582,9 +593,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
582 u8 rate_plcp; 593 u8 rate_plcp;
583 594
584 /* Set retry limit on DATA packets and Probe Responses*/ 595 /* Set retry limit on DATA packets and Probe Responses*/
585 if (priv->data_retry_limit != -1) 596 if (ieee80211_is_probe_resp(fc))
586 data_retry_limit = priv->data_retry_limit;
587 else if (ieee80211_is_probe_resp(fc))
588 data_retry_limit = 3; 597 data_retry_limit = 3;
589 else 598 else
590 data_retry_limit = IWL_DEFAULT_TX_RETRY; 599 data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -701,6 +710,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
701{ 710{
702 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 711 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
703 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 712 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
713 struct ieee80211_sta *sta = info->control.sta;
714 struct iwl_station_priv *sta_priv = NULL;
704 struct iwl_tx_queue *txq; 715 struct iwl_tx_queue *txq;
705 struct iwl_queue *q; 716 struct iwl_queue *q;
706 struct iwl_device_cmd *out_cmd; 717 struct iwl_device_cmd *out_cmd;
@@ -710,7 +721,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
710 dma_addr_t phys_addr; 721 dma_addr_t phys_addr;
711 dma_addr_t txcmd_phys; 722 dma_addr_t txcmd_phys;
712 dma_addr_t scratch_phys; 723 dma_addr_t scratch_phys;
713 u16 len, len_org; 724 u16 len, len_org, firstlen, secondlen;
714 u16 seq_number = 0; 725 u16 seq_number = 0;
715 __le16 fc; 726 __le16 fc;
716 u8 hdr_len; 727 u8 hdr_len;
@@ -763,6 +774,24 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
763 774
764 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); 775 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
765 776
777 if (sta)
778 sta_priv = (void *)sta->drv_priv;
779
780 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
781 sta_priv->asleep) {
782 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
783 /*
784 * This sends an asynchronous command to the device,
785 * but we can rely on it being processed before the
786 * next frame is processed -- and the next frame to
787 * this station is the one that will consume this
788 * counter.
789 * For now set the counter to just 1 since we do not
790 * support uAPSD yet.
791 */
792 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
793 }
794
766 txq_id = skb_get_queue_mapping(skb); 795 txq_id = skb_get_queue_mapping(skb);
767 if (ieee80211_is_data_qos(fc)) { 796 if (ieee80211_is_data_qos(fc)) {
768 qc = ieee80211_get_qos_ctl(hdr); 797 qc = ieee80211_get_qos_ctl(hdr);
@@ -843,7 +872,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
843 sizeof(struct iwl_cmd_header) + hdr_len; 872 sizeof(struct iwl_cmd_header) + hdr_len;
844 873
845 len_org = len; 874 len_org = len;
846 len = (len + 3) & ~3; 875 firstlen = len = (len + 3) & ~3;
847 876
848 if (len_org != len) 877 if (len_org != len)
849 len_org = 1; 878 len_org = 1;
@@ -877,7 +906,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
877 906
878 /* Set up TFD's 2nd entry to point directly to remainder of skb, 907 /* Set up TFD's 2nd entry to point directly to remainder of skb,
879 * if any (802.11 null frames have no payload). */ 908 * if any (802.11 null frames have no payload). */
880 len = skb->len - hdr_len; 909 secondlen = len = skb->len - hdr_len;
881 if (len) { 910 if (len) {
882 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 911 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
883 len, PCI_DMA_TODEVICE); 912 len, PCI_DMA_TODEVICE);
@@ -911,11 +940,28 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
911 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, 940 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
912 len, PCI_DMA_BIDIRECTIONAL); 941 len, PCI_DMA_BIDIRECTIONAL);
913 942
943 trace_iwlwifi_dev_tx(priv,
944 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
945 sizeof(struct iwl_tfd),
946 &out_cmd->hdr, firstlen,
947 skb->data + hdr_len, secondlen);
948
914 /* Tell device the write index *just past* this latest filled TFD */ 949 /* Tell device the write index *just past* this latest filled TFD */
915 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 950 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
916 ret = iwl_txq_update_write_ptr(priv, txq); 951 ret = iwl_txq_update_write_ptr(priv, txq);
917 spin_unlock_irqrestore(&priv->lock, flags); 952 spin_unlock_irqrestore(&priv->lock, flags);
918 953
954 /*
955 * At this point the frame is "transmitted" successfully
956 * and we will get a TX status notification eventually,
957 * regardless of the value of ret. "ret" only indicates
958 * whether or not we should update the write pointer.
959 */
960
961 /* avoid atomic ops if it isn't an associated client */
962 if (sta_priv && sta_priv->client)
963 atomic_inc(&sta_priv->pending_frames);
964
919 if (ret) 965 if (ret)
920 return ret; 966 return ret;
921 967
@@ -970,13 +1016,20 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
970 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 1016 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
971 !(cmd->flags & CMD_SIZE_HUGE)); 1017 !(cmd->flags & CMD_SIZE_HUGE));
972 1018
973 if (iwl_is_rfkill(priv)) { 1019 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
974 IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n"); 1020 IWL_WARN(priv, "Not sending command - %s KILL\n",
1021 iwl_is_rfkill(priv) ? "RF" : "CT");
975 return -EIO; 1022 return -EIO;
976 } 1023 }
977 1024
978 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1025 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
979 IWL_ERR(priv, "No space for Tx\n"); 1026 IWL_ERR(priv, "No space in command queue\n");
1027 if (iwl_within_ct_kill_margin(priv))
1028 iwl_tt_enter_ct_kill(priv);
1029 else {
1030 IWL_ERR(priv, "Restarting adapter due to queue full\n");
1031 queue_work(priv->workqueue, &priv->restart);
1032 }
980 return -ENOSPC; 1033 return -ENOSPC;
981 } 1034 }
982 1035
@@ -1039,6 +1092,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1039 pci_unmap_addr_set(out_meta, mapping, phys_addr); 1092 pci_unmap_addr_set(out_meta, mapping, phys_addr);
1040 pci_unmap_len_set(out_meta, len, fix_size); 1093 pci_unmap_len_set(out_meta, len, fix_size);
1041 1094
1095 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
1096
1042 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 1097 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1043 phys_addr, fix_size, 1, 1098 phys_addr, fix_size, 1,
1044 U32_PAD(cmd->len)); 1099 U32_PAD(cmd->len));
@@ -1051,6 +1106,24 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1051 return ret ? ret : idx; 1106 return ret ? ret : idx;
1052} 1107}
1053 1108
1109static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
1110{
1111 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1112 struct ieee80211_sta *sta;
1113 struct iwl_station_priv *sta_priv;
1114
1115 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
1116 if (sta) {
1117 sta_priv = (void *)sta->drv_priv;
1118 /* avoid atomic ops if this isn't a client */
1119 if (sta_priv->client &&
1120 atomic_dec_return(&sta_priv->pending_frames) == 0)
1121 ieee80211_sta_block_awake(priv->hw, sta, false);
1122 }
1123
1124 ieee80211_tx_status_irqsafe(priv->hw, skb);
1125}
1126
1054int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) 1127int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1055{ 1128{
1056 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 1129 struct iwl_tx_queue *txq = &priv->txq[txq_id];
@@ -1070,7 +1143,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1070 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1143 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1071 1144
1072 tx_info = &txq->txb[txq->q.read_ptr]; 1145 tx_info = &txq->txb[txq->q.read_ptr];
1073 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]); 1146 iwl_tx_status(priv, tx_info->skb[0]);
1074 tx_info->skb[0] = NULL; 1147 tx_info->skb[0] = NULL;
1075 1148
1076 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl) 1149 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
@@ -1105,11 +1178,6 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1105 return; 1178 return;
1106 } 1179 }
1107 1180
1108 pci_unmap_single(priv->pci_dev,
1109 pci_unmap_addr(&txq->meta[cmd_idx], mapping),
1110 pci_unmap_len(&txq->meta[cmd_idx], len),
1111 PCI_DMA_BIDIRECTIONAL);
1112
1113 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 1181 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
1114 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1182 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1115 1183
@@ -1132,7 +1200,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1132 */ 1200 */
1133void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1201void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1134{ 1202{
1135 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1203 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1136 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1204 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1137 int txq_id = SEQ_TO_QUEUE(sequence); 1205 int txq_id = SEQ_TO_QUEUE(sequence);
1138 int index = SEQ_TO_INDEX(sequence); 1206 int index = SEQ_TO_INDEX(sequence);
@@ -1157,12 +1225,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1157 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1225 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
1158 meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; 1226 meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
1159 1227
1228 pci_unmap_single(priv->pci_dev,
1229 pci_unmap_addr(meta, mapping),
1230 pci_unmap_len(meta, len),
1231 PCI_DMA_BIDIRECTIONAL);
1232
1160 /* Input error checking is done when commands are added to queue. */ 1233 /* Input error checking is done when commands are added to queue. */
1161 if (meta->flags & CMD_WANT_SKB) { 1234 if (meta->flags & CMD_WANT_SKB) {
1162 meta->source->reply_skb = rxb->skb; 1235 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
1163 rxb->skb = NULL; 1236 rxb->page = NULL;
1164 } else if (meta->callback) 1237 } else if (meta->callback)
1165 meta->callback(priv, cmd, rxb->skb); 1238 meta->callback(priv, cmd, pkt);
1166 1239
1167 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); 1240 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
1168 1241
@@ -1240,7 +1313,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1240 if (tid_data->tfds_in_queue == 0) { 1313 if (tid_data->tfds_in_queue == 0) {
1241 IWL_DEBUG_HT(priv, "HW queue is empty\n"); 1314 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1242 tid_data->agg.state = IWL_AGG_ON; 1315 tid_data->agg.state = IWL_AGG_ON;
1243 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid); 1316 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1244 } else { 1317 } else {
1245 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n", 1318 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
1246 tid_data->tfds_in_queue); 1319 tid_data->tfds_in_queue);
@@ -1305,7 +1378,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1305 if (ret) 1378 if (ret)
1306 return ret; 1379 return ret;
1307 1380
1308 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid); 1381 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1309 1382
1310 return 0; 1383 return 0;
1311} 1384}
@@ -1329,7 +1402,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1329 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, 1402 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1330 ssn, tx_fifo); 1403 ssn, tx_fifo);
1331 tid_data->agg.state = IWL_AGG_OFF; 1404 tid_data->agg.state = IWL_AGG_OFF;
1332 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid); 1405 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1333 } 1406 }
1334 break; 1407 break;
1335 case IWL_EMPTYING_HW_QUEUE_ADDBA: 1408 case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1337,7 +1410,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1337 if (tid_data->tfds_in_queue == 0) { 1410 if (tid_data->tfds_in_queue == 0) {
1338 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n"); 1411 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1339 tid_data->agg.state = IWL_AGG_ON; 1412 tid_data->agg.state = IWL_AGG_ON;
1340 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid); 1413 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1341 } 1414 }
1342 break; 1415 break;
1343 } 1416 }
@@ -1401,7 +1474,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1401 1474
1402 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); 1475 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1403 memset(&info->status, 0, sizeof(info->status)); 1476 memset(&info->status, 0, sizeof(info->status));
1404 info->flags = IEEE80211_TX_STAT_ACK; 1477 info->flags |= IEEE80211_TX_STAT_ACK;
1405 info->flags |= IEEE80211_TX_STAT_AMPDU; 1478 info->flags |= IEEE80211_TX_STAT_AMPDU;
1406 info->status.ampdu_ack_map = successes; 1479 info->status.ampdu_ack_map = successes;
1407 info->status.ampdu_ack_len = agg->frame_count; 1480 info->status.ampdu_ack_len = agg->frame_count;
@@ -1421,7 +1494,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1421void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, 1494void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1422 struct iwl_rx_mem_buffer *rxb) 1495 struct iwl_rx_mem_buffer *rxb)
1423{ 1496{
1424 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1497 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1425 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 1498 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1426 struct iwl_tx_queue *txq = NULL; 1499 struct iwl_tx_queue *txq = NULL;
1427 struct iwl_ht_agg *agg; 1500 struct iwl_ht_agg *agg;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d00a80334095..0db9b79a69a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -42,7 +42,6 @@
42#include <linux/if_arp.h> 42#include <linux/if_arp.h>
43 43
44#include <net/ieee80211_radiotap.h> 44#include <net/ieee80211_radiotap.h>
45#include <net/lib80211.h>
46#include <net/mac80211.h> 45#include <net/mac80211.h>
47 46
48#include <asm/div64.h> 47#include <asm/div64.h>
@@ -90,7 +89,6 @@ MODULE_LICENSE("GPL");
90 89
91 /* module parameters */ 90 /* module parameters */
92struct iwl_mod_params iwl3945_mod_params = { 91struct iwl_mod_params iwl3945_mod_params = {
93 .num_of_queues = IWL39_NUM_QUEUES, /* Not used */
94 .sw_crypto = 1, 92 .sw_crypto = 1,
95 .restart_fw = 1, 93 .restart_fw = 1,
96 /* the rest are 0 by default */ 94 /* the rest are 0 by default */
@@ -368,13 +366,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
368 struct sk_buff *skb_frag, 366 struct sk_buff *skb_frag,
369 int sta_id) 367 int sta_id)
370{ 368{
371 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 369 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
372 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; 370 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
373 371
374 switch (keyinfo->alg) { 372 switch (keyinfo->alg) {
375 case ALG_CCMP: 373 case ALG_CCMP:
376 tx->sec_ctl = TX_CMD_SEC_CCM; 374 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
377 memcpy(tx->key, keyinfo->key, keyinfo->keylen); 375 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
378 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 376 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
379 break; 377 break;
380 378
@@ -382,13 +380,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
382 break; 380 break;
383 381
384 case ALG_WEP: 382 case ALG_WEP:
385 tx->sec_ctl = TX_CMD_SEC_WEP | 383 tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
386 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 384 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
387 385
388 if (keyinfo->keylen == 13) 386 if (keyinfo->keylen == 13)
389 tx->sec_ctl |= TX_CMD_SEC_KEY128; 387 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
390 388
391 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen); 389 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
392 390
393 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 391 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
394 "with key %d\n", info->control.hw_key->hw_key_idx); 392 "with key %d\n", info->control.hw_key->hw_key_idx);
@@ -408,12 +406,11 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
408 struct ieee80211_tx_info *info, 406 struct ieee80211_tx_info *info,
409 struct ieee80211_hdr *hdr, u8 std_id) 407 struct ieee80211_hdr *hdr, u8 std_id)
410{ 408{
411 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 409 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
412 __le32 tx_flags = tx->tx_flags; 410 __le32 tx_flags = tx_cmd->tx_flags;
413 __le16 fc = hdr->frame_control; 411 __le16 fc = hdr->frame_control;
414 u8 rc_flags = info->control.rates[0].flags;
415 412
416 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 413 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
417 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 414 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
418 tx_flags |= TX_CMD_FLG_ACK_MSK; 415 tx_flags |= TX_CMD_FLG_ACK_MSK;
419 if (ieee80211_is_mgmt(fc)) 416 if (ieee80211_is_mgmt(fc))
@@ -426,25 +423,19 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
426 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 423 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
427 } 424 }
428 425
429 tx->sta_id = std_id; 426 tx_cmd->sta_id = std_id;
430 if (ieee80211_has_morefrags(fc)) 427 if (ieee80211_has_morefrags(fc))
431 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 428 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
432 429
433 if (ieee80211_is_data_qos(fc)) { 430 if (ieee80211_is_data_qos(fc)) {
434 u8 *qc = ieee80211_get_qos_ctl(hdr); 431 u8 *qc = ieee80211_get_qos_ctl(hdr);
435 tx->tid_tspec = qc[0] & 0xf; 432 tx_cmd->tid_tspec = qc[0] & 0xf;
436 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 433 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
437 } else { 434 } else {
438 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 435 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
439 } 436 }
440 437
441 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 438 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
442 tx_flags |= TX_CMD_FLG_RTS_MSK;
443 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
444 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
445 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
446 tx_flags |= TX_CMD_FLG_CTS_MSK;
447 }
448 439
449 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) 440 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
450 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; 441 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
@@ -452,19 +443,16 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
452 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 443 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
453 if (ieee80211_is_mgmt(fc)) { 444 if (ieee80211_is_mgmt(fc)) {
454 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 445 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
455 tx->timeout.pm_frame_timeout = cpu_to_le16(3); 446 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
456 else 447 else
457 tx->timeout.pm_frame_timeout = cpu_to_le16(2); 448 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
458 } else { 449 } else {
459 tx->timeout.pm_frame_timeout = 0; 450 tx_cmd->timeout.pm_frame_timeout = 0;
460#ifdef CONFIG_IWLWIFI_LEDS
461 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
462#endif
463 } 451 }
464 452
465 tx->driver_txop = 0; 453 tx_cmd->driver_txop = 0;
466 tx->tx_flags = tx_flags; 454 tx_cmd->tx_flags = tx_flags;
467 tx->next_frame_len = 0; 455 tx_cmd->next_frame_len = 0;
468} 456}
469 457
470/* 458/*
@@ -474,7 +462,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
474{ 462{
475 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 463 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
476 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 464 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
477 struct iwl3945_tx_cmd *tx; 465 struct iwl3945_tx_cmd *tx_cmd;
478 struct iwl_tx_queue *txq = NULL; 466 struct iwl_tx_queue *txq = NULL;
479 struct iwl_queue *q = NULL; 467 struct iwl_queue *q = NULL;
480 struct iwl_device_cmd *out_cmd; 468 struct iwl_device_cmd *out_cmd;
@@ -573,9 +561,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
573 /* Init first empty entry in queue's array of Tx/cmd buffers */ 561 /* Init first empty entry in queue's array of Tx/cmd buffers */
574 out_cmd = txq->cmd[idx]; 562 out_cmd = txq->cmd[idx];
575 out_meta = &txq->meta[idx]; 563 out_meta = &txq->meta[idx];
576 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; 564 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
577 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 565 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
578 memset(tx, 0, sizeof(*tx)); 566 memset(tx_cmd, 0, sizeof(*tx_cmd));
579 567
580 /* 568 /*
581 * Set up the Tx-command (not MAC!) header. 569 * Set up the Tx-command (not MAC!) header.
@@ -588,7 +576,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
588 INDEX_TO_SEQ(q->write_ptr))); 576 INDEX_TO_SEQ(q->write_ptr)));
589 577
590 /* Copy MAC header from skb into command buffer */ 578 /* Copy MAC header from skb into command buffer */
591 memcpy(tx->hdr, hdr, hdr_len); 579 memcpy(tx_cmd->hdr, hdr, hdr_len);
592 580
593 581
594 if (info->control.hw_key) 582 if (info->control.hw_key)
@@ -602,12 +590,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
602 590
603 /* Total # bytes to be transmitted */ 591 /* Total # bytes to be transmitted */
604 len = (u16)skb->len; 592 len = (u16)skb->len;
605 tx->len = cpu_to_le16(len); 593 tx_cmd->len = cpu_to_le16(len);
606 594
607 iwl_dbg_log_tx_data_frame(priv, len, hdr); 595 iwl_dbg_log_tx_data_frame(priv, len, hdr);
608 iwl_update_stats(priv, true, fc, len); 596 iwl_update_stats(priv, true, fc, len);
609 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 597 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
610 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 598 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
611 599
612 if (!ieee80211_has_morefrags(hdr->frame_control)) { 600 if (!ieee80211_has_morefrags(hdr->frame_control)) {
613 txq->need_update = 1; 601 txq->need_update = 1;
@@ -620,9 +608,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
620 608
621 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", 609 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
622 le16_to_cpu(out_cmd->hdr.sequence)); 610 le16_to_cpu(out_cmd->hdr.sequence));
623 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags)); 611 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
624 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx)); 612 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
625 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr, 613 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
626 ieee80211_hdrlen(fc)); 614 ieee80211_hdrlen(fc));
627 615
628 /* 616 /*
@@ -758,7 +746,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
758 u8 type) 746 u8 type)
759{ 747{
760 struct iwl_spectrum_cmd spectrum; 748 struct iwl_spectrum_cmd spectrum;
761 struct iwl_rx_packet *res; 749 struct iwl_rx_packet *pkt;
762 struct iwl_host_cmd cmd = { 750 struct iwl_host_cmd cmd = {
763 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 751 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
764 .data = (void *)&spectrum, 752 .data = (void *)&spectrum,
@@ -803,18 +791,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
803 if (rc) 791 if (rc)
804 return rc; 792 return rc;
805 793
806 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 794 pkt = (struct iwl_rx_packet *)cmd.reply_page;
807 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 795 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
808 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); 796 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
809 rc = -EIO; 797 rc = -EIO;
810 } 798 }
811 799
812 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); 800 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
813 switch (spectrum_resp_status) { 801 switch (spectrum_resp_status) {
814 case 0: /* Command will be handled */ 802 case 0: /* Command will be handled */
815 if (res->u.spectrum.id != 0xff) { 803 if (pkt->u.spectrum.id != 0xff) {
816 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", 804 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
817 res->u.spectrum.id); 805 pkt->u.spectrum.id);
818 priv->measurement_status &= ~MEASUREMENT_READY; 806 priv->measurement_status &= ~MEASUREMENT_READY;
819 } 807 }
820 priv->measurement_status |= MEASUREMENT_ACTIVE; 808 priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -826,7 +814,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
826 break; 814 break;
827 } 815 }
828 816
829 dev_kfree_skb_any(cmd.reply_skb); 817 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
830 818
831 return rc; 819 return rc;
832} 820}
@@ -835,7 +823,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
835static void iwl3945_rx_reply_alive(struct iwl_priv *priv, 823static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
836 struct iwl_rx_mem_buffer *rxb) 824 struct iwl_rx_mem_buffer *rxb)
837{ 825{
838 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 826 struct iwl_rx_packet *pkt = rxb_addr(rxb);
839 struct iwl_alive_resp *palive; 827 struct iwl_alive_resp *palive;
840 struct delayed_work *pwork; 828 struct delayed_work *pwork;
841 829
@@ -872,7 +860,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
872 struct iwl_rx_mem_buffer *rxb) 860 struct iwl_rx_mem_buffer *rxb)
873{ 861{
874#ifdef CONFIG_IWLWIFI_DEBUG 862#ifdef CONFIG_IWLWIFI_DEBUG
875 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 863 struct iwl_rx_packet *pkt = rxb_addr(rxb);
876#endif 864#endif
877 865
878 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 866 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -908,7 +896,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
908 struct iwl_rx_mem_buffer *rxb) 896 struct iwl_rx_mem_buffer *rxb)
909{ 897{
910#ifdef CONFIG_IWLWIFI_DEBUG 898#ifdef CONFIG_IWLWIFI_DEBUG
911 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 899 struct iwl_rx_packet *pkt = rxb_addr(rxb);
912 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 900 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
913 u8 rate = beacon->beacon_notify_hdr.rate; 901 u8 rate = beacon->beacon_notify_hdr.rate;
914 902
@@ -931,7 +919,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
931static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, 919static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
932 struct iwl_rx_mem_buffer *rxb) 920 struct iwl_rx_mem_buffer *rxb)
933{ 921{
934 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 922 struct iwl_rx_packet *pkt = rxb_addr(rxb);
935 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 923 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
936 unsigned long status = priv->status; 924 unsigned long status = priv->status;
937 925
@@ -1095,7 +1083,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1095 list_del(element); 1083 list_del(element);
1096 1084
1097 /* Point to Rx buffer via next RBD in circular buffer */ 1085 /* Point to Rx buffer via next RBD in circular buffer */
1098 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr); 1086 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
1099 rxq->queue[rxq->write] = rxb; 1087 rxq->queue[rxq->write] = rxb;
1100 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 1088 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
1101 rxq->free_count--; 1089 rxq->free_count--;
@@ -1135,8 +1123,9 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1135 struct iwl_rx_queue *rxq = &priv->rxq; 1123 struct iwl_rx_queue *rxq = &priv->rxq;
1136 struct list_head *element; 1124 struct list_head *element;
1137 struct iwl_rx_mem_buffer *rxb; 1125 struct iwl_rx_mem_buffer *rxb;
1138 struct sk_buff *skb; 1126 struct page *page;
1139 unsigned long flags; 1127 unsigned long flags;
1128 gfp_t gfp_mask = priority;
1140 1129
1141 while (1) { 1130 while (1) {
1142 spin_lock_irqsave(&rxq->lock, flags); 1131 spin_lock_irqsave(&rxq->lock, flags);
@@ -1148,10 +1137,14 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1148 spin_unlock_irqrestore(&rxq->lock, flags); 1137 spin_unlock_irqrestore(&rxq->lock, flags);
1149 1138
1150 if (rxq->free_count > RX_LOW_WATERMARK) 1139 if (rxq->free_count > RX_LOW_WATERMARK)
1151 priority |= __GFP_NOWARN; 1140 gfp_mask |= __GFP_NOWARN;
1141
1142 if (priv->hw_params.rx_page_order > 0)
1143 gfp_mask |= __GFP_COMP;
1144
1152 /* Alloc a new receive buffer */ 1145 /* Alloc a new receive buffer */
1153 skb = alloc_skb(priv->hw_params.rx_buf_size, priority); 1146 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1154 if (!skb) { 1147 if (!page) {
1155 if (net_ratelimit()) 1148 if (net_ratelimit())
1156 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 1149 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1157 if ((rxq->free_count <= RX_LOW_WATERMARK) && 1150 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1168,7 +1161,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1168 spin_lock_irqsave(&rxq->lock, flags); 1161 spin_lock_irqsave(&rxq->lock, flags);
1169 if (list_empty(&rxq->rx_used)) { 1162 if (list_empty(&rxq->rx_used)) {
1170 spin_unlock_irqrestore(&rxq->lock, flags); 1163 spin_unlock_irqrestore(&rxq->lock, flags);
1171 dev_kfree_skb_any(skb); 1164 __free_pages(page, priv->hw_params.rx_page_order);
1172 return; 1165 return;
1173 } 1166 }
1174 element = rxq->rx_used.next; 1167 element = rxq->rx_used.next;
@@ -1176,26 +1169,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1176 list_del(element); 1169 list_del(element);
1177 spin_unlock_irqrestore(&rxq->lock, flags); 1170 spin_unlock_irqrestore(&rxq->lock, flags);
1178 1171
1179 rxb->skb = skb; 1172 rxb->page = page;
1180
1181 /* If radiotap head is required, reserve some headroom here.
1182 * The physical head count is a variable rx_stats->phy_count.
1183 * We reserve 4 bytes here. Plus these extra bytes, the
1184 * headroom of the physical head should be enough for the
1185 * radiotap head that iwl3945 supported. See iwl3945_rt.
1186 */
1187 skb_reserve(rxb->skb, 4);
1188
1189 /* Get physical address of RB/SKB */ 1173 /* Get physical address of RB/SKB */
1190 rxb->real_dma_addr = pci_map_single(priv->pci_dev, 1174 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1191 rxb->skb->data, 1175 PAGE_SIZE << priv->hw_params.rx_page_order,
1192 priv->hw_params.rx_buf_size, 1176 PCI_DMA_FROMDEVICE);
1193 PCI_DMA_FROMDEVICE);
1194 1177
1195 spin_lock_irqsave(&rxq->lock, flags); 1178 spin_lock_irqsave(&rxq->lock, flags);
1179
1196 list_add_tail(&rxb->list, &rxq->rx_free); 1180 list_add_tail(&rxb->list, &rxq->rx_free);
1197 priv->alloc_rxb_skb++;
1198 rxq->free_count++; 1181 rxq->free_count++;
1182 priv->alloc_rxb_page++;
1183
1199 spin_unlock_irqrestore(&rxq->lock, flags); 1184 spin_unlock_irqrestore(&rxq->lock, flags);
1200 } 1185 }
1201} 1186}
@@ -1211,14 +1196,14 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1211 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1196 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1212 /* In the reset function, these buffers may have been allocated 1197 /* In the reset function, these buffers may have been allocated
1213 * to an SKB, so we need to unmap and free potential storage */ 1198 * to an SKB, so we need to unmap and free potential storage */
1214 if (rxq->pool[i].skb != NULL) { 1199 if (rxq->pool[i].page != NULL) {
1215 pci_unmap_single(priv->pci_dev, 1200 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1216 rxq->pool[i].real_dma_addr, 1201 PAGE_SIZE << priv->hw_params.rx_page_order,
1217 priv->hw_params.rx_buf_size, 1202 PCI_DMA_FROMDEVICE);
1218 PCI_DMA_FROMDEVICE); 1203 priv->alloc_rxb_page--;
1219 priv->alloc_rxb_skb--; 1204 __free_pages(rxq->pool[i].page,
1220 dev_kfree_skb(rxq->pool[i].skb); 1205 priv->hw_params.rx_page_order);
1221 rxq->pool[i].skb = NULL; 1206 rxq->pool[i].page = NULL;
1222 } 1207 }
1223 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1208 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1224 } 1209 }
@@ -1226,8 +1211,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1226 /* Set us so that we have processed and used all buffers, but have 1211 /* Set us so that we have processed and used all buffers, but have
1227 * not restocked the Rx queue with fresh buffers */ 1212 * not restocked the Rx queue with fresh buffers */
1228 rxq->read = rxq->write = 0; 1213 rxq->read = rxq->write = 0;
1229 rxq->free_count = 0;
1230 rxq->write_actual = 0; 1214 rxq->write_actual = 0;
1215 rxq->free_count = 0;
1231 spin_unlock_irqrestore(&rxq->lock, flags); 1216 spin_unlock_irqrestore(&rxq->lock, flags);
1232} 1217}
1233 1218
@@ -1260,12 +1245,14 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1260{ 1245{
1261 int i; 1246 int i;
1262 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 1247 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1263 if (rxq->pool[i].skb != NULL) { 1248 if (rxq->pool[i].page != NULL) {
1264 pci_unmap_single(priv->pci_dev, 1249 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1265 rxq->pool[i].real_dma_addr, 1250 PAGE_SIZE << priv->hw_params.rx_page_order,
1266 priv->hw_params.rx_buf_size, 1251 PCI_DMA_FROMDEVICE);
1267 PCI_DMA_FROMDEVICE); 1252 __free_pages(rxq->pool[i].page,
1268 dev_kfree_skb(rxq->pool[i].skb); 1253 priv->hw_params.rx_page_order);
1254 rxq->pool[i].page = NULL;
1255 priv->alloc_rxb_page--;
1269 } 1256 }
1270 } 1257 }
1271 1258
@@ -1381,7 +1368,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1381 i = rxq->read; 1368 i = rxq->read;
1382 1369
1383 /* calculate total frames need to be restock after handling RX */ 1370 /* calculate total frames need to be restock after handling RX */
1384 total_empty = r - priv->rxq.write_actual; 1371 total_empty = r - rxq->write_actual;
1385 if (total_empty < 0) 1372 if (total_empty < 0)
1386 total_empty += RX_QUEUE_SIZE; 1373 total_empty += RX_QUEUE_SIZE;
1387 1374
@@ -1401,10 +1388,13 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1401 1388
1402 rxq->queue[i] = NULL; 1389 rxq->queue[i] = NULL;
1403 1390
1404 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 1391 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1405 priv->hw_params.rx_buf_size, 1392 PAGE_SIZE << priv->hw_params.rx_page_order,
1406 PCI_DMA_FROMDEVICE); 1393 PCI_DMA_FROMDEVICE);
1407 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1394 pkt = rxb_addr(rxb);
1395
1396 trace_iwlwifi_dev_rx(priv, pkt,
1397 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
1408 1398
1409 /* Reclaim a command buffer only if this packet is a response 1399 /* Reclaim a command buffer only if this packet is a response
1410 * to a (driver-originated) command. 1400 * to a (driver-originated) command.
@@ -1422,44 +1412,55 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1422 if (priv->rx_handlers[pkt->hdr.cmd]) { 1412 if (priv->rx_handlers[pkt->hdr.cmd]) {
1423 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1413 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1424 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1414 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1425 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1426 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1415 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1416 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1427 } else { 1417 } else {
1428 /* No handling needed */ 1418 /* No handling needed */
1429 IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n", 1419 IWL_DEBUG_RX(priv,
1420 "r %d i %d No handler needed for %s, 0x%02x\n",
1430 r, i, get_cmd_string(pkt->hdr.cmd), 1421 r, i, get_cmd_string(pkt->hdr.cmd),
1431 pkt->hdr.cmd); 1422 pkt->hdr.cmd);
1432 } 1423 }
1433 1424
1425 /*
1426 * XXX: After here, we should always check rxb->page
1427 * against NULL before touching it or its virtual
1428 * memory (pkt). Because some rx_handler might have
1429 * already taken or freed the pages.
1430 */
1431
1434 if (reclaim) { 1432 if (reclaim) {
1435 /* Invoke any callbacks, transfer the skb to caller, and 1433 /* Invoke any callbacks, transfer the buffer to caller,
1436 * fire off the (possibly) blocking iwl_send_cmd() 1434 * and fire off the (possibly) blocking iwl_send_cmd()
1437 * as we reclaim the driver command queue */ 1435 * as we reclaim the driver command queue */
1438 if (rxb && rxb->skb) 1436 if (rxb->page)
1439 iwl_tx_cmd_complete(priv, rxb); 1437 iwl_tx_cmd_complete(priv, rxb);
1440 else 1438 else
1441 IWL_WARN(priv, "Claim null rxb?\n"); 1439 IWL_WARN(priv, "Claim null rxb?\n");
1442 } 1440 }
1443 1441
1444 /* For now we just don't re-use anything. We can tweak this 1442 /* Reuse the page if possible. For notification packets and
1445 * later to try and re-use notification packets and SKBs that 1443 * SKBs that fail to Rx correctly, add them back into the
1446 * fail to Rx correctly */ 1444 * rx_free list for reuse later. */
1447 if (rxb->skb != NULL) {
1448 priv->alloc_rxb_skb--;
1449 dev_kfree_skb_any(rxb->skb);
1450 rxb->skb = NULL;
1451 }
1452
1453 spin_lock_irqsave(&rxq->lock, flags); 1445 spin_lock_irqsave(&rxq->lock, flags);
1454 list_add_tail(&rxb->list, &priv->rxq.rx_used); 1446 if (rxb->page != NULL) {
1447 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1448 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1449 PCI_DMA_FROMDEVICE);
1450 list_add_tail(&rxb->list, &rxq->rx_free);
1451 rxq->free_count++;
1452 } else
1453 list_add_tail(&rxb->list, &rxq->rx_used);
1454
1455 spin_unlock_irqrestore(&rxq->lock, flags); 1455 spin_unlock_irqrestore(&rxq->lock, flags);
1456
1456 i = (i + 1) & RX_QUEUE_MASK; 1457 i = (i + 1) & RX_QUEUE_MASK;
1457 /* If there are a lot of unused frames, 1458 /* If there are a lot of unused frames,
1458 * restock the Rx queue so ucode won't assert. */ 1459 * restock the Rx queue so ucode won't assert. */
1459 if (fill_rx) { 1460 if (fill_rx) {
1460 count++; 1461 count++;
1461 if (count >= 8) { 1462 if (count >= 8) {
1462 priv->rxq.read = i; 1463 rxq->read = i;
1463 iwl3945_rx_replenish_now(priv); 1464 iwl3945_rx_replenish_now(priv);
1464 count = 0; 1465 count = 0;
1465 } 1466 }
@@ -1467,7 +1468,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1467 } 1468 }
1468 1469
1469 /* Backtrack one entry */ 1470 /* Backtrack one entry */
1470 priv->rxq.read = i; 1471 rxq->read = i;
1471 if (fill_rx) 1472 if (fill_rx)
1472 iwl3945_rx_replenish_now(priv); 1473 iwl3945_rx_replenish_now(priv);
1473 else 1474 else
@@ -1482,7 +1483,6 @@ static inline void iwl_synchronize_irq(struct iwl_priv *priv)
1482 tasklet_kill(&priv->irq_tasklet); 1483 tasklet_kill(&priv->irq_tasklet);
1483} 1484}
1484 1485
1485#ifdef CONFIG_IWLWIFI_DEBUG
1486static const char *desc_lookup(int i) 1486static const char *desc_lookup(int i)
1487{ 1487{
1488 switch (i) { 1488 switch (i) {
@@ -1551,8 +1551,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1551 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1551 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1552 desc_lookup(desc), desc, time, blink1, blink2, 1552 desc_lookup(desc), desc, time, blink1, blink2,
1553 ilink1, ilink2, data1); 1553 ilink1, ilink2, data1);
1554 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
1555 0, blink1, blink2, ilink1, ilink2);
1554 } 1556 }
1555
1556} 1557}
1557 1558
1558#define EVENT_START_OFFSET (6 * sizeof(u32)) 1559#define EVENT_START_OFFSET (6 * sizeof(u32))
@@ -1569,6 +1570,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1569 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ 1570 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1570 u32 ptr; /* SRAM byte address of log data */ 1571 u32 ptr; /* SRAM byte address of log data */
1571 u32 ev, time, data; /* event log data */ 1572 u32 ev, time, data; /* event log data */
1573 unsigned long reg_flags;
1572 1574
1573 if (num_events == 0) 1575 if (num_events == 0)
1574 return; 1576 return;
@@ -1582,25 +1584,71 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1582 1584
1583 ptr = base + EVENT_START_OFFSET + (start_idx * event_size); 1585 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1584 1586
1587 /* Make sure device is powered up for SRAM reads */
1588 spin_lock_irqsave(&priv->reg_lock, reg_flags);
1589 iwl_grab_nic_access(priv);
1590
1591 /* Set starting address; reads will auto-increment */
1592 _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
1593 rmb();
1594
1585 /* "time" is actually "data" for mode 0 (no timestamp). 1595 /* "time" is actually "data" for mode 0 (no timestamp).
1586 * place event id # at far right for easier visual parsing. */ 1596 * place event id # at far right for easier visual parsing. */
1587 for (i = 0; i < num_events; i++) { 1597 for (i = 0; i < num_events; i++) {
1588 ev = iwl_read_targ_mem(priv, ptr); 1598 ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1589 ptr += sizeof(u32); 1599 time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1590 time = iwl_read_targ_mem(priv, ptr);
1591 ptr += sizeof(u32);
1592 if (mode == 0) { 1600 if (mode == 0) {
1593 /* data, ev */ 1601 /* data, ev */
1594 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1602 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1603 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
1595 } else { 1604 } else {
1596 data = iwl_read_targ_mem(priv, ptr); 1605 data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
1597 ptr += sizeof(u32);
1598 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev); 1606 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
1607 trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
1599 } 1608 }
1600 } 1609 }
1610
1611 /* Allow device to power down */
1612 iwl_release_nic_access(priv);
1613 spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
1601} 1614}
1602 1615
1603void iwl3945_dump_nic_event_log(struct iwl_priv *priv) 1616/**
1617 * iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
1618 */
1619static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1620 u32 num_wraps, u32 next_entry,
1621 u32 size, u32 mode)
1622{
1623 /*
1624 * display the newest DEFAULT_LOG_ENTRIES entries
1625 * i.e the entries just before the next ont that uCode would fill.
1626 */
1627 if (num_wraps) {
1628 if (next_entry < size) {
1629 iwl3945_print_event_log(priv,
1630 capacity - (size - next_entry),
1631 size - next_entry, mode);
1632 iwl3945_print_event_log(priv, 0,
1633 next_entry, mode);
1634 } else
1635 iwl3945_print_event_log(priv, next_entry - size,
1636 size, mode);
1637 } else {
1638 if (next_entry < size)
1639 iwl3945_print_event_log(priv, 0, next_entry, mode);
1640 else
1641 iwl3945_print_event_log(priv, next_entry - size,
1642 size, mode);
1643 }
1644}
1645
1646/* For sanity check only. Actual size is determined by uCode, typ. 512 */
1647#define IWL3945_MAX_EVENT_LOG_SIZE (512)
1648
1649#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1650
1651void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
1604{ 1652{
1605 u32 base; /* SRAM byte address of event log header */ 1653 u32 base; /* SRAM byte address of event log header */
1606 u32 capacity; /* event log capacity in # entries */ 1654 u32 capacity; /* event log capacity in # entries */
@@ -1621,6 +1669,18 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1621 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1669 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1622 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1670 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1623 1671
1672 if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) {
1673 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1674 capacity, IWL3945_MAX_EVENT_LOG_SIZE);
1675 capacity = IWL3945_MAX_EVENT_LOG_SIZE;
1676 }
1677
1678 if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) {
1679 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1680 next_entry, IWL3945_MAX_EVENT_LOG_SIZE);
1681 next_entry = IWL3945_MAX_EVENT_LOG_SIZE;
1682 }
1683
1624 size = num_wraps ? capacity : next_entry; 1684 size = num_wraps ? capacity : next_entry;
1625 1685
1626 /* bail out if nothing in log */ 1686 /* bail out if nothing in log */
@@ -1629,8 +1689,17 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1629 return; 1689 return;
1630 } 1690 }
1631 1691
1632 IWL_ERR(priv, "Start IWL Event Log Dump: display count %d, wraps %d\n", 1692#ifdef CONFIG_IWLWIFI_DEBUG
1633 size, num_wraps); 1693 if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
1694 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1695 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1696#else
1697 size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
1698 ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
1699#endif
1700
1701 IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n",
1702 size);
1634 1703
1635 /* if uCode has wrapped back to top of log, start at the oldest entry, 1704 /* if uCode has wrapped back to top of log, start at the oldest entry,
1636 * i.e the next one that uCode would fill. */ 1705 * i.e the next one that uCode would fill. */
@@ -1641,18 +1710,28 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv)
1641 /* (then/else) start at top of log */ 1710 /* (then/else) start at top of log */
1642 iwl3945_print_event_log(priv, 0, next_entry, mode); 1711 iwl3945_print_event_log(priv, 0, next_entry, mode);
1643 1712
1644} 1713#ifdef CONFIG_IWLWIFI_DEBUG
1714 if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
1715 /* if uCode has wrapped back to top of log,
1716 * start at the oldest entry,
1717 * i.e the next one that uCode would fill.
1718 */
1719 if (num_wraps)
1720 iwl3945_print_event_log(priv, next_entry,
1721 capacity - next_entry, mode);
1722
1723 /* (then/else) start at top of log */
1724 iwl3945_print_event_log(priv, 0, next_entry, mode);
1725 } else
1726 iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1727 next_entry, size, mode);
1645#else 1728#else
1646void iwl3945_dump_nic_event_log(struct iwl_priv *priv) 1729 iwl3945_print_last_event_logs(priv, capacity, num_wraps,
1647{ 1730 next_entry, size, mode);
1648} 1731#endif
1649 1732
1650void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1651{
1652} 1733}
1653 1734
1654#endif
1655
1656static void iwl3945_irq_tasklet(struct iwl_priv *priv) 1735static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1657{ 1736{
1658 u32 inta, handled = 0; 1737 u32 inta, handled = 0;
@@ -1685,6 +1764,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1685 } 1764 }
1686#endif 1765#endif
1687 1766
1767 spin_unlock_irqrestore(&priv->lock, flags);
1768
1688 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1769 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1689 * atomic, make sure that inta covers all the interrupts that 1770 * atomic, make sure that inta covers all the interrupts that
1690 * we've discovered, even if FH interrupt came in just after 1771 * we've discovered, even if FH interrupt came in just after
@@ -1706,8 +1787,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1706 1787
1707 handled |= CSR_INT_BIT_HW_ERR; 1788 handled |= CSR_INT_BIT_HW_ERR;
1708 1789
1709 spin_unlock_irqrestore(&priv->lock, flags);
1710
1711 return; 1790 return;
1712 } 1791 }
1713 1792
@@ -1799,7 +1878,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1799 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1878 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1800 } 1879 }
1801#endif 1880#endif
1802 spin_unlock_irqrestore(&priv->lock, flags);
1803} 1881}
1804 1882
1805static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, 1883static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
@@ -2158,6 +2236,14 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2158 IWL_UCODE_API(priv->ucode_ver), 2236 IWL_UCODE_API(priv->ucode_ver),
2159 IWL_UCODE_SERIAL(priv->ucode_ver)); 2237 IWL_UCODE_SERIAL(priv->ucode_ver));
2160 2238
2239 snprintf(priv->hw->wiphy->fw_version,
2240 sizeof(priv->hw->wiphy->fw_version),
2241 "%u.%u.%u.%u",
2242 IWL_UCODE_MAJOR(priv->ucode_ver),
2243 IWL_UCODE_MINOR(priv->ucode_ver),
2244 IWL_UCODE_API(priv->ucode_ver),
2245 IWL_UCODE_SERIAL(priv->ucode_ver));
2246
2161 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", 2247 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
2162 priv->ucode_ver); 2248 priv->ucode_ver);
2163 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", 2249 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
@@ -2458,7 +2544,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2458 priv->active_rate = priv->rates_mask; 2544 priv->active_rate = priv->rates_mask;
2459 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 2545 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
2460 2546
2461 iwl_power_update_mode(priv, false); 2547 iwl_power_update_mode(priv, true);
2462 2548
2463 if (iwl_is_associated(priv)) { 2549 if (iwl_is_associated(priv)) {
2464 struct iwl3945_rxon_cmd *active_rxon = 2550 struct iwl3945_rxon_cmd *active_rxon =
@@ -2479,7 +2565,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2479 2565
2480 iwl3945_reg_txpower_periodic(priv); 2566 iwl3945_reg_txpower_periodic(priv);
2481 2567
2482 iwl3945_led_register(priv); 2568 iwl_leds_init(priv);
2483 2569
2484 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2570 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2485 set_bit(STATUS_READY, &priv->status); 2571 set_bit(STATUS_READY, &priv->status);
@@ -2517,7 +2603,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
2517 if (!exit_pending) 2603 if (!exit_pending)
2518 set_bit(STATUS_EXIT_PENDING, &priv->status); 2604 set_bit(STATUS_EXIT_PENDING, &priv->status);
2519 2605
2520 iwl3945_led_unregister(priv);
2521 iwl_clear_stations_table(priv); 2606 iwl_clear_stations_table(priv);
2522 2607
2523 /* Unblock any waiting calls */ 2608 /* Unblock any waiting calls */
@@ -2563,23 +2648,15 @@ static void __iwl3945_down(struct iwl_priv *priv)
2563 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2648 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2564 STATUS_EXIT_PENDING; 2649 STATUS_EXIT_PENDING;
2565 2650
2566 priv->cfg->ops->lib->apm_ops.reset(priv);
2567 spin_lock_irqsave(&priv->lock, flags);
2568 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2569 spin_unlock_irqrestore(&priv->lock, flags);
2570
2571 iwl3945_hw_txq_ctx_stop(priv); 2651 iwl3945_hw_txq_ctx_stop(priv);
2572 iwl3945_hw_rxq_stop(priv); 2652 iwl3945_hw_rxq_stop(priv);
2573 2653
2574 iwl_write_prph(priv, APMG_CLK_DIS_REG, 2654 /* Power-down device's busmaster DMA clocks */
2575 APMG_CLK_VAL_DMA_CLK_RQT); 2655 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
2576
2577 udelay(5); 2656 udelay(5);
2578 2657
2579 if (exit_pending) 2658 /* Stop the device, and put it in low power state */
2580 priv->cfg->ops->lib->apm_ops.stop(priv); 2659 priv->cfg->ops->lib->apm_ops.stop(priv);
2581 else
2582 priv->cfg->ops->lib->apm_ops.reset(priv);
2583 2660
2584 exit: 2661 exit:
2585 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2662 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2724,19 +2801,34 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
2724 mutex_unlock(&priv->mutex); 2801 mutex_unlock(&priv->mutex);
2725} 2802}
2726 2803
2804/*
2805 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2806 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2807 * *is* readable even when device has been SW_RESET into low power mode
2808 * (e.g. during RF KILL).
2809 */
2727static void iwl3945_rfkill_poll(struct work_struct *data) 2810static void iwl3945_rfkill_poll(struct work_struct *data)
2728{ 2811{
2729 struct iwl_priv *priv = 2812 struct iwl_priv *priv =
2730 container_of(data, struct iwl_priv, rfkill_poll.work); 2813 container_of(data, struct iwl_priv, rfkill_poll.work);
2814 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2815 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2816 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2731 2817
2732 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2818 if (new_rfkill != old_rfkill) {
2733 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2819 if (new_rfkill)
2734 else 2820 set_bit(STATUS_RF_KILL_HW, &priv->status);
2735 set_bit(STATUS_RF_KILL_HW, &priv->status); 2821 else
2822 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2823
2824 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2736 2825
2737 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 2826 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2738 test_bit(STATUS_RF_KILL_HW, &priv->status)); 2827 new_rfkill ? "disable radio" : "enable radio");
2828 }
2739 2829
2830 /* Keep this running, even if radio now enabled. This will be
2831 * cancelled in mac_start() if system decides to start again */
2740 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2832 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
2741 round_jiffies_relative(2 * HZ)); 2833 round_jiffies_relative(2 * HZ));
2742 2834
@@ -3152,6 +3244,8 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3152 * no need to poll the killswitch state anymore */ 3244 * no need to poll the killswitch state anymore */
3153 cancel_delayed_work(&priv->rfkill_poll); 3245 cancel_delayed_work(&priv->rfkill_poll);
3154 3246
3247 iwl_led_start(priv);
3248
3155 priv->is_open = 1; 3249 priv->is_open = 1;
3156 IWL_DEBUG_MAC80211(priv, "leave\n"); 3250 IWL_DEBUG_MAC80211(priv, "leave\n");
3157 return 0; 3251 return 0;
@@ -3606,7 +3700,7 @@ static ssize_t show_statistics(struct device *d,
3606 return -EAGAIN; 3700 return -EAGAIN;
3607 3701
3608 mutex_lock(&priv->mutex); 3702 mutex_lock(&priv->mutex);
3609 rc = iwl_send_statistics_request(priv, 0); 3703 rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
3610 mutex_unlock(&priv->mutex); 3704 mutex_unlock(&priv->mutex);
3611 3705
3612 if (rc) { 3706 if (rc) {
@@ -3795,7 +3889,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3795 /* Clear the driver's (not device's) station table */ 3889 /* Clear the driver's (not device's) station table */
3796 iwl_clear_stations_table(priv); 3890 iwl_clear_stations_table(priv);
3797 3891
3798 priv->data_retry_limit = -1;
3799 priv->ieee_channels = NULL; 3892 priv->ieee_channels = NULL;
3800 priv->ieee_rates = NULL; 3893 priv->ieee_rates = NULL;
3801 priv->band = IEEE80211_BAND_2GHZ; 3894 priv->band = IEEE80211_BAND_2GHZ;
@@ -3862,10 +3955,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3862 BIT(NL80211_IFTYPE_STATION) | 3955 BIT(NL80211_IFTYPE_STATION) |
3863 BIT(NL80211_IFTYPE_ADHOC); 3956 BIT(NL80211_IFTYPE_ADHOC);
3864 3957
3865 hw->wiphy->custom_regulatory = true; 3958 hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
3866 3959 WIPHY_FLAG_DISABLE_BEACON_HINTS;
3867 /* Firmware does not support this */
3868 hw->wiphy->disable_beacon_hints = true;
3869 3960
3870 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3961 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3871 /* we create the 802.11 header and a zero-length SSID element */ 3962 /* we create the 802.11 header and a zero-length SSID element */
@@ -3982,13 +4073,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3982 */ 4073 */
3983 spin_lock_init(&priv->reg_lock); 4074 spin_lock_init(&priv->reg_lock);
3984 4075
3985 /* amp init */
3986 err = priv->cfg->ops->lib->apm_ops.init(priv);
3987 if (err < 0) {
3988 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
3989 goto out_iounmap;
3990 }
3991
3992 /*********************** 4076 /***********************
3993 * 4. Read EEPROM 4077 * 4. Read EEPROM
3994 * ********************/ 4078 * ********************/
@@ -4054,6 +4138,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4054 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); 4138 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
4055 iwl3945_setup_deferred_work(priv); 4139 iwl3945_setup_deferred_work(priv);
4056 iwl3945_setup_rx_handlers(priv); 4140 iwl3945_setup_rx_handlers(priv);
4141 iwl_power_initialize(priv);
4057 4142
4058 /********************************* 4143 /*********************************
4059 * 8. Setup and Register mac80211 4144 * 8. Setup and Register mac80211
@@ -4124,6 +4209,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4124 iwl3945_down(priv); 4209 iwl3945_down(priv);
4125 } 4210 }
4126 4211
4212 /*
4213 * Make sure device is reset to low power before unloading driver.
4214 * This may be redundant with iwl_down(), but there are paths to
4215 * run iwl_down() without calling apm_ops.stop(), and there are
4216 * paths to avoid running iwl_down() at all before leaving driver.
4217 * This (inexpensive) call *makes sure* device is reset.
4218 */
4219 priv->cfg->ops->lib->apm_ops.stop(priv);
4220
4127 /* make sure we flush any pending irq or 4221 /* make sure we flush any pending irq or
4128 * tasklet for the driver 4222 * tasklet for the driver
4129 */ 4223 */
@@ -4226,18 +4320,19 @@ static void __exit iwl3945_exit(void)
4226 4320
4227MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); 4321MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
4228 4322
4229module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444); 4323module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4230MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 4324MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4231module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444); 4325module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4232MODULE_PARM_DESC(swcrypto, 4326MODULE_PARM_DESC(swcrypto,
4233 "using software crypto (default 1 [software])\n"); 4327 "using software crypto (default 1 [software])\n");
4234#ifdef CONFIG_IWLWIFI_DEBUG 4328#ifdef CONFIG_IWLWIFI_DEBUG
4235module_param_named(debug, iwl_debug_level, uint, 0644); 4329module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4236MODULE_PARM_DESC(debug, "debug output mask"); 4330MODULE_PARM_DESC(debug, "debug output mask");
4237#endif 4331#endif
4238module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444); 4332module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4333 int, S_IRUGO);
4239MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 4334MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4240module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, 0444); 4335module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4241MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); 4336MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
4242 4337
4243module_exit(iwl3945_exit); 4338module_exit(iwl3945_exit);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index c25a04371ca8..b9d34a766964 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,8 +1,9 @@
1config IWM 1config IWM
2 tristate "Intel Wireless Multicomm 3200 WiFi driver" 2 tristate "Intel Wireless Multicomm 3200 WiFi driver"
3 depends on MMC && WLAN_80211 && EXPERIMENTAL 3 depends on MMC && EXPERIMENTAL
4 depends on CFG80211 4 depends on CFG80211
5 select FW_LOADER 5 select FW_LOADER
6 select IWMC3200TOP
6 help 7 help
7 The Intel Wireless Multicomm 3200 hardware is a combo 8 The Intel Wireless Multicomm 3200 hardware is a combo
8 card with GPS, Bluetooth, WiMax and 802.11 radios. It 9 card with GPS, Bluetooth, WiMax and 802.11 radios. It
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index f3c55658225b..2e00a4b389e6 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -405,39 +405,21 @@ static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
405{ 405{
406 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 406 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
407 struct ieee80211_channel *chan = params->channel; 407 struct ieee80211_channel *chan = params->channel;
408 struct cfg80211_bss *bss;
409 408
410 if (!test_bit(IWM_STATUS_READY, &iwm->status)) 409 if (!test_bit(IWM_STATUS_READY, &iwm->status))
411 return -EIO; 410 return -EIO;
412 411
413 /* UMAC doesn't support creating IBSS network with specified bssid. 412 /* UMAC doesn't support creating or joining an IBSS network
414 * This should be removed after we have join only mode supported. */ 413 * with specified bssid. */
415 if (params->bssid) 414 if (params->bssid)
416 return -EOPNOTSUPP; 415 return -EOPNOTSUPP;
417 416
418 bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
419 params->ssid, params->ssid_len);
420 if (!bss) {
421 iwm_scan_one_ssid(iwm, params->ssid, params->ssid_len);
422 schedule_timeout_interruptible(2 * HZ);
423 bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
424 params->ssid, params->ssid_len);
425 }
426 /* IBSS join only mode is not supported by UMAC ATM */
427 if (bss) {
428 cfg80211_put_bss(bss);
429 return -EOPNOTSUPP;
430 }
431
432 iwm->channel = ieee80211_frequency_to_channel(chan->center_freq); 417 iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
433 iwm->umac_profile->ibss.band = chan->band; 418 iwm->umac_profile->ibss.band = chan->band;
434 iwm->umac_profile->ibss.channel = iwm->channel; 419 iwm->umac_profile->ibss.channel = iwm->channel;
435 iwm->umac_profile->ssid.ssid_len = params->ssid_len; 420 iwm->umac_profile->ssid.ssid_len = params->ssid_len;
436 memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len); 421 memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
437 422
438 if (params->bssid)
439 memcpy(&iwm->umac_profile->bssid[0], params->bssid, ETH_ALEN);
440
441 return iwm_send_mlme_profile(iwm); 423 return iwm_send_mlme_profile(iwm);
442} 424}
443 425
@@ -490,12 +472,12 @@ static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
490 return 0; 472 return 0;
491 } 473 }
492 474
475 if (wpa_version & NL80211_WPA_VERSION_1)
476 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
477
493 if (wpa_version & NL80211_WPA_VERSION_2) 478 if (wpa_version & NL80211_WPA_VERSION_2)
494 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK; 479 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
495 480
496 if (wpa_version & NL80211_WPA_VERSION_1)
497 iwm->umac_profile->sec.flags |= UMAC_SEC_FLG_WPA_ON_MSK;
498
499 return 0; 481 return 0;
500} 482}
501 483
@@ -646,6 +628,13 @@ static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
646 iwm->default_key = sme->key_idx; 628 iwm->default_key = sme->key_idx;
647 } 629 }
648 630
631 /* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
632 if ((iwm->umac_profile->sec.flags &
633 (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
634 iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
635 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
636 }
637
649 ret = iwm_send_mlme_profile(iwm); 638 ret = iwm_send_mlme_profile(iwm);
650 639
651 if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK || 640 if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
@@ -682,9 +671,19 @@ static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
682static int iwm_cfg80211_set_txpower(struct wiphy *wiphy, 671static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
683 enum tx_power_setting type, int dbm) 672 enum tx_power_setting type, int dbm)
684{ 673{
674 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
675 int ret;
676
685 switch (type) { 677 switch (type) {
686 case TX_POWER_AUTOMATIC: 678 case TX_POWER_AUTOMATIC:
687 return 0; 679 return 0;
680 case TX_POWER_FIXED:
681 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
682 CFG_TX_PWR_LIMIT_USR, dbm * 2);
683 if (ret < 0)
684 return ret;
685
686 return iwm_tx_power_trigger(iwm);
688 default: 687 default:
689 return -EOPNOTSUPP; 688 return -EOPNOTSUPP;
690 } 689 }
@@ -696,7 +695,7 @@ static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
696{ 695{
697 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 696 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
698 697
699 *dbm = iwm->txpower; 698 *dbm = iwm->txpower >> 1;
700 699
701 return 0; 700 return 0;
702} 701}
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 84158b6d35d8..7e12438551ba 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -77,6 +77,11 @@ int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
77 int ret; 77 int ret;
78 u8 oid = hdr->oid; 78 u8 oid = hdr->oid;
79 79
80 if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
81 IWM_ERR(iwm, "Interface is not ready yet");
82 return -EAGAIN;
83 }
84
80 umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER; 85 umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
81 umac_cmd.resp = resp; 86 umac_cmd.resp = resp;
82 87
@@ -275,6 +280,17 @@ int iwm_send_calib_results(struct iwm_priv *iwm)
275 return ret; 280 return ret;
276} 281}
277 282
283int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
284{
285 struct iwm_ct_kill_cfg_cmd cmd;
286
287 cmd.entry_threshold = entry;
288 cmd.exit_threshold = exit;
289
290 return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
291 sizeof(struct iwm_ct_kill_cfg_cmd), 0);
292}
293
278int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp) 294int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
279{ 295{
280 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; 296 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
@@ -778,11 +794,24 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
778 return ret; 794 return ret;
779 795
780 ret = wait_event_interruptible_timeout(iwm->mlme_queue, 796 ret = wait_event_interruptible_timeout(iwm->mlme_queue,
781 (iwm->umac_profile_active == 0), 2 * HZ); 797 (iwm->umac_profile_active == 0), 5 * HZ);
782 798
783 return ret ? 0 : -EBUSY; 799 return ret ? 0 : -EBUSY;
784} 800}
785 801
802int iwm_tx_power_trigger(struct iwm_priv *iwm)
803{
804 struct iwm_umac_pwr_trigger pwr_trigger;
805
806 pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
807 pwr_trigger.hdr.buf_size =
808 cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
809 sizeof(struct iwm_umac_wifi_if));
810
811
812 return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
813}
814
786int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags) 815int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
787{ 816{
788 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; 817 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index e24d5b633997..b36be2b23a3c 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -102,7 +102,6 @@ enum {
102 CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN, 102 CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
103 CFG_TLC_SUPPORTED_TX_HT_RATES, 103 CFG_TLC_SUPPORTED_TX_HT_RATES,
104 CFG_TLC_SUPPORTED_TX_RATES, 104 CFG_TLC_SUPPORTED_TX_RATES,
105 CFG_TLC_VALID_ANTENNA,
106 CFG_TLC_SPATIAL_STREAM_SUPPORTED, 105 CFG_TLC_SPATIAL_STREAM_SUPPORTED,
107 CFG_TLC_RETRY_PER_RATE, 106 CFG_TLC_RETRY_PER_RATE,
108 CFG_TLC_RETRY_PER_HT_RATE, 107 CFG_TLC_RETRY_PER_HT_RATE,
@@ -136,6 +135,10 @@ enum {
136 CFG_TLC_RENEW_ADDBA_DELAY, 135 CFG_TLC_RENEW_ADDBA_DELAY,
137 CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD, 136 CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
138 CFG_TLC_IS_STABLE_IN_HT, 137 CFG_TLC_IS_STABLE_IN_HT,
138 CFG_TLC_SR_SIC_1ST_FAIL,
139 CFG_TLC_SR_SIC_1ST_PASS,
140 CFG_TLC_SR_SIC_TOTAL_FAIL,
141 CFG_TLC_SR_SIC_TOTAL_PASS,
139 CFG_RLC_CHAIN_CTRL, 142 CFG_RLC_CHAIN_CTRL,
140 CFG_TRK_TABLE_OP_MODE, 143 CFG_TRK_TABLE_OP_MODE,
141 CFG_TRK_TABLE_RSSI_THRESHOLD, 144 CFG_TRK_TABLE_RSSI_THRESHOLD,
@@ -147,6 +150,58 @@ enum {
147 CFG_MLME_DBG_NOTIF_BLOCK, 150 CFG_MLME_DBG_NOTIF_BLOCK,
148 CFG_BT_OFF_BECONS_INTERVALS, 151 CFG_BT_OFF_BECONS_INTERVALS,
149 CFG_BT_FRAG_DURATION, 152 CFG_BT_FRAG_DURATION,
153 CFG_ACTIVE_CHAINS,
154 CFG_CALIB_CTRL,
155 CFG_CAPABILITY_SUPPORTED_HT_RATES,
156 CFG_HT_MAC_PARAM_INFO,
157 CFG_MIMO_PS_MODE,
158 CFG_HT_DEFAULT_CAPABILIES_INFO,
159 CFG_LED_SC_RESOLUTION_FACTOR,
160 CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
161 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
162 CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
163 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
164 CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
165 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
166 CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
167 CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
168 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
169 CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
170 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
171 CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
172 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
173 CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
174 CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
175 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
176 CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
177 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
178 CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
179 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
180 CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
181 CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
182 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
183 CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
184 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
185 CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
186 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
187 CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
188 CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
189 CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
190 CFG_PTAM_LINK_SENS_FA_CCK_MAX,
191 CFG_PTAM_LINK_SENS_FA_CCK_MIN,
192 CFG_PTAM_LINK_SENS_NRG_DIFF,
193 CFG_PTAM_LINK_SENS_NRG_MARGIN,
194 CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
195 CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
196 CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
197 CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
198 CFG_AGG_MGG_ADDBA_BUF_SIZE,
199 CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
200 CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
201 CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
202 CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
203 CFG_11D_ENABLED,
204 CFG_11H_FEATURE_FLAGS,
150 205
151 /* <-- LAST --> */ 206 /* <-- LAST --> */
152 CFG_TBL_FIX_LAST 207 CFG_TBL_FIX_LAST
@@ -155,7 +210,8 @@ enum {
155/* variable size table */ 210/* variable size table */
156enum { 211enum {
157 CFG_NET_ADDR = 0, 212 CFG_NET_ADDR = 0,
158 CFG_PROFILE, 213 CFG_LED_PATTERN_TABLE,
214
159 /* <-- LAST --> */ 215 /* <-- LAST --> */
160 CFG_TBL_VAR_LAST 216 CFG_TBL_VAR_LAST
161}; 217};
@@ -288,6 +344,9 @@ struct iwm_umac_cmd_scan_request {
288/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */ 344/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
289#define UMAC_SEC_FLG_WSC_ON_POS 2 345#define UMAC_SEC_FLG_WSC_ON_POS 2
290#define UMAC_SEC_FLG_WSC_ON_SEED 1 346#define UMAC_SEC_FLG_WSC_ON_SEED 1
347#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \
348 UMAC_SEC_FLG_WSC_ON_POS)
349
291 350
292/* Legacy profile can use only WEP40 and WEP104 for encryption and 351/* Legacy profile can use only WEP40 and WEP104 for encryption and
293 * OPEN or PSK for authentication */ 352 * OPEN or PSK for authentication */
@@ -382,6 +441,11 @@ struct iwm_umac_tx_key_id {
382 u8 reserved[3]; 441 u8 reserved[3];
383} __attribute__ ((packed)); 442} __attribute__ ((packed));
384 443
444struct iwm_umac_pwr_trigger {
445 struct iwm_umac_wifi_if hdr;
446 __le32 reseved;
447} __attribute__ ((packed));
448
385struct iwm_umac_cmd_stats_req { 449struct iwm_umac_cmd_stats_req {
386 __le32 flags; 450 __le32 flags;
387} __attribute__ ((packed)); 451} __attribute__ ((packed));
@@ -393,6 +457,7 @@ int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
393int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested); 457int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
394int iwm_send_calib_results(struct iwm_priv *iwm); 458int iwm_send_calib_results(struct iwm_priv *iwm);
395int iwm_store_rxiq_calib_result(struct iwm_priv *iwm); 459int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
460int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
396 461
397/* UMAC commands */ 462/* UMAC commands */
398int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size, 463int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
@@ -407,6 +472,7 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
407int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id); 472int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
408int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx); 473int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
409int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key); 474int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
475int iwm_tx_power_trigger(struct iwm_priv *iwm);
410int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags); 476int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
411int iwm_send_umac_channel_list(struct iwm_priv *iwm); 477int iwm_send_umac_channel_list(struct iwm_priv *iwm);
412int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids, 478int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index 6b0bcad758ca..49067092d336 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -217,6 +217,13 @@ static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
217 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date), 217 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
218 IWM_BUILD_DAY(build_date)); 218 IWM_BUILD_DAY(build_date));
219 219
220 if (!strcmp(img_name, iwm->bus_ops->umac_name))
221 sprintf(iwm->umac_version, "%02X.%02X",
222 ver->major, ver->minor);
223
224 if (!strcmp(img_name, iwm->bus_ops->lmac_name))
225 sprintf(iwm->lmac_version, "%02X.%02X",
226 ver->major, ver->minor);
220 227
221 err_release_fw: 228 err_release_fw:
222 release_firmware(fw); 229 release_firmware(fw);
@@ -398,6 +405,8 @@ int iwm_load_fw(struct iwm_priv *iwm)
398 iwm_send_prio_table(iwm); 405 iwm_send_prio_table(iwm);
399 iwm_send_calib_results(iwm); 406 iwm_send_calib_results(iwm);
400 iwm_send_periodic_calib_cfg(iwm, periodic_calib_map); 407 iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
408 iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
409 iwm->conf.ct_kill_exit);
401 410
402 return 0; 411 return 0;
403 412
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 1b02a4e2a1ac..a9bf6bc97bea 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -65,6 +65,8 @@ struct iwm_conf {
65 u32 sdio_ior_timeout; 65 u32 sdio_ior_timeout;
66 unsigned long calib_map; 66 unsigned long calib_map;
67 unsigned long expected_calib_map; 67 unsigned long expected_calib_map;
68 u8 ct_kill_entry;
69 u8 ct_kill_exit;
68 bool reset_on_fatal_err; 70 bool reset_on_fatal_err;
69 bool auto_connect; 71 bool auto_connect;
70 bool wimax_not_present; 72 bool wimax_not_present;
@@ -276,12 +278,14 @@ struct iwm_priv {
276 struct iw_statistics wstats; 278 struct iw_statistics wstats;
277 struct delayed_work stats_request; 279 struct delayed_work stats_request;
278 struct delayed_work disconnect; 280 struct delayed_work disconnect;
281 struct delayed_work ct_kill_delay;
279 282
280 struct iwm_debugfs dbg; 283 struct iwm_debugfs dbg;
281 284
282 u8 *eeprom; 285 u8 *eeprom;
283 struct timer_list watchdog; 286 struct timer_list watchdog;
284 struct work_struct reset_worker; 287 struct work_struct reset_worker;
288 struct work_struct auth_retry_worker;
285 struct mutex mutex; 289 struct mutex mutex;
286 290
287 u8 *req_ie; 291 u8 *req_ie;
@@ -290,6 +294,8 @@ struct iwm_priv {
290 int resp_ie_len; 294 int resp_ie_len;
291 295
292 struct iwm_fw_error_hdr *last_fw_err; 296 struct iwm_fw_error_hdr *last_fw_err;
297 char umac_version[8];
298 char lmac_version[8];
293 299
294 char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); 300 char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
295}; 301};
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index 6c1a14c4480f..a3a79b5e2898 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -187,6 +187,14 @@ struct iwm_coex_prio_table_cmd {
187 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \ 187 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
188 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK) 188 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
189 189
190/* CT kill config command */
191struct iwm_ct_kill_cfg_cmd {
192 u32 exit_threshold;
193 u32 reserved;
194 u32 entry_threshold;
195} __attribute__ ((packed));
196
197
190/* LMAC OP CODES */ 198/* LMAC OP CODES */
191#define REPLY_PAD 0x0 199#define REPLY_PAD 0x0
192#define REPLY_ALIVE 0x1 200#define REPLY_ALIVE 0x1
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 222eb2cf1b30..75f105a59543 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -64,6 +64,8 @@ static struct iwm_conf def_iwm_conf = {
64 BIT(PHY_CALIBRATE_TX_IQ_CMD) | 64 BIT(PHY_CALIBRATE_TX_IQ_CMD) |
65 BIT(PHY_CALIBRATE_RX_IQ_CMD) | 65 BIT(PHY_CALIBRATE_RX_IQ_CMD) |
66 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD), 66 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
67 .ct_kill_entry = 110,
68 .ct_kill_exit = 110,
67 .reset_on_fatal_err = 1, 69 .reset_on_fatal_err = 1,
68 .auto_connect = 1, 70 .auto_connect = 1,
69 .wimax_not_present = 0, 71 .wimax_not_present = 0,
@@ -134,6 +136,17 @@ static void iwm_disconnect_work(struct work_struct *work)
134 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL); 136 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
135} 137}
136 138
139static void iwm_ct_kill_work(struct work_struct *work)
140{
141 struct iwm_priv *iwm =
142 container_of(work, struct iwm_priv, ct_kill_delay.work);
143 struct wiphy *wiphy = iwm_to_wiphy(iwm);
144
145 IWM_INFO(iwm, "CT kill delay timeout\n");
146
147 wiphy_rfkill_set_hw_state(wiphy, false);
148}
149
137static int __iwm_up(struct iwm_priv *iwm); 150static int __iwm_up(struct iwm_priv *iwm);
138static int __iwm_down(struct iwm_priv *iwm); 151static int __iwm_down(struct iwm_priv *iwm);
139 152
@@ -195,6 +208,33 @@ static void iwm_reset_worker(struct work_struct *work)
195 mutex_unlock(&iwm->mutex); 208 mutex_unlock(&iwm->mutex);
196} 209}
197 210
211static void iwm_auth_retry_worker(struct work_struct *work)
212{
213 struct iwm_priv *iwm;
214 int i, ret;
215
216 iwm = container_of(work, struct iwm_priv, auth_retry_worker);
217 if (iwm->umac_profile_active) {
218 ret = iwm_invalidate_mlme_profile(iwm);
219 if (ret < 0)
220 return;
221 }
222
223 iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
224
225 ret = iwm_send_mlme_profile(iwm);
226 if (ret < 0)
227 return;
228
229 for (i = 0; i < IWM_NUM_KEYS; i++)
230 if (iwm->keys[i].key_len)
231 iwm_set_key(iwm, 0, &iwm->keys[i]);
232
233 iwm_set_tx_key(iwm, iwm->default_key);
234}
235
236
237
198static void iwm_watchdog(unsigned long data) 238static void iwm_watchdog(unsigned long data)
199{ 239{
200 struct iwm_priv *iwm = (struct iwm_priv *)data; 240 struct iwm_priv *iwm = (struct iwm_priv *)data;
@@ -226,7 +266,9 @@ int iwm_priv_init(struct iwm_priv *iwm)
226 iwm->scan_id = 1; 266 iwm->scan_id = 1;
227 INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request); 267 INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
228 INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work); 268 INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
269 INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
229 INIT_WORK(&iwm->reset_worker, iwm_reset_worker); 270 INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
271 INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
230 INIT_LIST_HEAD(&iwm->bss_list); 272 INIT_LIST_HEAD(&iwm->bss_list);
231 273
232 skb_queue_head_init(&iwm->rx_list); 274 skb_queue_head_init(&iwm->rx_list);
@@ -587,6 +629,7 @@ static int __iwm_up(struct iwm_priv *iwm)
587{ 629{
588 int ret; 630 int ret;
589 struct iwm_notif *notif_reboot, *notif_ack = NULL; 631 struct iwm_notif *notif_reboot, *notif_ack = NULL;
632 struct wiphy *wiphy = iwm_to_wiphy(iwm);
590 633
591 ret = iwm_bus_enable(iwm); 634 ret = iwm_bus_enable(iwm);
592 if (ret) { 635 if (ret) {
@@ -638,6 +681,8 @@ static int __iwm_up(struct iwm_priv *iwm)
638 IWM_ERR(iwm, "MAC reading failed\n"); 681 IWM_ERR(iwm, "MAC reading failed\n");
639 goto err_disable; 682 goto err_disable;
640 } 683 }
684 memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr,
685 ETH_ALEN);
641 686
642 /* We can load the FWs */ 687 /* We can load the FWs */
643 ret = iwm_load_fw(iwm); 688 ret = iwm_load_fw(iwm);
@@ -646,6 +691,9 @@ static int __iwm_up(struct iwm_priv *iwm)
646 goto err_disable; 691 goto err_disable;
647 } 692 }
648 693
694 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
695 iwm->lmac_version, iwm->umac_version);
696
649 /* We configure the UMAC and enable the wifi module */ 697 /* We configure the UMAC and enable the wifi module */
650 ret = iwm_send_umac_config(iwm, 698 ret = iwm_send_umac_config(iwm,
651 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) | 699 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 35ec006c2d2c..4f8dbdd7b917 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -152,6 +152,7 @@ void iwm_if_free(struct iwm_priv *iwm)
152 if (!iwm_to_ndev(iwm)) 152 if (!iwm_to_ndev(iwm))
153 return; 153 return;
154 154
155 cancel_delayed_work_sync(&iwm->ct_kill_delay);
155 free_netdev(iwm_to_ndev(iwm)); 156 free_netdev(iwm_to_ndev(iwm));
156 iwm_priv_deinit(iwm); 157 iwm_priv_deinit(iwm);
157 kfree(iwm->umac_profile); 158 kfree(iwm->umac_profile);
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 771a301003c9..bdb1d7e7979d 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -423,7 +423,9 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
423 if (IS_ERR(ticket_node)) 423 if (IS_ERR(ticket_node))
424 return PTR_ERR(ticket_node); 424 return PTR_ERR(ticket_node);
425 425
426 IWM_DBG_RX(iwm, DBG, "TICKET RELEASE(%d)\n", 426 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
427 ticket->action == IWM_RX_TICKET_RELEASE ?
428 "RELEASE" : "DROP",
427 ticket->id); 429 ticket->id);
428 list_add_tail(&ticket_node->node, &iwm->rx_tickets); 430 list_add_tail(&ticket_node->node, &iwm->rx_tickets);
429 431
@@ -500,6 +502,18 @@ static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
500 return 0; 502 return 0;
501} 503}
502 504
505static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
506{
507 if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
508 iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
509 (iwm->umac_profile->sec.ucast_cipher ==
510 iwm->umac_profile->sec.mcast_cipher) &&
511 (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
512 return 1;
513
514 return 0;
515}
516
503static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, 517static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
504 unsigned long buf_size, 518 unsigned long buf_size,
505 struct iwm_wifi_cmd *cmd) 519 struct iwm_wifi_cmd *cmd)
@@ -565,11 +579,17 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
565 goto ibss; 579 goto ibss;
566 580
567 if (!test_bit(IWM_STATUS_RESETTING, &iwm->status)) 581 if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
568 cfg80211_connect_result(iwm_to_ndev(iwm), 582 if (!iwm_is_open_wep_profile(iwm)) {
569 complete->bssid, 583 cfg80211_connect_result(iwm_to_ndev(iwm),
570 NULL, 0, NULL, 0, 584 complete->bssid,
571 WLAN_STATUS_UNSPECIFIED_FAILURE, 585 NULL, 0, NULL, 0,
572 GFP_KERNEL); 586 WLAN_STATUS_UNSPECIFIED_FAILURE,
587 GFP_KERNEL);
588 } else {
589 /* Let's try shared WEP auth */
590 IWM_ERR(iwm, "Trying WEP shared auth\n");
591 schedule_work(&iwm->auth_retry_worker);
592 }
573 else 593 else
574 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, 594 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
575 GFP_KERNEL); 595 GFP_KERNEL);
@@ -713,6 +733,19 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
713 return 0; 733 return 0;
714} 734}
715 735
736static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
737 unsigned long buf_size,
738 struct iwm_wifi_cmd *cmd)
739{
740 struct wiphy *wiphy = iwm_to_wiphy(iwm);
741
742 IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
743
744 wiphy_rfkill_set_hw_state(wiphy, true);
745
746 return 0;
747}
748
716static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, 749static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
717 unsigned long buf_size, 750 unsigned long buf_size,
718 struct iwm_wifi_cmd *cmd) 751 struct iwm_wifi_cmd *cmd)
@@ -899,6 +932,8 @@ static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
899 case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED: 932 case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
900 IWM_DBG_MLME(iwm, DBG, "Extended IE required\n"); 933 IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
901 break; 934 break;
935 case WIFI_IF_NTFY_RADIO_PREEMPTION:
936 return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
902 case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED: 937 case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
903 return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd); 938 return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
904 case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED: 939 case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
@@ -1056,8 +1091,14 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1056 unsigned long buf_size, 1091 unsigned long buf_size,
1057 struct iwm_wifi_cmd *cmd) 1092 struct iwm_wifi_cmd *cmd)
1058{ 1093{
1059 struct iwm_umac_wifi_if *hdr = 1094 struct iwm_umac_wifi_if *hdr;
1060 (struct iwm_umac_wifi_if *)cmd->buf.payload; 1095
1096 if (cmd == NULL) {
1097 IWM_ERR(iwm, "Couldn't find expected wifi command\n");
1098 return -EINVAL;
1099 }
1100
1101 hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
1061 1102
1062 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: " 1103 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
1063 "oid is 0x%x\n", hdr->oid); 1104 "oid is 0x%x\n", hdr->oid);
@@ -1079,6 +1120,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1079 return 0; 1120 return 0;
1080} 1121}
1081 1122
1123#define CT_KILL_DELAY (30 * HZ)
1082static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf, 1124static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
1083 unsigned long buf_size, struct iwm_wifi_cmd *cmd) 1125 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
1084{ 1126{
@@ -1091,7 +1133,20 @@ static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
1091 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF", 1133 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
1092 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF"); 1134 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
1093 1135
1094 wiphy_rfkill_set_hw_state(wiphy, flags & IWM_CARD_STATE_HW_DISABLED); 1136 if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
1137 /*
1138 * We got a CTKILL event: We bring the interface down in
1139 * oder to cool the device down, and try to bring it up
1140 * 30 seconds later. If it's still too hot, we'll go through
1141 * this code path again.
1142 */
1143 cancel_delayed_work_sync(&iwm->ct_kill_delay);
1144 schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
1145 }
1146
1147 wiphy_rfkill_set_hw_state(wiphy, flags &
1148 (IWM_CARD_STATE_HW_DISABLED |
1149 IWM_CARD_STATE_CTKILL_DISABLED));
1095 1150
1096 return 0; 1151 return 0;
1097} 1152}
@@ -1282,6 +1337,14 @@ int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
1282 1337
1283 switch (le32_to_cpu(hdr->cmd)) { 1338 switch (le32_to_cpu(hdr->cmd)) {
1284 case UMAC_REBOOT_BARKER: 1339 case UMAC_REBOOT_BARKER:
1340 if (test_bit(IWM_STATUS_READY, &iwm->status)) {
1341 IWM_ERR(iwm, "Unexpected BARKER\n");
1342
1343 schedule_work(&iwm->reset_worker);
1344
1345 return 0;
1346 }
1347
1285 return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION, 1348 return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
1286 IWM_SRC_UDMA, buf, buf_size); 1349 IWM_SRC_UDMA, buf, buf_size);
1287 case UMAC_ACK_BARKER: 1350 case UMAC_ACK_BARKER:
@@ -1444,7 +1507,8 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
1444 } 1507 }
1445 break; 1508 break;
1446 case IWM_RX_TICKET_DROP: 1509 case IWM_RX_TICKET_DROP:
1447 IWM_DBG_RX(iwm, DBG, "DROP packet\n"); 1510 IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
1511 le16_to_cpu(ticket_node->ticket->flags));
1448 kfree_skb(packet->skb); 1512 kfree_skb(packet->skb);
1449 break; 1513 break;
1450 default: 1514 default:
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 8b1de84003ca..a7ec7eac9137 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -224,8 +224,6 @@ static int if_sdio_disable(struct iwm_priv *iwm)
224 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); 224 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
225 int ret; 225 int ret;
226 226
227 iwm_reset(iwm);
228
229 sdio_claim_host(hw->func); 227 sdio_claim_host(hw->func);
230 sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret); 228 sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
231 if (ret < 0) 229 if (ret < 0)
@@ -237,6 +235,8 @@ static int if_sdio_disable(struct iwm_priv *iwm)
237 235
238 iwm_sdio_rx_free(hw); 236 iwm_sdio_rx_free(hw);
239 237
238 iwm_reset(iwm);
239
240 IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n"); 240 IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
241 241
242 return 0; 242 return 0;
@@ -399,6 +399,9 @@ static struct iwm_if_ops if_sdio_ops = {
399 .calib_lmac_name = "iwmc3200wifi-calib-sdio.bin", 399 .calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
400 .lmac_name = "iwmc3200wifi-lmac-sdio.bin", 400 .lmac_name = "iwmc3200wifi-lmac-sdio.bin",
401}; 401};
402MODULE_FIRMWARE("iwmc3200wifi-umac-sdio.bin");
403MODULE_FIRMWARE("iwmc3200wifi-calib-sdio.bin");
404MODULE_FIRMWARE("iwmc3200wifi-lmac-sdio.bin");
402 405
403static int iwm_sdio_probe(struct sdio_func *func, 406static int iwm_sdio_probe(struct sdio_func *func,
404 const struct sdio_device_id *id) 407 const struct sdio_device_id *id)
@@ -493,8 +496,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
493} 496}
494 497
495static const struct sdio_device_id iwm_sdio_ids[] = { 498static const struct sdio_device_id iwm_sdio_ids[] = {
496 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 499 /* Global/AGN SKU */
497 SDIO_DEVICE_ID_INTEL_IWMC3200WIFI) }, 500 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
501 /* BGN SKU */
502 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
498 { /* end: all zeroes */ }, 503 { /* end: all zeroes */ },
499}; 504};
500MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids); 505MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index c5a14ae3160a..be903543bb47 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -687,6 +687,9 @@ struct iwm_umac_notif_rx_ticket {
687/* Tx/Rx rates window (number of max of last update window per second) */ 687/* Tx/Rx rates window (number of max of last update window per second) */
688#define UMAC_NTF_RATE_SAMPLE_NR 4 688#define UMAC_NTF_RATE_SAMPLE_NR 4
689 689
690/* Max numbers of bits required to go through all antennae in bitmasks */
691#define UMAC_PHY_NUM_CHAINS 3
692
690#define IWM_UMAC_MGMT_TID 8 693#define IWM_UMAC_MGMT_TID 8
691#define IWM_UMAC_TID_NR 8 694#define IWM_UMAC_TID_NR 8
692 695
@@ -697,9 +700,11 @@ struct iwm_umac_notif_stats {
697 __le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */ 700 __le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */
698 __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR]; 701 __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
699 __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR]; 702 __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
703 __le32 chain_energy[UMAC_PHY_NUM_CHAINS];
700 s32 rssi_dbm; 704 s32 rssi_dbm;
701 s32 noise_dbm; 705 s32 noise_dbm;
702 __le32 supp_rates; 706 __le32 supp_rates;
707 __le32 supp_ht_rates;
703 __le32 missed_beacons; 708 __le32 missed_beacons;
704 __le32 rx_beacons; 709 __le32 rx_beacons;
705 __le32 rx_dir_pkts; 710 __le32 rx_dir_pkts;
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
deleted file mode 100644
index 5c6968101f0d..000000000000
--- a/drivers/net/wireless/libertas/11d.c
+++ /dev/null
@@ -1,696 +0,0 @@
1/**
2 * This file contains functions for 802.11D.
3 */
4#include <linux/ctype.h>
5#include <linux/kernel.h>
6#include <linux/wireless.h>
7
8#include "host.h"
9#include "decl.h"
10#include "11d.h"
11#include "dev.h"
12#include "wext.h"
13
14#define TX_PWR_DEFAULT 10
15
16static struct region_code_mapping region_code_mapping[] = {
17 {"US ", 0x10}, /* US FCC */
18 {"CA ", 0x10}, /* IC Canada */
19 {"SG ", 0x10}, /* Singapore */
20 {"EU ", 0x30}, /* ETSI */
21 {"AU ", 0x30}, /* Australia */
22 {"KR ", 0x30}, /* Republic Of Korea */
23 {"ES ", 0x31}, /* Spain */
24 {"FR ", 0x32}, /* France */
25 {"JP ", 0x40}, /* Japan */
26};
27
28/* Following 2 structure defines the supported channels */
29static struct chan_freq_power channel_freq_power_UN_BG[] = {
30 {1, 2412, TX_PWR_DEFAULT},
31 {2, 2417, TX_PWR_DEFAULT},
32 {3, 2422, TX_PWR_DEFAULT},
33 {4, 2427, TX_PWR_DEFAULT},
34 {5, 2432, TX_PWR_DEFAULT},
35 {6, 2437, TX_PWR_DEFAULT},
36 {7, 2442, TX_PWR_DEFAULT},
37 {8, 2447, TX_PWR_DEFAULT},
38 {9, 2452, TX_PWR_DEFAULT},
39 {10, 2457, TX_PWR_DEFAULT},
40 {11, 2462, TX_PWR_DEFAULT},
41 {12, 2467, TX_PWR_DEFAULT},
42 {13, 2472, TX_PWR_DEFAULT},
43 {14, 2484, TX_PWR_DEFAULT}
44};
45
46static u8 lbs_region_2_code(u8 *region)
47{
48 u8 i;
49
50 for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
51 region[i] = toupper(region[i]);
52
53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
54 if (!memcmp(region, region_code_mapping[i].region,
55 COUNTRY_CODE_LEN))
56 return (region_code_mapping[i].code);
57 }
58
59 /* default is US */
60 return (region_code_mapping[0].code);
61}
62
63static u8 *lbs_code_2_region(u8 code)
64{
65 u8 i;
66
67 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
68 if (region_code_mapping[i].code == code)
69 return (region_code_mapping[i].region);
70 }
71 /* default is US */
72 return (region_code_mapping[0].region);
73}
74
75/**
76 * @brief This function finds the nrchan-th chan after the firstchan
77 * @param band band
78 * @param firstchan first channel number
79 * @param nrchan number of channels
80 * @return the nrchan-th chan number
81*/
82static u8 lbs_get_chan_11d(u8 firstchan, u8 nrchan, u8 *chan)
83/*find the nrchan-th chan after the firstchan*/
84{
85 u8 i;
86 struct chan_freq_power *cfp;
87 u8 cfp_no;
88
89 cfp = channel_freq_power_UN_BG;
90 cfp_no = ARRAY_SIZE(channel_freq_power_UN_BG);
91
92 for (i = 0; i < cfp_no; i++) {
93 if ((cfp + i)->channel == firstchan) {
94 lbs_deb_11d("firstchan found\n");
95 break;
96 }
97 }
98
99 if (i < cfp_no) {
100 /*if beyond the boundary */
101 if (i + nrchan < cfp_no) {
102 *chan = (cfp + i + nrchan)->channel;
103 return 1;
104 }
105 }
106
107 return 0;
108}
109
110/**
111 * @brief This function Checks if chan txpwr is learned from AP/IBSS
112 * @param chan chan number
113 * @param parsed_region_chan pointer to parsed_region_chan_11d
114 * @return TRUE; FALSE
115*/
116static u8 lbs_channel_known_11d(u8 chan,
117 struct parsed_region_chan_11d * parsed_region_chan)
118{
119 struct chan_power_11d *chanpwr = parsed_region_chan->chanpwr;
120 u8 nr_chan = parsed_region_chan->nr_chan;
121 u8 i = 0;
122
123 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)chanpwr,
124 sizeof(struct chan_power_11d) * nr_chan);
125
126 for (i = 0; i < nr_chan; i++) {
127 if (chan == chanpwr[i].chan) {
128 lbs_deb_11d("found chan %d\n", chan);
129 return 1;
130 }
131 }
132
133 lbs_deb_11d("chan %d not found\n", chan);
134 return 0;
135}
136
137u32 lbs_chan_2_freq(u8 chan)
138{
139 struct chan_freq_power *cf;
140 u16 i;
141 u32 freq = 0;
142
143 cf = channel_freq_power_UN_BG;
144
145 for (i = 0; i < ARRAY_SIZE(channel_freq_power_UN_BG); i++) {
146 if (chan == cf[i].channel)
147 freq = cf[i].freq;
148 }
149
150 return freq;
151}
152
153static int generate_domain_info_11d(struct parsed_region_chan_11d
154 *parsed_region_chan,
155 struct lbs_802_11d_domain_reg *domaininfo)
156{
157 u8 nr_subband = 0;
158
159 u8 nr_chan = parsed_region_chan->nr_chan;
160 u8 nr_parsedchan = 0;
161
162 u8 firstchan = 0, nextchan = 0, maxpwr = 0;
163
164 u8 i, flag = 0;
165
166 memcpy(domaininfo->countrycode, parsed_region_chan->countrycode,
167 COUNTRY_CODE_LEN);
168
169 lbs_deb_11d("nrchan %d\n", nr_chan);
170 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)parsed_region_chan,
171 sizeof(struct parsed_region_chan_11d));
172
173 for (i = 0; i < nr_chan; i++) {
174 if (!flag) {
175 flag = 1;
176 nextchan = firstchan =
177 parsed_region_chan->chanpwr[i].chan;
178 maxpwr = parsed_region_chan->chanpwr[i].pwr;
179 nr_parsedchan = 1;
180 continue;
181 }
182
183 if (parsed_region_chan->chanpwr[i].chan == nextchan + 1 &&
184 parsed_region_chan->chanpwr[i].pwr == maxpwr) {
185 nextchan++;
186 nr_parsedchan++;
187 } else {
188 domaininfo->subband[nr_subband].firstchan = firstchan;
189 domaininfo->subband[nr_subband].nrchan =
190 nr_parsedchan;
191 domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
192 nr_subband++;
193 nextchan = firstchan =
194 parsed_region_chan->chanpwr[i].chan;
195 maxpwr = parsed_region_chan->chanpwr[i].pwr;
196 }
197 }
198
199 if (flag) {
200 domaininfo->subband[nr_subband].firstchan = firstchan;
201 domaininfo->subband[nr_subband].nrchan = nr_parsedchan;
202 domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
203 nr_subband++;
204 }
205 domaininfo->nr_subband = nr_subband;
206
207 lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband);
208 lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo,
209 COUNTRY_CODE_LEN + 1 +
210 sizeof(struct ieee_subbandset) * nr_subband);
211 return 0;
212}
213
214/**
215 * @brief This function generates parsed_region_chan from Domain Info learned from AP/IBSS
216 * @param region_chan pointer to struct region_channel
217 * @param *parsed_region_chan pointer to parsed_region_chan_11d
218 * @return N/A
219*/
220static void lbs_generate_parsed_region_chan_11d(struct region_channel *region_chan,
221 struct parsed_region_chan_11d *
222 parsed_region_chan)
223{
224 u8 i;
225 struct chan_freq_power *cfp;
226
227 if (region_chan == NULL) {
228 lbs_deb_11d("region_chan is NULL\n");
229 return;
230 }
231
232 cfp = region_chan->CFP;
233 if (cfp == NULL) {
234 lbs_deb_11d("cfp is NULL \n");
235 return;
236 }
237
238 parsed_region_chan->band = region_chan->band;
239 parsed_region_chan->region = region_chan->region;
240 memcpy(parsed_region_chan->countrycode,
241 lbs_code_2_region(region_chan->region), COUNTRY_CODE_LEN);
242
243 lbs_deb_11d("region 0x%x, band %d\n", parsed_region_chan->region,
244 parsed_region_chan->band);
245
246 for (i = 0; i < region_chan->nrcfp; i++, cfp++) {
247 parsed_region_chan->chanpwr[i].chan = cfp->channel;
248 parsed_region_chan->chanpwr[i].pwr = cfp->maxtxpower;
249 lbs_deb_11d("chan %d, pwr %d\n",
250 parsed_region_chan->chanpwr[i].chan,
251 parsed_region_chan->chanpwr[i].pwr);
252 }
253 parsed_region_chan->nr_chan = region_chan->nrcfp;
254
255 lbs_deb_11d("nrchan %d\n", parsed_region_chan->nr_chan);
256
257 return;
258}
259
260/**
261 * @brief generate parsed_region_chan from Domain Info learned from AP/IBSS
262 * @param region region ID
263 * @param band band
264 * @param chan chan
265 * @return TRUE;FALSE
266*/
267static u8 lbs_region_chan_supported_11d(u8 region, u8 chan)
268{
269 struct chan_freq_power *cfp;
270 int cfp_no;
271 u8 idx;
272 int ret = 0;
273
274 lbs_deb_enter(LBS_DEB_11D);
275
276 cfp = lbs_get_region_cfp_table(region, &cfp_no);
277 if (cfp == NULL)
278 return 0;
279
280 for (idx = 0; idx < cfp_no; idx++) {
281 if (chan == (cfp + idx)->channel) {
282 /* If Mrvl Chip Supported? */
283 if ((cfp + idx)->unsupported) {
284 ret = 0;
285 } else {
286 ret = 1;
287 }
288 goto done;
289 }
290 }
291
292 /*chan is not in the region table */
293
294done:
295 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
296 return ret;
297}
298
299/**
300 * @brief This function checks if chan txpwr is learned from AP/IBSS
301 * @param chan chan number
302 * @param parsed_region_chan pointer to parsed_region_chan_11d
303 * @return 0
304*/
305static int parse_domain_info_11d(struct ieee_ie_country_info_full_set *countryinfo,
306 u8 band,
307 struct parsed_region_chan_11d *parsed_region_chan)
308{
309 u8 nr_subband, nrchan;
310 u8 lastchan, firstchan;
311 u8 region;
312 u8 curchan = 0;
313
314 u8 idx = 0; /*chan index in parsed_region_chan */
315
316 u8 j, i;
317
318 lbs_deb_enter(LBS_DEB_11D);
319
320 /*validation Rules:
321 1. valid region Code
322 2. First Chan increment
323 3. channel range no overlap
324 4. channel is valid?
325 5. channel is supported by region?
326 6. Others
327 */
328
329 lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30);
330
331 if ((*(countryinfo->countrycode)) == 0
332 || (countryinfo->header.len <= COUNTRY_CODE_LEN)) {
333 /* No region Info or Wrong region info: treat as No 11D info */
334 goto done;
335 }
336
337 /*Step1: check region_code */
338 parsed_region_chan->region = region =
339 lbs_region_2_code(countryinfo->countrycode);
340
341 lbs_deb_11d("regioncode=%x\n", (u8) parsed_region_chan->region);
342 lbs_deb_hex(LBS_DEB_11D, "countrycode", (char *)countryinfo->countrycode,
343 COUNTRY_CODE_LEN);
344
345 parsed_region_chan->band = band;
346
347 memcpy(parsed_region_chan->countrycode, countryinfo->countrycode,
348 COUNTRY_CODE_LEN);
349
350 nr_subband = (countryinfo->header.len - COUNTRY_CODE_LEN) /
351 sizeof(struct ieee_subbandset);
352
353 for (j = 0, lastchan = 0; j < nr_subband; j++) {
354
355 if (countryinfo->subband[j].firstchan <= lastchan) {
356 /*Step2&3. Check First Chan Num increment and no overlap */
357 lbs_deb_11d("chan %d>%d, overlap\n",
358 countryinfo->subband[j].firstchan, lastchan);
359 continue;
360 }
361
362 firstchan = countryinfo->subband[j].firstchan;
363 nrchan = countryinfo->subband[j].nrchan;
364
365 for (i = 0; idx < MAX_NO_OF_CHAN && i < nrchan; i++) {
366 /*step4: channel is supported? */
367
368 if (!lbs_get_chan_11d(firstchan, i, &curchan)) {
369 /* Chan is not found in UN table */
370 lbs_deb_11d("chan is not supported: %d \n", i);
371 break;
372 }
373
374 lastchan = curchan;
375
376 if (lbs_region_chan_supported_11d(region, curchan)) {
377 /*step5: Check if curchan is supported by mrvl in region */
378 parsed_region_chan->chanpwr[idx].chan = curchan;
379 parsed_region_chan->chanpwr[idx].pwr =
380 countryinfo->subband[j].maxtxpwr;
381 idx++;
382 } else {
383 /*not supported and ignore the chan */
384 lbs_deb_11d(
385 "i %d, chan %d unsupported in region %x, band %d\n",
386 i, curchan, region, band);
387 }
388 }
389
390 /*Step6: Add other checking if any */
391
392 }
393
394 parsed_region_chan->nr_chan = idx;
395
396 lbs_deb_11d("nrchan=%x\n", parsed_region_chan->nr_chan);
397 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (u8 *) parsed_region_chan,
398 2 + COUNTRY_CODE_LEN + sizeof(struct parsed_region_chan_11d) * idx);
399
400done:
401 lbs_deb_enter(LBS_DEB_11D);
402 return 0;
403}
404
405/**
406 * @brief This function calculates the scan type for channels
407 * @param chan chan number
408 * @param parsed_region_chan pointer to parsed_region_chan_11d
409 * @return PASSIVE if chan is unknown; ACTIVE if chan is known
410*/
411u8 lbs_get_scan_type_11d(u8 chan,
412 struct parsed_region_chan_11d * parsed_region_chan)
413{
414 u8 scan_type = CMD_SCAN_TYPE_PASSIVE;
415
416 lbs_deb_enter(LBS_DEB_11D);
417
418 if (lbs_channel_known_11d(chan, parsed_region_chan)) {
419 lbs_deb_11d("found, do active scan\n");
420 scan_type = CMD_SCAN_TYPE_ACTIVE;
421 } else {
422 lbs_deb_11d("not found, do passive scan\n");
423 }
424
425 lbs_deb_leave_args(LBS_DEB_11D, "ret scan_type %d", scan_type);
426 return scan_type;
427
428}
429
430void lbs_init_11d(struct lbs_private *priv)
431{
432 priv->enable11d = 0;
433 memset(&(priv->parsed_region_chan), 0,
434 sizeof(struct parsed_region_chan_11d));
435 return;
436}
437
438/**
439 * @brief This function sets DOMAIN INFO to FW
440 * @param priv pointer to struct lbs_private
441 * @return 0; -1
442*/
443static int set_domain_info_11d(struct lbs_private *priv)
444{
445 int ret;
446
447 if (!priv->enable11d) {
448 lbs_deb_11d("dnld domain Info with 11d disabled\n");
449 return 0;
450 }
451
452 ret = lbs_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO,
453 CMD_ACT_SET,
454 CMD_OPTION_WAITFORRSP, 0, NULL);
455 if (ret)
456 lbs_deb_11d("fail to dnld domain info\n");
457
458 return ret;
459}
460
461/**
462 * @brief This function setups scan channels
463 * @param priv pointer to struct lbs_private
464 * @param band band
465 * @return 0
466*/
467int lbs_set_universaltable(struct lbs_private *priv, u8 band)
468{
469 u16 size = sizeof(struct chan_freq_power);
470 u16 i = 0;
471
472 memset(priv->universal_channel, 0,
473 sizeof(priv->universal_channel));
474
475 priv->universal_channel[i].nrcfp =
476 sizeof(channel_freq_power_UN_BG) / size;
477 lbs_deb_11d("BG-band nrcfp %d\n",
478 priv->universal_channel[i].nrcfp);
479
480 priv->universal_channel[i].CFP = channel_freq_power_UN_BG;
481 priv->universal_channel[i].valid = 1;
482 priv->universal_channel[i].region = UNIVERSAL_REGION_CODE;
483 priv->universal_channel[i].band = band;
484 i++;
485
486 return 0;
487}
488
489/**
490 * @brief This function implements command CMD_802_11D_DOMAIN_INFO
491 * @param priv pointer to struct lbs_private
492 * @param cmd pointer to cmd buffer
493 * @param cmdno cmd ID
494 * @param cmdOption cmd action
495 * @return 0
496*/
497int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
498 struct cmd_ds_command *cmd, u16 cmdno,
499 u16 cmdoption)
500{
501 struct cmd_ds_802_11d_domain_info *pdomaininfo =
502 &cmd->params.domaininfo;
503 struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
504 u8 nr_subband = priv->domainreg.nr_subband;
505
506 lbs_deb_enter(LBS_DEB_11D);
507
508 lbs_deb_11d("nr_subband=%x\n", nr_subband);
509
510 cmd->command = cpu_to_le16(cmdno);
511 pdomaininfo->action = cpu_to_le16(cmdoption);
512 if (cmdoption == CMD_ACT_GET) {
513 cmd->size =
514 cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
515 lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
516 le16_to_cpu(cmd->size));
517 goto done;
518 }
519
520 domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
521 memcpy(domain->countrycode, priv->domainreg.countrycode,
522 sizeof(domain->countrycode));
523
524 domain->header.len =
525 cpu_to_le16(nr_subband * sizeof(struct ieee_subbandset) +
526 sizeof(domain->countrycode));
527
528 if (nr_subband) {
529 memcpy(domain->subband, priv->domainreg.subband,
530 nr_subband * sizeof(struct ieee_subbandset));
531
532 cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
533 le16_to_cpu(domain->header.len) +
534 sizeof(struct mrvl_ie_header) +
535 S_DS_GEN);
536 } else {
537 cmd->size =
538 cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
539 }
540
541 lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, le16_to_cpu(cmd->size));
542
543done:
544 lbs_deb_enter(LBS_DEB_11D);
545 return 0;
546}
547
548/**
549 * @brief This function parses countryinfo from AP and download country info to FW
550 * @param priv pointer to struct lbs_private
551 * @param resp pointer to command response buffer
552 * @return 0; -1
553 */
554int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
555{
556 struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
557 struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
558 u16 action = le16_to_cpu(domaininfo->action);
559 s16 ret = 0;
560 u8 nr_subband = 0;
561
562 lbs_deb_enter(LBS_DEB_11D);
563
564 lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp,
565 (int)le16_to_cpu(resp->size));
566
567 nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
568 sizeof(struct ieee_subbandset);
569
570 lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband);
571
572 if (nr_subband > MRVDRV_MAX_SUBBAND_802_11D) {
573 lbs_deb_11d("Invalid Numrer of Subband returned!!\n");
574 return -1;
575 }
576
577 switch (action) {
578 case CMD_ACT_SET: /*Proc Set action */
579 break;
580
581 case CMD_ACT_GET:
582 break;
583 default:
584 lbs_deb_11d("Invalid action:%d\n", domaininfo->action);
585 ret = -1;
586 break;
587 }
588
589 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
590 return ret;
591}
592
593/**
594 * @brief This function parses countryinfo from AP and download country info to FW
595 * @param priv pointer to struct lbs_private
596 * @return 0; -1
597 */
598int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
599 struct bss_descriptor * bss)
600{
601 int ret;
602
603 lbs_deb_enter(LBS_DEB_11D);
604 if (priv->enable11d) {
605 memset(&priv->parsed_region_chan, 0,
606 sizeof(struct parsed_region_chan_11d));
607 ret = parse_domain_info_11d(&bss->countryinfo, 0,
608 &priv->parsed_region_chan);
609
610 if (ret == -1) {
611 lbs_deb_11d("error parsing domain_info from AP\n");
612 goto done;
613 }
614
615 memset(&priv->domainreg, 0,
616 sizeof(struct lbs_802_11d_domain_reg));
617 generate_domain_info_11d(&priv->parsed_region_chan,
618 &priv->domainreg);
619
620 ret = set_domain_info_11d(priv);
621
622 if (ret) {
623 lbs_deb_11d("error setting domain info\n");
624 goto done;
625 }
626 }
627 ret = 0;
628
629done:
630 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
631 return ret;
632}
633
634/**
635 * @brief This function generates 11D info from user specified regioncode and download to FW
636 * @param priv pointer to struct lbs_private
637 * @return 0; -1
638 */
639int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv)
640{
641 int ret;
642 struct region_channel *region_chan;
643 u8 j;
644
645 lbs_deb_enter(LBS_DEB_11D);
646 lbs_deb_11d("curbssparams.band %d\n", priv->curbssparams.band);
647
648 if (priv->enable11d) {
649 /* update parsed_region_chan_11; dnld domaininf to FW */
650
651 for (j = 0; j < ARRAY_SIZE(priv->region_channel); j++) {
652 region_chan = &priv->region_channel[j];
653
654 lbs_deb_11d("%d region_chan->band %d\n", j,
655 region_chan->band);
656
657 if (!region_chan || !region_chan->valid
658 || !region_chan->CFP)
659 continue;
660 if (region_chan->band != priv->curbssparams.band)
661 continue;
662 break;
663 }
664
665 if (j >= ARRAY_SIZE(priv->region_channel)) {
666 lbs_deb_11d("region_chan not found, band %d\n",
667 priv->curbssparams.band);
668 ret = -1;
669 goto done;
670 }
671
672 memset(&priv->parsed_region_chan, 0,
673 sizeof(struct parsed_region_chan_11d));
674 lbs_generate_parsed_region_chan_11d(region_chan,
675 &priv->
676 parsed_region_chan);
677
678 memset(&priv->domainreg, 0,
679 sizeof(struct lbs_802_11d_domain_reg));
680 generate_domain_info_11d(&priv->parsed_region_chan,
681 &priv->domainreg);
682
683 ret = set_domain_info_11d(priv);
684
685 if (ret) {
686 lbs_deb_11d("error setting domain info\n");
687 goto done;
688 }
689
690 }
691 ret = 0;
692
693done:
694 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
695 return ret;
696}
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
deleted file mode 100644
index fb75d3e321a0..000000000000
--- a/drivers/net/wireless/libertas/11d.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/**
2 * This header file contains data structures and
3 * function declarations of 802.11d
4 */
5#ifndef _LBS_11D_
6#define _LBS_11D_
7
8#include "types.h"
9#include "defs.h"
10
11#define UNIVERSAL_REGION_CODE 0xff
12
 13/** (Beaconsize(256) - 5 (IEId, len, countrystr(3))) / 3 (FirstChan, NoOfChan, MaxPwr)
 14 */
15#define MRVDRV_MAX_SUBBAND_802_11D 83
16
17#define COUNTRY_CODE_LEN 3
18#define MAX_NO_OF_CHAN 40
19
20struct cmd_ds_command;
21
22/** Data structure for Country IE*/
23struct ieee_subbandset {
24 u8 firstchan;
25 u8 nrchan;
26 u8 maxtxpwr;
27} __attribute__ ((packed));
28
29struct ieee_ie_country_info_set {
30 struct ieee_ie_header header;
31
32 u8 countrycode[COUNTRY_CODE_LEN];
33 struct ieee_subbandset subband[1];
34};
35
36struct ieee_ie_country_info_full_set {
37 struct ieee_ie_header header;
38
39 u8 countrycode[COUNTRY_CODE_LEN];
40 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
41} __attribute__ ((packed));
42
43struct mrvl_ie_domain_param_set {
44 struct mrvl_ie_header header;
45
46 u8 countrycode[COUNTRY_CODE_LEN];
47 struct ieee_subbandset subband[1];
48} __attribute__ ((packed));
49
50struct cmd_ds_802_11d_domain_info {
51 __le16 action;
52 struct mrvl_ie_domain_param_set domain;
53} __attribute__ ((packed));
54
55/** domain regulatory information */
56struct lbs_802_11d_domain_reg {
57 /** country Code*/
58 u8 countrycode[COUNTRY_CODE_LEN];
59 /** No. of subband*/
60 u8 nr_subband;
61 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
62};
63
64struct chan_power_11d {
65 u8 chan;
66 u8 pwr;
67} __attribute__ ((packed));
68
69struct parsed_region_chan_11d {
70 u8 band;
71 u8 region;
72 s8 countrycode[COUNTRY_CODE_LEN];
73 struct chan_power_11d chanpwr[MAX_NO_OF_CHAN];
74 u8 nr_chan;
75} __attribute__ ((packed));
76
77struct region_code_mapping {
78 u8 region[COUNTRY_CODE_LEN];
79 u8 code;
80};
81
82struct lbs_private;
83
84u8 lbs_get_scan_type_11d(u8 chan,
85 struct parsed_region_chan_11d *parsed_region_chan);
86
87u32 lbs_chan_2_freq(u8 chan);
88
89void lbs_init_11d(struct lbs_private *priv);
90
91int lbs_set_universaltable(struct lbs_private *priv, u8 band);
92
93int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
94 struct cmd_ds_command *cmd, u16 cmdno,
95 u16 cmdOption);
96
97int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
98
99struct bss_descriptor;
100int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
101 struct bss_descriptor * bss);
102
103int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv);
104
105#endif
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
new file mode 100644
index 000000000000..30aa9d48d67e
--- /dev/null
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -0,0 +1,39 @@
1config LIBERTAS
2 tristate "Marvell 8xxx Libertas WLAN driver support"
3 depends on CFG80211
4 select WIRELESS_EXT
5 select WEXT_SPY
6 select LIB80211
7 select FW_LOADER
8 ---help---
9 A library for Marvell Libertas 8xxx devices.
10
11config LIBERTAS_USB
12 tristate "Marvell Libertas 8388 USB 802.11b/g cards"
13 depends on LIBERTAS && USB
14 ---help---
15 A driver for Marvell Libertas 8388 USB devices.
16
17config LIBERTAS_CS
18 tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
19 depends on LIBERTAS && PCMCIA
20 ---help---
21 A driver for Marvell Libertas 8385 CompactFlash devices.
22
23config LIBERTAS_SDIO
24 tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
25 depends on LIBERTAS && MMC
26 ---help---
27 A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
28
29config LIBERTAS_SPI
30 tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
31 depends on LIBERTAS && SPI
32 ---help---
33 A driver for Marvell Libertas 8686 SPI devices.
34
35config LIBERTAS_DEBUG
36 bool "Enable full debugging output in the Libertas module."
37 depends on LIBERTAS
38 ---help---
39 Debugging support.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 0b6918584503..fa37039e0eae 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,5 +1,15 @@
1libertas-objs := main.o wext.o rx.o tx.o cmd.o cmdresp.o scan.o 11d.o \ 1libertas-y += assoc.o
2 debugfs.o persistcfg.o ethtool.o assoc.o 2libertas-y += cfg.o
3libertas-y += cmd.o
4libertas-y += cmdresp.o
5libertas-y += debugfs.o
6libertas-y += ethtool.o
7libertas-y += main.o
8libertas-y += persistcfg.o
9libertas-y += rx.o
10libertas-y += scan.o
11libertas-y += tx.o
12libertas-y += wext.o
3 13
4usb8xxx-objs += if_usb.o 14usb8xxx-objs += if_usb.o
5libertas_cs-objs += if_cs.o 15libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index ab6a2d518af0..2726c044430f 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -1,5 +1,5 @@
1================================================================================ 1================================================================================
2 README for USB8388 2 README for Libertas
3 3
4 (c) Copyright © 2003-2006, Marvell International Ltd. 4 (c) Copyright © 2003-2006, Marvell International Ltd.
5 All Rights Reserved 5 All Rights Reserved
@@ -226,4 +226,28 @@ setuserscan
226 All entries in the scan table (not just the new scan data when keep=1) 226 All entries in the scan table (not just the new scan data when keep=1)
227 will be displayed upon completion by use of the getscantable ioctl. 227 will be displayed upon completion by use of the getscantable ioctl.
228 228
229========================
230IWCONFIG COMMANDS
231========================
232power period
233
234 This command is used to configure the station in deep sleep mode /
235 auto deep sleep mode.
236
237 The timer is implemented to monitor the activities (command, event,
238 etc.). When an activity is detected station will exit from deep
239 sleep mode automatically and restart the timer. At timer expiry
240 (no activity for defined time period) the deep sleep mode is entered
241 automatically.
242
243 Note: this command is for SDIO interface only.
244
245 Usage:
246 To enable deep sleep mode do:
247 iwconfig wlan0 power period 0
248 To enable auto deep sleep mode with idle time period 5 seconds do:
249 iwconfig wlan0 power period 5
250 To disable deep sleep/auto deep sleep mode do:
251 iwconfig wlan0 power period -1
252
229============================================================================== 253==============================================================================
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index dd8732611ba9..751067369ba8 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -23,6 +23,13 @@ static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
23 */ 23 */
24#define CAPINFO_MASK (~(0xda00)) 24#define CAPINFO_MASK (~(0xda00))
25 25
26/**
27 * 802.11b/g supported bitrates (in 500Kb/s units)
28 */
29u8 lbs_bg_rates[MAX_RATES] =
30 { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
310x00, 0x00 };
32
26 33
27/** 34/**
28 * @brief This function finds common rates between rates and card rates. 35 * @brief This function finds common rates between rates and card rates.
@@ -147,6 +154,397 @@ static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth
147} 154}
148 155
149 156
157int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
158 struct assoc_request *assoc)
159{
160 struct cmd_ds_802_11_set_wep cmd;
161 int ret = 0;
162
163 lbs_deb_enter(LBS_DEB_CMD);
164
165 memset(&cmd, 0, sizeof(cmd));
166 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
167 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
168
169 cmd.action = cpu_to_le16(cmd_action);
170
171 if (cmd_action == CMD_ACT_ADD) {
172 int i;
173
174 /* default tx key index */
175 cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
176 CMD_WEP_KEY_INDEX_MASK);
177
178 /* Copy key types and material to host command structure */
179 for (i = 0; i < 4; i++) {
180 struct enc_key *pkey = &assoc->wep_keys[i];
181
182 switch (pkey->len) {
183 case KEY_LEN_WEP_40:
184 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
185 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
186 lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
187 break;
188 case KEY_LEN_WEP_104:
189 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
190 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
191 lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
192 break;
193 case 0:
194 break;
195 default:
196 lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
197 i, pkey->len);
198 ret = -1;
199 goto done;
200 break;
201 }
202 }
203 } else if (cmd_action == CMD_ACT_REMOVE) {
204 /* ACT_REMOVE clears _all_ WEP keys */
205
206 /* default tx key index */
207 cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
208 CMD_WEP_KEY_INDEX_MASK);
209 lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
210 }
211
212 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
213done:
214 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
215 return ret;
216}
217
218int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
219 uint16_t *enable)
220{
221 struct cmd_ds_802_11_enable_rsn cmd;
222 int ret;
223
224 lbs_deb_enter(LBS_DEB_CMD);
225
226 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
227 cmd.action = cpu_to_le16(cmd_action);
228
229 if (cmd_action == CMD_ACT_GET)
230 cmd.enable = 0;
231 else {
232 if (*enable)
233 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
234 else
235 cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
236 lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
237 }
238
239 ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
240 if (!ret && cmd_action == CMD_ACT_GET)
241 *enable = le16_to_cpu(cmd.enable);
242
243 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
244 return ret;
245}
246
247static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
248 struct enc_key *key)
249{
250 lbs_deb_enter(LBS_DEB_CMD);
251
252 if (key->flags & KEY_INFO_WPA_ENABLED)
253 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
254 if (key->flags & KEY_INFO_WPA_UNICAST)
255 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
256 if (key->flags & KEY_INFO_WPA_MCAST)
257 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
258
259 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
260 keyparam->keytypeid = cpu_to_le16(key->type);
261 keyparam->keylen = cpu_to_le16(key->len);
262 memcpy(keyparam->key, key->key, key->len);
263
264 /* Length field doesn't include the {type,length} header */
265 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
266 lbs_deb_leave(LBS_DEB_CMD);
267}
268
269int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
270 struct assoc_request *assoc)
271{
272 struct cmd_ds_802_11_key_material cmd;
273 int ret = 0;
274 int index = 0;
275
276 lbs_deb_enter(LBS_DEB_CMD);
277
278 cmd.action = cpu_to_le16(cmd_action);
279 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
280
281 if (cmd_action == CMD_ACT_GET) {
282 cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
283 } else {
284 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
285
286 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
287 set_one_wpa_key(&cmd.keyParamSet[index],
288 &assoc->wpa_unicast_key);
289 index++;
290 }
291
292 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
293 set_one_wpa_key(&cmd.keyParamSet[index],
294 &assoc->wpa_mcast_key);
295 index++;
296 }
297
298 /* The common header and as many keys as we included */
299 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
300 keyParamSet[index]));
301 }
302 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
303 /* Copy the returned key to driver private data */
304 if (!ret && cmd_action == CMD_ACT_GET) {
305 void *buf_ptr = cmd.keyParamSet;
306 void *resp_end = &(&cmd)[1];
307
308 while (buf_ptr < resp_end) {
309 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
310 struct enc_key *key;
311 uint16_t param_set_len = le16_to_cpu(keyparam->length);
312 uint16_t key_len = le16_to_cpu(keyparam->keylen);
313 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
314 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
315 void *end;
316
317 end = (void *)keyparam + sizeof(keyparam->type)
318 + sizeof(keyparam->length) + param_set_len;
319
320 /* Make sure we don't access past the end of the IEs */
321 if (end > resp_end)
322 break;
323
324 if (key_flags & KEY_INFO_WPA_UNICAST)
325 key = &priv->wpa_unicast_key;
326 else if (key_flags & KEY_INFO_WPA_MCAST)
327 key = &priv->wpa_mcast_key;
328 else
329 break;
330
331 /* Copy returned key into driver */
332 memset(key, 0, sizeof(struct enc_key));
333 if (key_len > sizeof(key->key))
334 break;
335 key->type = key_type;
336 key->flags = key_flags;
337 key->len = key_len;
338 memcpy(key->key, keyparam->key, key->len);
339
340 buf_ptr = end + 1;
341 }
342 }
343
344 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
345 return ret;
346}
347
348static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
349{
350/* Bit Rate
351* 15:13 Reserved
352* 12 54 Mbps
353* 11 48 Mbps
354* 10 36 Mbps
355* 9 24 Mbps
356* 8 18 Mbps
357* 7 12 Mbps
358* 6 9 Mbps
359* 5 6 Mbps
360* 4 Reserved
361* 3 11 Mbps
362* 2 5.5 Mbps
363* 1 2 Mbps
364* 0 1 Mbps
365**/
366
367 uint16_t ratemask;
368 int i = lbs_data_rate_to_fw_index(rate);
369 if (lower_rates_ok)
370 ratemask = (0x1fef >> (12 - i));
371 else
372 ratemask = (1 << i);
373 return cpu_to_le16(ratemask);
374}
375
376int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
377 uint16_t cmd_action)
378{
379 struct cmd_ds_802_11_rate_adapt_rateset cmd;
380 int ret;
381
382 lbs_deb_enter(LBS_DEB_CMD);
383
384 if (!priv->cur_rate && !priv->enablehwauto)
385 return -EINVAL;
386
387 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
388
389 cmd.action = cpu_to_le16(cmd_action);
390 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
391 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
392 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
393 if (!ret && cmd_action == CMD_ACT_GET) {
394 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
395 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
396 }
397
398 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
399 return ret;
400}
401
402/**
403 * @brief Set the data rate
404 *
405 * @param priv A pointer to struct lbs_private structure
406 * @param rate The desired data rate, or 0 to clear a locked rate
407 *
408 * @return 0 on success, error on failure
409 */
410int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
411{
412 struct cmd_ds_802_11_data_rate cmd;
413 int ret = 0;
414
415 lbs_deb_enter(LBS_DEB_CMD);
416
417 memset(&cmd, 0, sizeof(cmd));
418 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
419
420 if (rate > 0) {
421 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
422 cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
423 if (cmd.rates[0] == 0) {
424 lbs_deb_cmd("DATA_RATE: invalid requested rate of"
425 " 0x%02X\n", rate);
426 ret = 0;
427 goto out;
428 }
429 lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
430 } else {
431 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
432 lbs_deb_cmd("DATA_RATE: setting auto\n");
433 }
434
435 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
436 if (ret)
437 goto out;
438
439 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
440
441 /* FIXME: get actual rates FW can do if this command actually returns
442 * all data rates supported.
443 */
444 priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
445 lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
446
447out:
448 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
449 return ret;
450}
451
452
453int lbs_cmd_802_11_rssi(struct lbs_private *priv,
454 struct cmd_ds_command *cmd)
455{
456
457 lbs_deb_enter(LBS_DEB_CMD);
458 cmd->command = cpu_to_le16(CMD_802_11_RSSI);
459 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
460 sizeof(struct cmd_header));
461 cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
462
463 /* reset Beacon SNR/NF/RSSI values */
464 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
465 priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
466 priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
467 priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
468 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
469 priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
470
471 lbs_deb_leave(LBS_DEB_CMD);
472 return 0;
473}
474
475int lbs_ret_802_11_rssi(struct lbs_private *priv,
476 struct cmd_ds_command *resp)
477{
478 struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
479
480 lbs_deb_enter(LBS_DEB_CMD);
481
482 /* store the non average value */
483 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
484 priv->NF[TYPE_BEACON][TYPE_NOAVG] =
485 get_unaligned_le16(&rssirsp->noisefloor);
486
487 priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
488 priv->NF[TYPE_BEACON][TYPE_AVG] =
489 get_unaligned_le16(&rssirsp->avgnoisefloor);
490
491 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
492 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
493 priv->NF[TYPE_BEACON][TYPE_NOAVG]);
494
495 priv->RSSI[TYPE_BEACON][TYPE_AVG] =
496 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
497 priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
498
499 lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
500 priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
501 priv->RSSI[TYPE_BEACON][TYPE_AVG]);
502
503 lbs_deb_leave(LBS_DEB_CMD);
504 return 0;
505}
506
507
508int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
509 struct cmd_ds_command *cmd,
510 u16 cmd_action)
511{
512 struct cmd_ds_802_11_beacon_control
513 *bcn_ctrl = &cmd->params.bcn_ctrl;
514
515 lbs_deb_enter(LBS_DEB_CMD);
516 cmd->size =
517 cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
518 + sizeof(struct cmd_header));
519 cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
520
521 bcn_ctrl->action = cpu_to_le16(cmd_action);
522 bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
523 bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
524
525 lbs_deb_leave(LBS_DEB_CMD);
526 return 0;
527}
528
529int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
530 struct cmd_ds_command *resp)
531{
532 struct cmd_ds_802_11_beacon_control *bcn_ctrl =
533 &resp->params.bcn_ctrl;
534
535 lbs_deb_enter(LBS_DEB_CMD);
536
537 if (bcn_ctrl->action == CMD_ACT_GET) {
538 priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
539 priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
540 }
541
542 lbs_deb_enter(LBS_DEB_CMD);
543 return 0;
544}
545
546
547
150static int lbs_assoc_post(struct lbs_private *priv, 548static int lbs_assoc_post(struct lbs_private *priv,
151 struct cmd_ds_802_11_associate_response *resp) 549 struct cmd_ds_802_11_associate_response *resp)
152{ 550{
@@ -226,7 +624,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
226 priv->connect_status = LBS_CONNECTED; 624 priv->connect_status = LBS_CONNECTED;
227 625
228 /* Update current SSID and BSSID */ 626 /* Update current SSID and BSSID */
229 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE); 627 memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
230 priv->curbssparams.ssid_len = bss->ssid_len; 628 priv->curbssparams.ssid_len = bss->ssid_len;
231 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN); 629 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
232 630
@@ -369,12 +767,7 @@ static int lbs_associate(struct lbs_private *priv,
369 (u16)(pos - (u8 *) &cmd.iebuf)); 767 (u16)(pos - (u8 *) &cmd.iebuf));
370 768
371 /* update curbssparams */ 769 /* update curbssparams */
372 priv->curbssparams.channel = bss->phy.ds.channel; 770 priv->channel = bss->phy.ds.channel;
373
374 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
375 ret = -1;
376 goto done;
377 }
378 771
379 ret = lbs_cmd_with_response(priv, command, &cmd); 772 ret = lbs_cmd_with_response(priv, command, &cmd);
380 if (ret == 0) { 773 if (ret == 0) {
@@ -472,7 +865,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
472 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN); 865 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
473 866
474 /* Set the new SSID to current SSID */ 867 /* Set the new SSID to current SSID */
475 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE); 868 memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
476 priv->curbssparams.ssid_len = bss->ssid_len; 869 priv->curbssparams.ssid_len = bss->ssid_len;
477 870
478 netif_carrier_on(priv->dev); 871 netif_carrier_on(priv->dev);
@@ -487,7 +880,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
487 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n", 880 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
488 print_ssid(ssid, bss->ssid, bss->ssid_len), 881 print_ssid(ssid, bss->ssid, bss->ssid_len),
489 priv->curbssparams.bssid, 882 priv->curbssparams.bssid,
490 priv->curbssparams.channel); 883 priv->channel);
491 884
492done: 885done:
493 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); 886 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
@@ -560,7 +953,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
560 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band); 953 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
561 954
562 priv->adhoccreate = 0; 955 priv->adhoccreate = 0;
563 priv->curbssparams.channel = bss->channel; 956 priv->channel = bss->channel;
564 957
565 /* Build the join command */ 958 /* Build the join command */
566 memset(&cmd, 0, sizeof(cmd)); 959 memset(&cmd, 0, sizeof(cmd));
@@ -633,11 +1026,6 @@ static int lbs_adhoc_join(struct lbs_private *priv,
633 } 1026 }
634 } 1027 }
635 1028
636 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
637 ret = -1;
638 goto out;
639 }
640
641 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); 1029 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
642 if (ret == 0) { 1030 if (ret == 0) {
643 ret = lbs_adhoc_post(priv, 1031 ret = lbs_adhoc_post(priv,
@@ -737,12 +1125,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
737 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n", 1125 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
738 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]); 1126 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
739 1127
740 if (lbs_create_dnld_countryinfo_11d(priv)) {
741 lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
742 ret = -1;
743 goto out;
744 }
745
746 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n", 1128 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
747 assoc_req->channel, assoc_req->band); 1129 assoc_req->channel, assoc_req->band);
748 1130
@@ -1099,7 +1481,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
1099 /* else send START command */ 1481 /* else send START command */
1100 lbs_deb_assoc("SSID not found, creating adhoc network\n"); 1482 lbs_deb_assoc("SSID not found, creating adhoc network\n");
1101 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid, 1483 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
1102 IW_ESSID_MAX_SIZE); 1484 IEEE80211_MAX_SSID_LEN);
1103 assoc_req->bss.ssid_len = assoc_req->ssid_len; 1485 assoc_req->bss.ssid_len = assoc_req->ssid_len;
1104 lbs_adhoc_start(priv, assoc_req); 1486 lbs_adhoc_start(priv, assoc_req);
1105 } 1487 }
@@ -1185,7 +1567,8 @@ static int assoc_helper_mode(struct lbs_private *priv,
1185 } 1567 }
1186 1568
1187 priv->mode = assoc_req->mode; 1569 priv->mode = assoc_req->mode;
1188 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode); 1570 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
1571 assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
1189 1572
1190done: 1573done:
1191 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 1574 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1205,7 +1588,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1205 goto done; 1588 goto done;
1206 } 1589 }
1207 1590
1208 if (assoc_req->channel == priv->curbssparams.channel) 1591 if (assoc_req->channel == priv->channel)
1209 goto done; 1592 goto done;
1210 1593
1211 if (priv->mesh_dev) { 1594 if (priv->mesh_dev) {
@@ -1217,7 +1600,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1217 } 1600 }
1218 1601
1219 lbs_deb_assoc("ASSOC: channel: %d -> %d\n", 1602 lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
1220 priv->curbssparams.channel, assoc_req->channel); 1603 priv->channel, assoc_req->channel);
1221 1604
1222 ret = lbs_set_channel(priv, assoc_req->channel); 1605 ret = lbs_set_channel(priv, assoc_req->channel);
1223 if (ret < 0) 1606 if (ret < 0)
@@ -1232,7 +1615,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1232 goto done; 1615 goto done;
1233 } 1616 }
1234 1617
1235 if (assoc_req->channel != priv->curbssparams.channel) { 1618 if (assoc_req->channel != priv->channel) {
1236 lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n", 1619 lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
1237 assoc_req->channel); 1620 assoc_req->channel);
1238 goto restore_mesh; 1621 goto restore_mesh;
@@ -1253,7 +1636,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1253 restore_mesh: 1636 restore_mesh:
1254 if (priv->mesh_dev) 1637 if (priv->mesh_dev)
1255 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1638 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1256 priv->curbssparams.channel); 1639 priv->channel);
1257 1640
1258 done: 1641 done:
1259 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 1642 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1475,7 +1858,7 @@ static int should_stop_adhoc(struct lbs_private *priv,
1475 } 1858 }
1476 1859
1477 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { 1860 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
1478 if (assoc_req->channel != priv->curbssparams.channel) 1861 if (assoc_req->channel != priv->channel)
1479 return 1; 1862 return 1;
1480 } 1863 }
1481 1864
@@ -1557,7 +1940,7 @@ static int lbs_find_best_network_ssid(struct lbs_private *priv,
1557 1940
1558 found = lbs_find_best_ssid_in_list(priv, preferred_mode); 1941 found = lbs_find_best_ssid_in_list(priv, preferred_mode);
1559 if (found && (found->ssid_len > 0)) { 1942 if (found && (found->ssid_len > 0)) {
1560 memcpy(out_ssid, &found->ssid, IW_ESSID_MAX_SIZE); 1943 memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
1561 *out_ssid_len = found->ssid_len; 1944 *out_ssid_len = found->ssid_len;
1562 *out_mode = found->mode; 1945 *out_mode = found->mode;
1563 ret = 0; 1946 ret = 0;
@@ -1775,12 +2158,12 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
1775 assoc_req = priv->pending_assoc_req; 2158 assoc_req = priv->pending_assoc_req;
1776 if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { 2159 if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
1777 memcpy(&assoc_req->ssid, &priv->curbssparams.ssid, 2160 memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
1778 IW_ESSID_MAX_SIZE); 2161 IEEE80211_MAX_SSID_LEN);
1779 assoc_req->ssid_len = priv->curbssparams.ssid_len; 2162 assoc_req->ssid_len = priv->curbssparams.ssid_len;
1780 } 2163 }
1781 2164
1782 if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) 2165 if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
1783 assoc_req->channel = priv->curbssparams.channel; 2166 assoc_req->channel = priv->channel;
1784 2167
1785 if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags)) 2168 if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
1786 assoc_req->band = priv->curbssparams.band; 2169 assoc_req->band = priv->curbssparams.band;
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 6e765e9f91a3..40621b789fc5 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -3,7 +3,126 @@
3#ifndef _LBS_ASSOC_H_ 3#ifndef _LBS_ASSOC_H_
4#define _LBS_ASSOC_H_ 4#define _LBS_ASSOC_H_
5 5
6#include "dev.h" 6
7#include "defs.h"
8#include "host.h"
9
10
11struct lbs_private;
12
13/*
14 * In theory, the IE is limited to the IE length, 255,
15 * but in practice 64 bytes are enough.
16 */
17#define MAX_WPA_IE_LEN 64
18
19
20
21struct lbs_802_11_security {
22 u8 WPAenabled;
23 u8 WPA2enabled;
24 u8 wep_enabled;
25 u8 auth_mode;
26 u32 key_mgmt;
27};
28
29/** Current Basic Service Set State Structure */
30struct current_bss_params {
31 /** bssid */
32 u8 bssid[ETH_ALEN];
33 /** ssid */
34 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
35 u8 ssid_len;
36
37 /** band */
38 u8 band;
39 /** channel is directly in priv->channel */
40 /** zero-terminated array of supported data rates */
41 u8 rates[MAX_RATES + 1];
42};
43
44/**
45 * @brief Structure used to store information for each beacon/probe response
46 */
47struct bss_descriptor {
48 u8 bssid[ETH_ALEN];
49
50 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
51 u8 ssid_len;
52
53 u16 capability;
54 u32 rssi;
55 u32 channel;
56 u16 beaconperiod;
57 __le16 atimwindow;
58
59 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
60 u8 mode;
61
62 /* zero-terminated array of supported data rates */
63 u8 rates[MAX_RATES + 1];
64
65 unsigned long last_scanned;
66
67 union ieee_phy_param_set phy;
68 union ieee_ss_param_set ss;
69
70 u8 wpa_ie[MAX_WPA_IE_LEN];
71 size_t wpa_ie_len;
72 u8 rsn_ie[MAX_WPA_IE_LEN];
73 size_t rsn_ie_len;
74
75 u8 mesh;
76
77 struct list_head list;
78};
79
80/** Association request
81 *
 82 * Encapsulates all the options that describe a specific association request
83 * or configuration of the wireless card's radio, mode, and security settings.
84 */
85struct assoc_request {
86#define ASSOC_FLAG_SSID 1
87#define ASSOC_FLAG_CHANNEL 2
88#define ASSOC_FLAG_BAND 3
89#define ASSOC_FLAG_MODE 4
90#define ASSOC_FLAG_BSSID 5
91#define ASSOC_FLAG_WEP_KEYS 6
92#define ASSOC_FLAG_WEP_TX_KEYIDX 7
93#define ASSOC_FLAG_WPA_MCAST_KEY 8
94#define ASSOC_FLAG_WPA_UCAST_KEY 9
95#define ASSOC_FLAG_SECINFO 10
96#define ASSOC_FLAG_WPA_IE 11
97 unsigned long flags;
98
99 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
100 u8 ssid_len;
101 u8 channel;
102 u8 band;
103 u8 mode;
104 u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
105
106 /** WEP keys */
107 struct enc_key wep_keys[4];
108 u16 wep_tx_keyidx;
109
110 /** WPA keys */
111 struct enc_key wpa_mcast_key;
112 struct enc_key wpa_unicast_key;
113
114 struct lbs_802_11_security secinfo;
115
116 /** WPA Information Elements*/
117 u8 wpa_ie[MAX_WPA_IE_LEN];
118 u8 wpa_ie_len;
119
120 /* BSS to associate with for infrastructure of Ad-Hoc join */
121 struct bss_descriptor bss;
122};
123
124
125extern u8 lbs_bg_rates[MAX_RATES];
7 126
8void lbs_association_worker(struct work_struct *work); 127void lbs_association_worker(struct work_struct *work);
9struct assoc_request *lbs_get_association_request(struct lbs_private *priv); 128struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
@@ -13,4 +132,24 @@ int lbs_adhoc_stop(struct lbs_private *priv);
13int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 132int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
14 u8 bssid[ETH_ALEN], u16 reason); 133 u8 bssid[ETH_ALEN], u16 reason);
15 134
135int lbs_cmd_802_11_rssi(struct lbs_private *priv,
136 struct cmd_ds_command *cmd);
137int lbs_ret_802_11_rssi(struct lbs_private *priv,
138 struct cmd_ds_command *resp);
139
140int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
141 struct cmd_ds_command *cmd,
142 u16 cmd_action);
143int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
144 struct cmd_ds_command *resp);
145
146int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
147 struct assoc_request *assoc);
148
149int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
150 uint16_t *enable);
151
152int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
153 struct assoc_request *assoc);
154
16#endif /* _LBS_ASSOC_H */ 155#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
new file mode 100644
index 000000000000..4396dccd12ac
--- /dev/null
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -0,0 +1,198 @@
1/*
2 * Implement cfg80211 ("iw") support.
3 *
4 * Copyright (C) 2009 M&N Solutions GmbH, 61191 Rosbach, Germany
5 * Holger Schurig <hs4233@mail.mn-solutions.de>
6 *
7 */
8
9#include <net/cfg80211.h>
10
11#include "cfg.h"
12#include "cmd.h"
13
14
/*
 * Declare one 2.4 GHz channel entry for cfg80211.  All entries share the
 * same band, antenna gain and power limit; only the channel number,
 * centre frequency and flags vary.
 */
#define CHAN2G(_channel, _freq, _flags) { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_channel), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

/* Channels 1-14 of the 2.4 GHz band (centre frequencies in MHz) */
static struct ieee80211_channel lbs_2ghz_channels[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};

/* Declare one bitrate entry; _rate is in units of 100 kbit/s */
#define RATETAB_ENT(_rate, _rateid, _flags) { \
	.bitrate = (_rate), \
	.hw_value = (_rateid), \
	.flags = (_flags), \
}


/*
 * Supported bitrates, 1 Mbit/s .. 54 Mbit/s.  hw_value looks like a
 * one-bit-per-rate mask -- TODO confirm against the firmware interface.
 */
static struct ieee80211_rate lbs_rates[] = {
	RATETAB_ENT(10, 0x1, 0),
	RATETAB_ENT(20, 0x2, 0),
	RATETAB_ENT(55, 0x4, 0),
	RATETAB_ENT(110, 0x8, 0),
	RATETAB_ENT(60, 0x10, 0),
	RATETAB_ENT(90, 0x20, 0),
	RATETAB_ENT(120, 0x40, 0),
	RATETAB_ENT(180, 0x80, 0),
	RATETAB_ENT(240, 0x100, 0),
	RATETAB_ENT(360, 0x200, 0),
	RATETAB_ENT(480, 0x400, 0),
	RATETAB_ENT(540, 0x800, 0),
};

/* The single band this hardware supports */
static struct ieee80211_supported_band lbs_band_2ghz = {
	.channels = lbs_2ghz_channels,
	.n_channels = ARRAY_SIZE(lbs_2ghz_channels),
	.bitrates = lbs_rates,
	.n_bitrates = ARRAY_SIZE(lbs_rates),
};


/* Cipher suites advertised to cfg80211: WEP-40/104, TKIP and CCMP */
static const u32 cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
};
77
78
79
80static int lbs_cfg_set_channel(struct wiphy *wiphy,
81 struct ieee80211_channel *chan,
82 enum nl80211_channel_type channel_type)
83{
84 struct lbs_private *priv = wiphy_priv(wiphy);
85 int ret = -ENOTSUPP;
86
87 lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type);
88
89 if (channel_type != NL80211_CHAN_NO_HT)
90 goto out;
91
92 ret = lbs_set_channel(priv, chan->hw_value);
93
94 out:
95 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
96 return ret;
97}
98
99
100
101
102static struct cfg80211_ops lbs_cfg80211_ops = {
103 .set_channel = lbs_cfg_set_channel,
104};
105
106
107/*
108 * At this time lbs_private *priv doesn't even exist, so we just allocate
109 * memory and don't initialize the wiphy further. This is postponed until we
110 * can talk to the firmware and happens at registration time in
111 * lbs_cfg_wiphy_register().
112 */
113struct wireless_dev *lbs_cfg_alloc(struct device *dev)
114{
115 int ret = 0;
116 struct wireless_dev *wdev;
117
118 lbs_deb_enter(LBS_DEB_CFG80211);
119
120 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
121 if (!wdev) {
122 dev_err(dev, "cannot allocate wireless device\n");
123 return ERR_PTR(-ENOMEM);
124 }
125
126 wdev->wiphy = wiphy_new(&lbs_cfg80211_ops, sizeof(struct lbs_private));
127 if (!wdev->wiphy) {
128 dev_err(dev, "cannot allocate wiphy\n");
129 ret = -ENOMEM;
130 goto err_wiphy_new;
131 }
132
133 lbs_deb_leave(LBS_DEB_CFG80211);
134 return wdev;
135
136 err_wiphy_new:
137 kfree(wdev);
138 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
139 return ERR_PTR(ret);
140}
141
142
143/*
144 * This function gets called after lbs_setup_firmware() determined the
145 * firmware capabilities. So we can set up the wiphy according to our
146 * hardware/firmware.
147 */
148int lbs_cfg_register(struct lbs_private *priv)
149{
150 struct wireless_dev *wdev = priv->wdev;
151 int ret;
152
153 lbs_deb_enter(LBS_DEB_CFG80211);
154
155 wdev->wiphy->max_scan_ssids = 1;
156 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
157
158 /* TODO: BIT(NL80211_IFTYPE_ADHOC); */
159 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
160
161 /* TODO: honor priv->regioncode */
162 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
163
164 /*
165 * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
166 * never seen a firmware without WPA
167 */
168 wdev->wiphy->cipher_suites = cipher_suites;
169 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
170
171 ret = wiphy_register(wdev->wiphy);
172 if (ret < 0)
173 lbs_pr_err("cannot register wiphy device\n");
174
175 ret = register_netdev(priv->dev);
176 if (ret)
177 lbs_pr_err("cannot register network device\n");
178
179 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
180 return ret;
181}
182
183
184void lbs_cfg_free(struct lbs_private *priv)
185{
186 struct wireless_dev *wdev = priv->wdev;
187
188 lbs_deb_enter(LBS_DEB_CFG80211);
189
190 if (!wdev)
191 return;
192
193 if (wdev->wiphy) {
194 wiphy_unregister(wdev->wiphy);
195 wiphy_free(wdev->wiphy);
196 }
197 kfree(wdev);
198}
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
new file mode 100644
index 000000000000..e09a193a34d6
--- /dev/null
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -0,0 +1,16 @@
1#ifndef __LBS_CFG80211_H__
2#define __LBS_CFG80211_H__
3
4#include "dev.h"
5
6struct wireless_dev *lbs_cfg_alloc(struct device *dev);
7int lbs_cfg_register(struct lbs_private *priv);
8void lbs_cfg_free(struct lbs_private *priv);
9
10int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
11 u8 ssid_len);
12int lbs_scan_networks(struct lbs_private *priv, int full_scan);
13void lbs_cfg_scan_worker(struct work_struct *work);
14
15
16#endif
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 0a324dcd264c..1065ce29cd08 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -3,21 +3,21 @@
3 * It prepares command and sends it to firmware when it is ready. 3 * It prepares command and sends it to firmware when it is ready.
4 */ 4 */
5 5
6#include <net/iw_handler.h>
7#include <net/lib80211.h> 6#include <net/lib80211.h>
8#include <linux/kfifo.h> 7#include <linux/kfifo.h>
9#include <linux/sched.h> 8#include <linux/sched.h>
9
10#include "host.h" 10#include "host.h"
11#include "hostcmd.h"
12#include "decl.h" 11#include "decl.h"
13#include "defs.h" 12#include "defs.h"
14#include "dev.h" 13#include "dev.h"
15#include "assoc.h" 14#include "assoc.h"
16#include "wext.h" 15#include "wext.h"
16#include "scan.h"
17#include "cmd.h" 17#include "cmd.h"
18 18
19static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
20 19
20static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
21 21
22/** 22/**
23 * @brief Simple callback that copies response back into command 23 * @brief Simple callback that copies response back into command
@@ -77,6 +77,30 @@ static u8 is_command_allowed_in_ps(u16 cmd)
77} 77}
78 78
79/** 79/**
80 * @brief This function checks if the command is allowed.
81 *
82 * @param priv A pointer to lbs_private structure
83 * @return allowed or not allowed.
84 */
85
86static int lbs_is_cmd_allowed(struct lbs_private *priv)
87{
88 int ret = 1;
89
90 lbs_deb_enter(LBS_DEB_CMD);
91
92 if (!priv->is_auto_deep_sleep_enabled) {
93 if (priv->is_deep_sleep) {
94 lbs_deb_cmd("command not allowed in deep sleep\n");
95 ret = 0;
96 }
97 }
98
99 lbs_deb_leave(LBS_DEB_CMD);
100 return ret;
101}
102
103/**
80 * @brief Updates the hardware details like MAC address and regulatory region 104 * @brief Updates the hardware details like MAC address and regulatory region
81 * 105 *
82 * @param priv A pointer to struct lbs_private structure 106 * @param priv A pointer to struct lbs_private structure
@@ -169,11 +193,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
169 goto out; 193 goto out;
170 } 194 }
171 195
172 if (lbs_set_universaltable(priv, 0)) {
173 ret = -1;
174 goto out;
175 }
176
177out: 196out:
178 lbs_deb_leave(LBS_DEB_CMD); 197 lbs_deb_leave(LBS_DEB_CMD);
179 return ret; 198 return ret;
@@ -222,7 +241,7 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
222 241
223 cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); 242 cmd->command = cpu_to_le16(CMD_802_11_PS_MODE);
224 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + 243 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) +
225 S_DS_GEN); 244 sizeof(struct cmd_header));
226 psm->action = cpu_to_le16(cmd_action); 245 psm->action = cpu_to_le16(cmd_action);
227 psm->multipledtim = 0; 246 psm->multipledtim = 0;
228 switch (cmd_action) { 247 switch (cmd_action) {
@@ -251,33 +270,6 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
251 return 0; 270 return 0;
252} 271}
253 272
254int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
255 uint16_t cmd_action, uint16_t *timeout)
256{
257 struct cmd_ds_802_11_inactivity_timeout cmd;
258 int ret;
259
260 lbs_deb_enter(LBS_DEB_CMD);
261
262 cmd.hdr.command = cpu_to_le16(CMD_802_11_INACTIVITY_TIMEOUT);
263 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
264
265 cmd.action = cpu_to_le16(cmd_action);
266
267 if (cmd_action == CMD_ACT_SET)
268 cmd.timeout = cpu_to_le16(*timeout);
269 else
270 cmd.timeout = 0;
271
272 ret = lbs_cmd_with_response(priv, CMD_802_11_INACTIVITY_TIMEOUT, &cmd);
273
274 if (!ret)
275 *timeout = le16_to_cpu(cmd.timeout);
276
277 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
278 return 0;
279}
280
281int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 273int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
282 struct sleep_params *sp) 274 struct sleep_params *sp)
283{ 275{
@@ -320,190 +312,53 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
320 return 0; 312 return 0;
321} 313}
322 314
323int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action, 315static int lbs_wait_for_ds_awake(struct lbs_private *priv)
324 struct assoc_request *assoc)
325{ 316{
326 struct cmd_ds_802_11_set_wep cmd;
327 int ret = 0; 317 int ret = 0;
328 318
329 lbs_deb_enter(LBS_DEB_CMD); 319 lbs_deb_enter(LBS_DEB_CMD);
330 320
331 memset(&cmd, 0, sizeof(cmd)); 321 if (priv->is_deep_sleep) {
332 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP); 322 if (!wait_event_interruptible_timeout(priv->ds_awake_q,
333 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 323 !priv->is_deep_sleep, (10 * HZ))) {
334 324 lbs_pr_err("ds_awake_q: timer expired\n");
335 cmd.action = cpu_to_le16(cmd_action); 325 ret = -1;
336
337 if (cmd_action == CMD_ACT_ADD) {
338 int i;
339
340 /* default tx key index */
341 cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
342 CMD_WEP_KEY_INDEX_MASK);
343
344 /* Copy key types and material to host command structure */
345 for (i = 0; i < 4; i++) {
346 struct enc_key *pkey = &assoc->wep_keys[i];
347
348 switch (pkey->len) {
349 case KEY_LEN_WEP_40:
350 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
351 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
352 lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
353 break;
354 case KEY_LEN_WEP_104:
355 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
356 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
357 lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
358 break;
359 case 0:
360 break;
361 default:
362 lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
363 i, pkey->len);
364 ret = -1;
365 goto done;
366 break;
367 }
368 } 326 }
369 } else if (cmd_action == CMD_ACT_REMOVE) {
370 /* ACT_REMOVE clears _all_ WEP keys */
371
372 /* default tx key index */
373 cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
374 CMD_WEP_KEY_INDEX_MASK);
375 lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
376 }
377
378 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
379done:
380 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
381 return ret;
382}
383
384int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
385 uint16_t *enable)
386{
387 struct cmd_ds_802_11_enable_rsn cmd;
388 int ret;
389
390 lbs_deb_enter(LBS_DEB_CMD);
391
392 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
393 cmd.action = cpu_to_le16(cmd_action);
394
395 if (cmd_action == CMD_ACT_GET)
396 cmd.enable = 0;
397 else {
398 if (*enable)
399 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
400 else
401 cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
402 lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
403 } 327 }
404 328
405 ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
406 if (!ret && cmd_action == CMD_ACT_GET)
407 *enable = le16_to_cpu(cmd.enable);
408
409 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 329 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
410 return ret; 330 return ret;
411} 331}
412 332
413static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam, 333int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep)
414 struct enc_key *key)
415{
416 lbs_deb_enter(LBS_DEB_CMD);
417
418 if (key->flags & KEY_INFO_WPA_ENABLED)
419 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
420 if (key->flags & KEY_INFO_WPA_UNICAST)
421 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
422 if (key->flags & KEY_INFO_WPA_MCAST)
423 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
424
425 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
426 keyparam->keytypeid = cpu_to_le16(key->type);
427 keyparam->keylen = cpu_to_le16(key->len);
428 memcpy(keyparam->key, key->key, key->len);
429
430 /* Length field doesn't include the {type,length} header */
431 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
432 lbs_deb_leave(LBS_DEB_CMD);
433}
434
435int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
436 struct assoc_request *assoc)
437{ 334{
438 struct cmd_ds_802_11_key_material cmd; 335 int ret = 0;
439 int ret = 0;
440 int index = 0;
441 336
442 lbs_deb_enter(LBS_DEB_CMD); 337 lbs_deb_enter(LBS_DEB_CMD);
443 338
444 cmd.action = cpu_to_le16(cmd_action); 339 if (deep_sleep) {
445 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 340 if (priv->is_deep_sleep != 1) {
446 341 lbs_deb_cmd("deep sleep: sleep\n");
447 if (cmd_action == CMD_ACT_GET) { 342 BUG_ON(!priv->enter_deep_sleep);
448 cmd.hdr.size = cpu_to_le16(S_DS_GEN + 2); 343 ret = priv->enter_deep_sleep(priv);
449 } else { 344 if (!ret) {
450 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet)); 345 netif_stop_queue(priv->dev);
451 346 netif_carrier_off(priv->dev);
452 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) { 347 }
453 set_one_wpa_key(&cmd.keyParamSet[index], 348 } else {
454 &assoc->wpa_unicast_key); 349 lbs_pr_err("deep sleep: already enabled\n");
455 index++;
456 }
457
458 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
459 set_one_wpa_key(&cmd.keyParamSet[index],
460 &assoc->wpa_mcast_key);
461 index++;
462 } 350 }
463 351 } else {
464 /* The common header and as many keys as we included */ 352 if (priv->is_deep_sleep) {
465 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd), 353 lbs_deb_cmd("deep sleep: wakeup\n");
466 keyParamSet[index])); 354 BUG_ON(!priv->exit_deep_sleep);
467 } 355 ret = priv->exit_deep_sleep(priv);
468 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd); 356 if (!ret) {
469 /* Copy the returned key to driver private data */ 357 ret = lbs_wait_for_ds_awake(priv);
470 if (!ret && cmd_action == CMD_ACT_GET) { 358 if (ret)
471 void *buf_ptr = cmd.keyParamSet; 359 lbs_pr_err("deep sleep: wakeup"
472 void *resp_end = &(&cmd)[1]; 360 "failed\n");
473 361 }
474 while (buf_ptr < resp_end) {
475 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
476 struct enc_key *key;
477 uint16_t param_set_len = le16_to_cpu(keyparam->length);
478 uint16_t key_len = le16_to_cpu(keyparam->keylen);
479 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
480 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
481 void *end;
482
483 end = (void *)keyparam + sizeof(keyparam->type)
484 + sizeof(keyparam->length) + param_set_len;
485
486 /* Make sure we don't access past the end of the IEs */
487 if (end > resp_end)
488 break;
489
490 if (key_flags & KEY_INFO_WPA_UNICAST)
491 key = &priv->wpa_unicast_key;
492 else if (key_flags & KEY_INFO_WPA_MCAST)
493 key = &priv->wpa_mcast_key;
494 else
495 break;
496
497 /* Copy returned key into driver */
498 memset(key, 0, sizeof(struct enc_key));
499 if (key_len > sizeof(key->key))
500 break;
501 key->type = key_type;
502 key->flags = key_flags;
503 key->len = key_len;
504 memcpy(key->key, keyparam->key, key->len);
505
506 buf_ptr = end + 1;
507 } 362 }
508 } 363 }
509 364
@@ -535,7 +390,7 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
535 switch (oid) { 390 switch (oid) {
536 case SNMP_MIB_OID_BSS_TYPE: 391 case SNMP_MIB_OID_BSS_TYPE:
537 cmd.bufsize = cpu_to_le16(sizeof(u8)); 392 cmd.bufsize = cpu_to_le16(sizeof(u8));
538 cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1; 393 cmd.value[0] = val;
539 break; 394 break;
540 case SNMP_MIB_OID_11D_ENABLE: 395 case SNMP_MIB_OID_11D_ENABLE:
541 case SNMP_MIB_OID_FRAG_THRESHOLD: 396 case SNMP_MIB_OID_FRAG_THRESHOLD:
@@ -588,13 +443,7 @@ int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
588 443
589 switch (le16_to_cpu(cmd.bufsize)) { 444 switch (le16_to_cpu(cmd.bufsize)) {
590 case sizeof(u8): 445 case sizeof(u8):
591 if (oid == SNMP_MIB_OID_BSS_TYPE) { 446 *out_val = cmd.value[0];
592 if (cmd.value[0] == 2)
593 *out_val = IW_MODE_ADHOC;
594 else
595 *out_val = IW_MODE_INFRA;
596 } else
597 *out_val = cmd.value[0];
598 break; 447 break;
599 case sizeof(u16): 448 case sizeof(u16):
600 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value))); 449 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
@@ -681,7 +530,7 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
681 cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); 530 cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE);
682 cmd->size = 531 cmd->size =
683 cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) + 532 cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) +
684 S_DS_GEN); 533 sizeof(struct cmd_header));
685 534
686 monitor->action = cpu_to_le16(cmd_action); 535 monitor->action = cpu_to_le16(cmd_action);
687 if (cmd_action == CMD_ACT_SET) { 536 if (cmd_action == CMD_ACT_SET) {
@@ -692,111 +541,6 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
692 return 0; 541 return 0;
693} 542}
694 543
695static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
696{
697/* Bit Rate
698* 15:13 Reserved
699* 12 54 Mbps
700* 11 48 Mbps
701* 10 36 Mbps
702* 9 24 Mbps
703* 8 18 Mbps
704* 7 12 Mbps
705* 6 9 Mbps
706* 5 6 Mbps
707* 4 Reserved
708* 3 11 Mbps
709* 2 5.5 Mbps
710* 1 2 Mbps
711* 0 1 Mbps
712**/
713
714 uint16_t ratemask;
715 int i = lbs_data_rate_to_fw_index(rate);
716 if (lower_rates_ok)
717 ratemask = (0x1fef >> (12 - i));
718 else
719 ratemask = (1 << i);
720 return cpu_to_le16(ratemask);
721}
722
723int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
724 uint16_t cmd_action)
725{
726 struct cmd_ds_802_11_rate_adapt_rateset cmd;
727 int ret;
728
729 lbs_deb_enter(LBS_DEB_CMD);
730
731 if (!priv->cur_rate && !priv->enablehwauto)
732 return -EINVAL;
733
734 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
735
736 cmd.action = cpu_to_le16(cmd_action);
737 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
738 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
739 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
740 if (!ret && cmd_action == CMD_ACT_GET) {
741 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
742 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
743 }
744
745 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
746 return ret;
747}
748EXPORT_SYMBOL_GPL(lbs_cmd_802_11_rate_adapt_rateset);
749
750/**
751 * @brief Set the data rate
752 *
753 * @param priv A pointer to struct lbs_private structure
754 * @param rate The desired data rate, or 0 to clear a locked rate
755 *
756 * @return 0 on success, error on failure
757 */
758int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
759{
760 struct cmd_ds_802_11_data_rate cmd;
761 int ret = 0;
762
763 lbs_deb_enter(LBS_DEB_CMD);
764
765 memset(&cmd, 0, sizeof(cmd));
766 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
767
768 if (rate > 0) {
769 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
770 cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
771 if (cmd.rates[0] == 0) {
772 lbs_deb_cmd("DATA_RATE: invalid requested rate of"
773 " 0x%02X\n", rate);
774 ret = 0;
775 goto out;
776 }
777 lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
778 } else {
779 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
780 lbs_deb_cmd("DATA_RATE: setting auto\n");
781 }
782
783 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
784 if (ret)
785 goto out;
786
787 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof (cmd));
788
789 /* FIXME: get actual rates FW can do if this command actually returns
790 * all data rates supported.
791 */
792 priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
793 lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
794
795out:
796 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
797 return ret;
798}
799
800/** 544/**
801 * @brief Get the radio channel 545 * @brief Get the radio channel
802 * 546 *
@@ -804,7 +548,7 @@ out:
804 * 548 *
805 * @return The channel on success, error on failure 549 * @return The channel on success, error on failure
806 */ 550 */
807int lbs_get_channel(struct lbs_private *priv) 551static int lbs_get_channel(struct lbs_private *priv)
808{ 552{
809 struct cmd_ds_802_11_rf_channel cmd; 553 struct cmd_ds_802_11_rf_channel cmd;
810 int ret = 0; 554 int ret = 0;
@@ -836,7 +580,7 @@ int lbs_update_channel(struct lbs_private *priv)
836 580
837 ret = lbs_get_channel(priv); 581 ret = lbs_get_channel(priv);
838 if (ret > 0) { 582 if (ret > 0) {
839 priv->curbssparams.channel = ret; 583 priv->channel = ret;
840 ret = 0; 584 ret = 0;
841 } 585 }
842 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 586 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -855,7 +599,7 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
855{ 599{
856 struct cmd_ds_802_11_rf_channel cmd; 600 struct cmd_ds_802_11_rf_channel cmd;
857#ifdef DEBUG 601#ifdef DEBUG
858 u8 old_channel = priv->curbssparams.channel; 602 u8 old_channel = priv->channel;
859#endif 603#endif
860 int ret = 0; 604 int ret = 0;
861 605
@@ -870,36 +614,15 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
870 if (ret) 614 if (ret)
871 goto out; 615 goto out;
872 616
873 priv->curbssparams.channel = (uint8_t) le16_to_cpu(cmd.channel); 617 priv->channel = (uint8_t) le16_to_cpu(cmd.channel);
874 lbs_deb_cmd("channel switch from %d to %d\n", old_channel, 618 lbs_deb_cmd("channel switch from %d to %d\n", old_channel,
875 priv->curbssparams.channel); 619 priv->channel);
876 620
877out: 621out:
878 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 622 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
879 return ret; 623 return ret;
880} 624}
881 625
882static int lbs_cmd_802_11_rssi(struct lbs_private *priv,
883 struct cmd_ds_command *cmd)
884{
885
886 lbs_deb_enter(LBS_DEB_CMD);
887 cmd->command = cpu_to_le16(CMD_802_11_RSSI);
888 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + S_DS_GEN);
889 cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
890
891 /* reset Beacon SNR/NF/RSSI values */
892 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
893 priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
894 priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
895 priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
896 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
897 priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
898
899 lbs_deb_leave(LBS_DEB_CMD);
900 return 0;
901}
902
903static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr, 626static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
904 u8 cmd_action, void *pdata_buf) 627 u8 cmd_action, void *pdata_buf)
905{ 628{
@@ -916,7 +639,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
916 639
917 cmdptr->size = 640 cmdptr->size =
918 cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access) 641 cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access)
919 + S_DS_GEN); 642 + sizeof(struct cmd_header));
920 macreg = 643 macreg =
921 (struct cmd_ds_mac_reg_access *)&cmdptr->params. 644 (struct cmd_ds_mac_reg_access *)&cmdptr->params.
922 macreg; 645 macreg;
@@ -935,7 +658,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
935 cmdptr->size = 658 cmdptr->size =
936 cpu_to_le16(sizeof 659 cpu_to_le16(sizeof
937 (struct cmd_ds_bbp_reg_access) 660 (struct cmd_ds_bbp_reg_access)
938 + S_DS_GEN); 661 + sizeof(struct cmd_header));
939 bbpreg = 662 bbpreg =
940 (struct cmd_ds_bbp_reg_access *)&cmdptr->params. 663 (struct cmd_ds_bbp_reg_access *)&cmdptr->params.
941 bbpreg; 664 bbpreg;
@@ -954,7 +677,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
954 cmdptr->size = 677 cmdptr->size =
955 cpu_to_le16(sizeof 678 cpu_to_le16(sizeof
956 (struct cmd_ds_rf_reg_access) + 679 (struct cmd_ds_rf_reg_access) +
957 S_DS_GEN); 680 sizeof(struct cmd_header));
958 rfreg = 681 rfreg =
959 (struct cmd_ds_rf_reg_access *)&cmdptr->params. 682 (struct cmd_ds_rf_reg_access *)&cmdptr->params.
960 rfreg; 683 rfreg;
@@ -981,7 +704,8 @@ static int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
981 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 704 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
982 705
983 cmd->command = cpu_to_le16(CMD_BT_ACCESS); 706 cmd->command = cpu_to_le16(CMD_BT_ACCESS);
984 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + S_DS_GEN); 707 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) +
708 sizeof(struct cmd_header));
985 cmd->result = 0; 709 cmd->result = 0;
986 bt_access->action = cpu_to_le16(cmd_action); 710 bt_access->action = cpu_to_le16(cmd_action);
987 711
@@ -1018,7 +742,8 @@ static int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
1018 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 742 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
1019 743
1020 cmd->command = cpu_to_le16(CMD_FWT_ACCESS); 744 cmd->command = cpu_to_le16(CMD_FWT_ACCESS);
1021 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + S_DS_GEN); 745 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) +
746 sizeof(struct cmd_header));
1022 cmd->result = 0; 747 cmd->result = 0;
1023 748
1024 if (pdata_buf) 749 if (pdata_buf)
@@ -1124,7 +849,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1124 ie->val.mesh_id_len = priv->mesh_ssid_len; 849 ie->val.mesh_id_len = priv->mesh_ssid_len;
1125 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len); 850 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
1126 ie->len = sizeof(struct mrvl_meshie_val) - 851 ie->len = sizeof(struct mrvl_meshie_val) -
1127 IW_ESSID_MAX_SIZE + priv->mesh_ssid_len; 852 IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len;
1128 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val)); 853 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
1129 break; 854 break;
1130 case CMD_ACT_MESH_CONFIG_STOP: 855 case CMD_ACT_MESH_CONFIG_STOP:
@@ -1139,27 +864,6 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1139 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); 864 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1140} 865}
1141 866
1142static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
1143 struct cmd_ds_command *cmd,
1144 u16 cmd_action)
1145{
1146 struct cmd_ds_802_11_beacon_control
1147 *bcn_ctrl = &cmd->params.bcn_ctrl;
1148
1149 lbs_deb_enter(LBS_DEB_CMD);
1150 cmd->size =
1151 cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
1152 + S_DS_GEN);
1153 cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
1154
1155 bcn_ctrl->action = cpu_to_le16(cmd_action);
1156 bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
1157 bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
1158
1159 lbs_deb_leave(LBS_DEB_CMD);
1160 return 0;
1161}
1162
1163static void lbs_queue_cmd(struct lbs_private *priv, 867static void lbs_queue_cmd(struct lbs_private *priv,
1164 struct cmd_ctrl_node *cmdnode) 868 struct cmd_ctrl_node *cmdnode)
1165{ 869{
@@ -1243,8 +947,17 @@ static void lbs_submit_command(struct lbs_private *priv,
1243 timeo = HZ/4; 947 timeo = HZ/4;
1244 } 948 }
1245 949
1246 /* Setup the timer after transmit command */ 950 if (command == CMD_802_11_DEEP_SLEEP) {
1247 mod_timer(&priv->command_timer, jiffies + timeo); 951 if (priv->is_auto_deep_sleep_enabled) {
952 priv->wakeup_dev_required = 1;
953 priv->dnld_sent = 0;
954 }
955 priv->is_deep_sleep = 1;
956 lbs_complete_command(priv, cmdnode, 0);
957 } else {
958 /* Setup the timer after transmit command */
959 mod_timer(&priv->command_timer, jiffies + timeo);
960 }
1248 961
1249 lbs_deb_leave(LBS_DEB_HOST); 962 lbs_deb_leave(LBS_DEB_HOST);
1250} 963}
@@ -1391,6 +1104,11 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1391 goto done; 1104 goto done;
1392 } 1105 }
1393 1106
1107 if (!lbs_is_cmd_allowed(priv)) {
1108 ret = -EBUSY;
1109 goto done;
1110 }
1111
1394 cmdnode = lbs_get_cmd_ctrl_node(priv); 1112 cmdnode = lbs_get_cmd_ctrl_node(priv);
1395 1113
1396 if (cmdnode == NULL) { 1114 if (cmdnode == NULL) {
@@ -1441,7 +1159,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1441 1159
1442 cmdptr->command = cpu_to_le16(cmd_no); 1160 cmdptr->command = cpu_to_le16(cmd_no);
1443 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) + 1161 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) +
1444 S_DS_GEN); 1162 sizeof(struct cmd_header));
1445 1163
1446 memmove(&cmdptr->params.afc, 1164 memmove(&cmdptr->params.afc,
1447 pdata_buf, sizeof(struct cmd_ds_802_11_afc)); 1165 pdata_buf, sizeof(struct cmd_ds_802_11_afc));
@@ -1449,45 +1167,17 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1449 ret = 0; 1167 ret = 0;
1450 goto done; 1168 goto done;
1451 1169
1452 case CMD_802_11D_DOMAIN_INFO:
1453 ret = lbs_cmd_802_11d_domain_info(priv, cmdptr,
1454 cmd_no, cmd_action);
1455 break;
1456
1457 case CMD_802_11_TPC_CFG: 1170 case CMD_802_11_TPC_CFG:
1458 cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG); 1171 cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
1459 cmdptr->size = 1172 cmdptr->size =
1460 cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) + 1173 cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) +
1461 S_DS_GEN); 1174 sizeof(struct cmd_header));
1462 1175
1463 memmove(&cmdptr->params.tpccfg, 1176 memmove(&cmdptr->params.tpccfg,
1464 pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg)); 1177 pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg));
1465 1178
1466 ret = 0; 1179 ret = 0;
1467 break; 1180 break;
1468 case CMD_802_11_LED_GPIO_CTRL:
1469 {
1470 struct mrvl_ie_ledgpio *gpio =
1471 (struct mrvl_ie_ledgpio*)
1472 cmdptr->params.ledgpio.data;
1473
1474 memmove(&cmdptr->params.ledgpio,
1475 pdata_buf,
1476 sizeof(struct cmd_ds_802_11_led_ctrl));
1477
1478 cmdptr->command =
1479 cpu_to_le16(CMD_802_11_LED_GPIO_CTRL);
1480
1481#define ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN 8
1482 cmdptr->size =
1483 cpu_to_le16(le16_to_cpu(gpio->header.len)
1484 + S_DS_GEN
1485 + ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN);
1486 gpio->header.len = gpio->header.len;
1487
1488 ret = 0;
1489 break;
1490 }
1491 1181
1492 case CMD_BT_ACCESS: 1182 case CMD_BT_ACCESS:
1493 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); 1183 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
@@ -1497,15 +1187,13 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1497 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); 1187 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1498 break; 1188 break;
1499 1189
1500 case CMD_GET_TSF:
1501 cmdptr->command = cpu_to_le16(CMD_GET_TSF);
1502 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) +
1503 S_DS_GEN);
1504 ret = 0;
1505 break;
1506 case CMD_802_11_BEACON_CTRL: 1190 case CMD_802_11_BEACON_CTRL:
1507 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1191 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1508 break; 1192 break;
1193 case CMD_802_11_DEEP_SLEEP:
1194 cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP);
1195 cmdptr->size = cpu_to_le16(sizeof(struct cmd_header));
1196 break;
1509 default: 1197 default:
1510 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no); 1198 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
1511 ret = -1; 1199 ret = -1;
@@ -1823,30 +1511,6 @@ done:
1823 return ret; 1511 return ret;
1824} 1512}
1825 1513
1826void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
1827{
1828 union iwreq_data iwrq;
1829 u8 buf[50];
1830
1831 lbs_deb_enter(LBS_DEB_WEXT);
1832
1833 memset(&iwrq, 0, sizeof(union iwreq_data));
1834 memset(buf, 0, sizeof(buf));
1835
1836 snprintf(buf, sizeof(buf) - 1, "%s", str);
1837
1838 iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
1839
1840 /* Send Event to upper layer */
1841 lbs_deb_wext("event indication string %s\n", (char *)buf);
1842 lbs_deb_wext("event indication length %d\n", iwrq.data.length);
1843 lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
1844
1845 wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
1846
1847 lbs_deb_leave(LBS_DEB_WEXT);
1848}
1849
1850static void lbs_send_confirmsleep(struct lbs_private *priv) 1514static void lbs_send_confirmsleep(struct lbs_private *priv)
1851{ 1515{
1852 unsigned long flags; 1516 unsigned long flags;
@@ -2024,7 +1688,7 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
2024} 1688}
2025 1689
2026 1690
2027static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, 1691struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
2028 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, 1692 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
2029 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 1693 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
2030 unsigned long callback_arg) 1694 unsigned long callback_arg)
@@ -2039,6 +1703,11 @@ static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
2039 goto done; 1703 goto done;
2040 } 1704 }
2041 1705
1706 if (!lbs_is_cmd_allowed(priv)) {
1707 cmdnode = ERR_PTR(-EBUSY);
1708 goto done;
1709 }
1710
2042 cmdnode = lbs_get_cmd_ctrl_node(priv); 1711 cmdnode = lbs_get_cmd_ctrl_node(priv);
2043 if (cmdnode == NULL) { 1712 if (cmdnode == NULL) {
2044 lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); 1713 lbs_deb_host("PREP_CMD: cmdnode is NULL\n");
@@ -2117,5 +1786,3 @@ done:
2117 return ret; 1786 return ret;
2118} 1787}
2119EXPORT_SYMBOL_GPL(__lbs_cmd); 1788EXPORT_SYMBOL_GPL(__lbs_cmd);
2120
2121
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 392e578ca095..2862748aef70 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -3,11 +3,30 @@
3#ifndef _LBS_CMD_H_ 3#ifndef _LBS_CMD_H_
4#define _LBS_CMD_H_ 4#define _LBS_CMD_H_
5 5
6#include "hostcmd.h" 6#include "host.h"
7#include "dev.h" 7#include "dev.h"
8 8
9
10/* Command & response transfer between host and card */
11
12struct cmd_ctrl_node {
13 struct list_head list;
14 int result;
15 /* command response */
16 int (*callback)(struct lbs_private *,
17 unsigned long,
18 struct cmd_header *);
19 unsigned long callback_arg;
20 /* command data */
21 struct cmd_header *cmdbuf;
22 /* wait queue */
23 u16 cmdwaitqwoken;
24 wait_queue_head_t cmdwait_q;
25};
26
27
9/* lbs_cmd() infers the size of the buffer to copy data back into, from 28/* lbs_cmd() infers the size of the buffer to copy data back into, from
10 the size of the target of the pointer. Since the command to be sent 29 the size of the target of the pointer. Since the command to be sent
11 may often be smaller, that size is set in cmd->size by the caller.*/ 30 may often be smaller, that size is set in cmd->size by the caller.*/
12#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \ 31#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \
13 uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \ 32 uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \
@@ -18,6 +37,11 @@
18#define lbs_cmd_with_response(priv, cmdnr, cmd) \ 37#define lbs_cmd_with_response(priv, cmdnr, cmd) \
19 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd)) 38 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd))
20 39
40int lbs_prepare_and_send_command(struct lbs_private *priv,
41 u16 cmd_no,
42 u16 cmd_action,
43 u16 wait_option, u32 cmd_oid, void *pdata_buf);
44
21void lbs_cmd_async(struct lbs_private *priv, uint16_t command, 45void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
22 struct cmd_header *in_cmd, int in_cmd_size); 46 struct cmd_header *in_cmd, int in_cmd_size);
23 47
@@ -26,62 +50,93 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 50 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
27 unsigned long callback_arg); 51 unsigned long callback_arg);
28 52
29int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 53struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
30 int8_t p1, int8_t p2); 54 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
55 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
56 unsigned long callback_arg);
31 57
32int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, 58int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
33 int8_t p2, int usesnr); 59 struct cmd_header *resp);
34 60
35int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 61int lbs_allocate_cmd_buffer(struct lbs_private *priv);
36 int8_t p1, int8_t p2); 62int lbs_free_cmd_buffer(struct lbs_private *priv);
37 63
38int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, 64int lbs_execute_next_command(struct lbs_private *priv);
39 int8_t p2, int usesnr); 65void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
66 int result);
67int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
40 68
41int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
42 struct cmd_header *resp);
43 69
44int lbs_update_hw_spec(struct lbs_private *priv); 70/* From cmdresp.c */
45 71
46int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, 72void lbs_mac_event_disconnected(struct lbs_private *priv);
47 struct cmd_ds_mesh_access *cmd);
48 73
49int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
50 74
51int lbs_get_channel(struct lbs_private *priv); 75
76/* Events */
77
78int lbs_process_event(struct lbs_private *priv, u32 event);
79
80
81/* Actual commands */
82
83int lbs_update_hw_spec(struct lbs_private *priv);
84
52int lbs_set_channel(struct lbs_private *priv, u8 channel); 85int lbs_set_channel(struct lbs_private *priv, u8 channel);
53 86
54int lbs_mesh_config_send(struct lbs_private *priv, 87int lbs_update_channel(struct lbs_private *priv);
55 struct cmd_ds_mesh_config *cmd,
56 uint16_t action, uint16_t type);
57int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
58 88
59int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, 89int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
60 struct wol_config *p_wol_config); 90 struct wol_config *p_wol_config);
61int lbs_suspend(struct lbs_private *priv);
62void lbs_resume(struct lbs_private *priv);
63 91
64int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
65 uint16_t cmd_action);
66int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
67 uint16_t cmd_action, uint16_t *timeout);
68int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 92int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
69 struct sleep_params *sp); 93 struct sleep_params *sp);
70int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
71 struct assoc_request *assoc);
72int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
73 uint16_t *enable);
74int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
75 struct assoc_request *assoc);
76 94
77int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, 95void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
78 s16 *maxlevel); 96
79int lbs_set_tx_power(struct lbs_private *priv, s16 dbm); 97void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
98
99void lbs_ps_confirm_sleep(struct lbs_private *priv);
80 100
81int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); 101int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
82 102
103void lbs_set_mac_control(struct lbs_private *priv);
104
105int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
106 s16 *maxlevel);
107
83int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val); 108int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
84 109
85int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); 110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
86 111
112
113/* Mesh related */
114
115int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
116 struct cmd_ds_mesh_access *cmd);
117
118int lbs_mesh_config_send(struct lbs_private *priv,
119 struct cmd_ds_mesh_config *cmd,
120 uint16_t action, uint16_t type);
121
122int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
123
124
125/* Commands only used in wext.c, assoc. and scan.c */
126
127int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
128 int8_t p1, int8_t p2);
129
130int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
131 int8_t p2, int usesnr);
132
133int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
134
135int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
136 uint16_t cmd_action);
137
138int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
139
140int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
141
87#endif /* _LBS_CMD_H */ 142#endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 23f684337fdd..21d57690c20a 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -11,6 +11,7 @@
11 11
12#include "host.h" 12#include "host.h"
13#include "decl.h" 13#include "decl.h"
14#include "cmd.h"
14#include "defs.h" 15#include "defs.h"
15#include "dev.h" 16#include "dev.h"
16#include "assoc.h" 17#include "assoc.h"
@@ -26,23 +27,17 @@
26 */ 27 */
27void lbs_mac_event_disconnected(struct lbs_private *priv) 28void lbs_mac_event_disconnected(struct lbs_private *priv)
28{ 29{
29 union iwreq_data wrqu;
30
31 if (priv->connect_status != LBS_CONNECTED) 30 if (priv->connect_status != LBS_CONNECTED)
32 return; 31 return;
33 32
34 lbs_deb_enter(LBS_DEB_ASSOC); 33 lbs_deb_enter(LBS_DEB_ASSOC);
35 34
36 memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
37 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
38
39 /* 35 /*
40 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms. 36 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms.
41 * It causes problem in the Supplicant 37 * It causes problem in the Supplicant
42 */ 38 */
43
44 msleep_interruptible(1000); 39 msleep_interruptible(1000);
45 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 40 lbs_send_disconnect_notification(priv);
46 41
47 /* report disconnect to upper layer */ 42 /* report disconnect to upper layer */
48 netif_stop_queue(priv->dev); 43 netif_stop_queue(priv->dev);
@@ -67,7 +62,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
67 * no longer valid. 62 * no longer valid.
68 */ 63 */
69 memset(&priv->curbssparams.bssid, 0, ETH_ALEN); 64 memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
70 memset(&priv->curbssparams.ssid, 0, IW_ESSID_MAX_SIZE); 65 memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
71 priv->curbssparams.ssid_len = 0; 66 priv->curbssparams.ssid_len = 0;
72 67
73 if (priv->psstate != PS_STATE_FULL_POWER) { 68 if (priv->psstate != PS_STATE_FULL_POWER) {
@@ -78,32 +73,6 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
78 lbs_deb_leave(LBS_DEB_ASSOC); 73 lbs_deb_leave(LBS_DEB_ASSOC);
79} 74}
80 75
81/**
82 * @brief This function handles MIC failure event.
83 *
84 * @param priv A pointer to struct lbs_private structure
85 * @para event the event id
86 * @return n/a
87 */
88static void handle_mic_failureevent(struct lbs_private *priv, u32 event)
89{
90 char buf[50];
91
92 lbs_deb_enter(LBS_DEB_CMD);
93 memset(buf, 0, sizeof(buf));
94
95 sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
96
97 if (event == MACREG_INT_CODE_MIC_ERR_UNICAST) {
98 strcat(buf, "unicast ");
99 } else {
100 strcat(buf, "multicast ");
101 }
102
103 lbs_send_iwevcustom_event(priv, buf);
104 lbs_deb_leave(LBS_DEB_CMD);
105}
106
107static int lbs_ret_reg_access(struct lbs_private *priv, 76static int lbs_ret_reg_access(struct lbs_private *priv,
108 u16 type, struct cmd_ds_command *resp) 77 u16 type, struct cmd_ds_command *resp)
109{ 78{
@@ -147,53 +116,6 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
147 return ret; 116 return ret;
148} 117}
149 118
150static int lbs_ret_802_11_rssi(struct lbs_private *priv,
151 struct cmd_ds_command *resp)
152{
153 struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
154
155 lbs_deb_enter(LBS_DEB_CMD);
156
157 /* store the non average value */
158 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
159 priv->NF[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->noisefloor);
160
161 priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
162 priv->NF[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgnoisefloor);
163
164 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
165 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
166 priv->NF[TYPE_BEACON][TYPE_NOAVG]);
167
168 priv->RSSI[TYPE_BEACON][TYPE_AVG] =
169 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
170 priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
171
172 lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
173 priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
174 priv->RSSI[TYPE_BEACON][TYPE_AVG]);
175
176 lbs_deb_leave(LBS_DEB_CMD);
177 return 0;
178}
179
180static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv,
181 struct cmd_ds_command *resp)
182{
183 struct cmd_ds_802_11_beacon_control *bcn_ctrl =
184 &resp->params.bcn_ctrl;
185
186 lbs_deb_enter(LBS_DEB_CMD);
187
188 if (bcn_ctrl->action == CMD_ACT_GET) {
189 priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
190 priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
191 }
192
193 lbs_deb_enter(LBS_DEB_CMD);
194 return 0;
195}
196
197static inline int handle_cmd_response(struct lbs_private *priv, 119static inline int handle_cmd_response(struct lbs_private *priv,
198 struct cmd_header *cmd_response) 120 struct cmd_header *cmd_response)
199{ 121{
@@ -227,29 +149,13 @@ static inline int handle_cmd_response(struct lbs_private *priv,
227 ret = lbs_ret_802_11_rssi(priv, resp); 149 ret = lbs_ret_802_11_rssi(priv, resp);
228 break; 150 break;
229 151
230 case CMD_RET(CMD_802_11D_DOMAIN_INFO):
231 ret = lbs_ret_802_11d_domain_info(resp);
232 break;
233
234 case CMD_RET(CMD_802_11_TPC_CFG): 152 case CMD_RET(CMD_802_11_TPC_CFG):
235 spin_lock_irqsave(&priv->driver_lock, flags); 153 spin_lock_irqsave(&priv->driver_lock, flags);
236 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg, 154 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
237 sizeof(struct cmd_ds_802_11_tpc_cfg)); 155 sizeof(struct cmd_ds_802_11_tpc_cfg));
238 spin_unlock_irqrestore(&priv->driver_lock, flags); 156 spin_unlock_irqrestore(&priv->driver_lock, flags);
239 break; 157 break;
240 case CMD_RET(CMD_802_11_LED_GPIO_CTRL):
241 spin_lock_irqsave(&priv->driver_lock, flags);
242 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.ledgpio,
243 sizeof(struct cmd_ds_802_11_led_ctrl));
244 spin_unlock_irqrestore(&priv->driver_lock, flags);
245 break;
246 158
247 case CMD_RET(CMD_GET_TSF):
248 spin_lock_irqsave(&priv->driver_lock, flags);
249 memcpy((void *)priv->cur_cmd->callback_arg,
250 &resp->params.gettsf.tsfvalue, sizeof(u64));
251 spin_unlock_irqrestore(&priv->driver_lock, flags);
252 break;
253 case CMD_RET(CMD_BT_ACCESS): 159 case CMD_RET(CMD_BT_ACCESS):
254 spin_lock_irqsave(&priv->driver_lock, flags); 160 spin_lock_irqsave(&priv->driver_lock, flags);
255 if (priv->cur_cmd->callback_arg) 161 if (priv->cur_cmd->callback_arg)
@@ -505,9 +411,21 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
505 411
506 case MACREG_INT_CODE_HOST_AWAKE: 412 case MACREG_INT_CODE_HOST_AWAKE:
507 lbs_deb_cmd("EVENT: host awake\n"); 413 lbs_deb_cmd("EVENT: host awake\n");
414 if (priv->reset_deep_sleep_wakeup)
415 priv->reset_deep_sleep_wakeup(priv);
416 priv->is_deep_sleep = 0;
508 lbs_send_confirmwake(priv); 417 lbs_send_confirmwake(priv);
509 break; 418 break;
510 419
420 case MACREG_INT_CODE_DEEP_SLEEP_AWAKE:
421 if (priv->reset_deep_sleep_wakeup)
422 priv->reset_deep_sleep_wakeup(priv);
423 lbs_deb_cmd("EVENT: ds awake\n");
424 priv->is_deep_sleep = 0;
425 priv->wakeup_dev_required = 0;
426 wake_up_interruptible(&priv->ds_awake_q);
427 break;
428
511 case MACREG_INT_CODE_PS_AWAKE: 429 case MACREG_INT_CODE_PS_AWAKE:
512 lbs_deb_cmd("EVENT: ps awake\n"); 430 lbs_deb_cmd("EVENT: ps awake\n");
513 /* handle unexpected PS AWAKE event */ 431 /* handle unexpected PS AWAKE event */
@@ -533,12 +451,12 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
533 451
534 case MACREG_INT_CODE_MIC_ERR_UNICAST: 452 case MACREG_INT_CODE_MIC_ERR_UNICAST:
535 lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n"); 453 lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n");
536 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_UNICAST); 454 lbs_send_mic_failureevent(priv, event);
537 break; 455 break;
538 456
539 case MACREG_INT_CODE_MIC_ERR_MULTICAST: 457 case MACREG_INT_CODE_MIC_ERR_MULTICAST:
540 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n"); 458 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n");
541 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_MULTICAST); 459 lbs_send_mic_failureevent(priv, event);
542 break; 460 break;
543 461
544 case MACREG_INT_CODE_MIB_CHANGED: 462 case MACREG_INT_CODE_MIB_CHANGED:
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 893a55ca344a..587b0cb0088d 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -451,10 +451,12 @@ static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf,
451 CMD_MAC_REG_ACCESS, 0, 451 CMD_MAC_REG_ACCESS, 0,
452 CMD_OPTION_WAITFORRSP, 0, &offval); 452 CMD_OPTION_WAITFORRSP, 0, &offval);
453 mdelay(10); 453 mdelay(10);
454 pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", 454 if (!ret) {
455 pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n",
455 priv->mac_offset, priv->offsetvalue.value); 456 priv->mac_offset, priv->offsetvalue.value);
456 457
457 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 458 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
459 }
458 free_page(addr); 460 free_page(addr);
459 return ret; 461 return ret;
460} 462}
@@ -514,7 +516,8 @@ static ssize_t lbs_wrmac_write(struct file *file,
514 CMD_OPTION_WAITFORRSP, 0, &offval); 516 CMD_OPTION_WAITFORRSP, 0, &offval);
515 mdelay(10); 517 mdelay(10);
516 518
517 res = count; 519 if (!res)
520 res = count;
518out_unlock: 521out_unlock:
519 free_page(addr); 522 free_page(addr);
520 return res; 523 return res;
@@ -539,10 +542,12 @@ static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf,
539 CMD_BBP_REG_ACCESS, 0, 542 CMD_BBP_REG_ACCESS, 0,
540 CMD_OPTION_WAITFORRSP, 0, &offval); 543 CMD_OPTION_WAITFORRSP, 0, &offval);
541 mdelay(10); 544 mdelay(10);
542 pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", 545 if (!ret) {
546 pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n",
543 priv->bbp_offset, priv->offsetvalue.value); 547 priv->bbp_offset, priv->offsetvalue.value);
544 548
545 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 549 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
550 }
546 free_page(addr); 551 free_page(addr);
547 552
548 return ret; 553 return ret;
@@ -603,7 +608,8 @@ static ssize_t lbs_wrbbp_write(struct file *file,
603 CMD_OPTION_WAITFORRSP, 0, &offval); 608 CMD_OPTION_WAITFORRSP, 0, &offval);
604 mdelay(10); 609 mdelay(10);
605 610
606 res = count; 611 if (!res)
612 res = count;
607out_unlock: 613out_unlock:
608 free_page(addr); 614 free_page(addr);
609 return res; 615 return res;
@@ -628,10 +634,12 @@ static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf,
628 CMD_RF_REG_ACCESS, 0, 634 CMD_RF_REG_ACCESS, 0,
629 CMD_OPTION_WAITFORRSP, 0, &offval); 635 CMD_OPTION_WAITFORRSP, 0, &offval);
630 mdelay(10); 636 mdelay(10);
631 pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", 637 if (!ret) {
638 pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n",
632 priv->rf_offset, priv->offsetvalue.value); 639 priv->rf_offset, priv->offsetvalue.value);
633 640
634 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 641 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
642 }
635 free_page(addr); 643 free_page(addr);
636 644
637 return ret; 645 return ret;
@@ -692,7 +700,8 @@ static ssize_t lbs_wrrf_write(struct file *file,
692 CMD_OPTION_WAITFORRSP, 0, &offval); 700 CMD_OPTION_WAITFORRSP, 0, &offval);
693 mdelay(10); 701 mdelay(10);
694 702
695 res = count; 703 if (!res)
704 res = count;
696out_unlock: 705out_unlock:
697 free_page(addr); 706 free_page(addr);
698 return res; 707 return res;
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 8b15380ae6e1..678f7c9f7503 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -8,71 +8,48 @@
8 8
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10 10
11#include "defs.h"
12 11
13/** Function Prototype Declaration */
14struct lbs_private; 12struct lbs_private;
15struct sk_buff; 13struct sk_buff;
16struct net_device; 14struct net_device;
17struct cmd_ctrl_node;
18struct cmd_ds_command;
19 15
20void lbs_set_mac_control(struct lbs_private *priv);
21 16
22void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count); 17/* ethtool.c */
23 18extern const struct ethtool_ops lbs_ethtool_ops;
24int lbs_free_cmd_buffer(struct lbs_private *priv);
25
26int lbs_prepare_and_send_command(struct lbs_private *priv,
27 u16 cmd_no,
28 u16 cmd_action,
29 u16 wait_option, u32 cmd_oid, void *pdata_buf);
30 19
31int lbs_allocate_cmd_buffer(struct lbs_private *priv);
32int lbs_execute_next_command(struct lbs_private *priv);
33int lbs_process_event(struct lbs_private *priv, u32 event);
34void lbs_queue_event(struct lbs_private *priv, u32 event);
35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
36 20
37u32 lbs_fw_index_to_data_rate(u8 index); 21/* tx.c */
38u8 lbs_data_rate_to_fw_index(u32 rate); 22void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count);
39
40/** The proc fs interface */
41int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
42void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
43 int result);
44netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, 23netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb,
45 struct net_device *dev); 24 struct net_device *dev);
46int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
47 25
26/* rx.c */
48int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *); 27int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *);
49 28
50void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
51void lbs_ps_confirm_sleep(struct lbs_private *priv);
52void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
53
54struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
55 struct lbs_private *priv,
56 u8 band,
57 u16 channel);
58
59void lbs_mac_event_disconnected(struct lbs_private *priv);
60
61void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
62 29
63/* persistcfg.c */ 30/* persistcfg.c */
64void lbs_persist_config_init(struct net_device *net); 31void lbs_persist_config_init(struct net_device *net);
65void lbs_persist_config_remove(struct net_device *net); 32void lbs_persist_config_remove(struct net_device *net);
66 33
34
67/* main.c */ 35/* main.c */
68struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
69 int *cfp_no);
70struct lbs_private *lbs_add_card(void *card, struct device *dmdev); 36struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
71void lbs_remove_card(struct lbs_private *priv); 37void lbs_remove_card(struct lbs_private *priv);
72int lbs_start_card(struct lbs_private *priv); 38int lbs_start_card(struct lbs_private *priv);
73void lbs_stop_card(struct lbs_private *priv); 39void lbs_stop_card(struct lbs_private *priv);
74void lbs_host_to_card_done(struct lbs_private *priv); 40void lbs_host_to_card_done(struct lbs_private *priv);
75 41
76int lbs_update_channel(struct lbs_private *priv); 42int lbs_suspend(struct lbs_private *priv);
43void lbs_resume(struct lbs_private *priv);
44
45void lbs_queue_event(struct lbs_private *priv, u32 event);
46void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
47
48int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
49int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
50
51u32 lbs_fw_index_to_data_rate(u8 index);
52u8 lbs_data_rate_to_fw_index(u32 rate);
53
77 54
78#endif 55#endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 72f3479a4d70..6b6ea9f7bf5b 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -42,6 +42,7 @@
42#define LBS_DEB_SDIO 0x00400000 42#define LBS_DEB_SDIO 0x00400000
43#define LBS_DEB_SYSFS 0x00800000 43#define LBS_DEB_SYSFS 0x00800000
44#define LBS_DEB_SPI 0x01000000 44#define LBS_DEB_SPI 0x01000000
45#define LBS_DEB_CFG80211 0x02000000
45 46
46extern unsigned int lbs_debug; 47extern unsigned int lbs_debug;
47 48
@@ -86,6 +87,7 @@ do { if ((lbs_debug & (grp)) == (grp)) \
86#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args) 87#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
87#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args) 88#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args)
88#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args) 89#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args)
90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
89 91
90#define lbs_pr_info(format, args...) \ 92#define lbs_pr_info(format, args...) \
91 printk(KERN_INFO DRV_NAME": " format, ## args) 93 printk(KERN_INFO DRV_NAME": " format, ## args)
@@ -320,7 +322,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
320extern const char lbs_driver_version[]; 322extern const char lbs_driver_version[];
321extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE]; 323extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE];
322 324
323extern u8 lbs_bg_rates[MAX_RATES];
324 325
325/** ENUM definition*/ 326/** ENUM definition*/
326/** SNRNF_TYPE */ 327/** SNRNF_TYPE */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index d3b69a4b4b5e..1a675111300d 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -6,75 +6,10 @@
6#ifndef _LBS_DEV_H_ 6#ifndef _LBS_DEV_H_
7#define _LBS_DEV_H_ 7#define _LBS_DEV_H_
8 8
9#include <linux/netdevice.h> 9#include "scan.h"
10#include <linux/wireless.h> 10#include "assoc.h"
11#include <linux/ethtool.h>
12#include <linux/debugfs.h>
13 11
14#include "defs.h"
15#include "hostcmd.h"
16 12
17extern const struct ethtool_ops lbs_ethtool_ops;
18
19#define MAX_BSSID_PER_CHANNEL 16
20
21#define NR_TX_QUEUE 3
22
23/* For the extended Scan */
24#define MAX_EXTENDED_SCAN_BSSID_LIST MAX_BSSID_PER_CHANNEL * \
25 MRVDRV_MAX_CHANNEL_SIZE + 1
26
27#define MAX_REGION_CHANNEL_NUM 2
28
29/** Chan-freq-TxPower mapping table*/
30struct chan_freq_power {
31 /** channel Number */
32 u16 channel;
33 /** frequency of this channel */
34 u32 freq;
35 /** Max allowed Tx power level */
36 u16 maxtxpower;
37 /** TRUE:channel unsupported; FLASE:supported*/
38 u8 unsupported;
39};
40
41/** region-band mapping table*/
42struct region_channel {
43 /** TRUE if this entry is valid */
44 u8 valid;
45 /** region code for US, Japan ... */
46 u8 region;
47 /** band B/G/A, used for BAND_CONFIG cmd */
48 u8 band;
49 /** Actual No. of elements in the array below */
50 u8 nrcfp;
51 /** chan-freq-txpower mapping table*/
52 struct chan_freq_power *CFP;
53};
54
55struct lbs_802_11_security {
56 u8 WPAenabled;
57 u8 WPA2enabled;
58 u8 wep_enabled;
59 u8 auth_mode;
60 u32 key_mgmt;
61};
62
63/** Current Basic Service Set State Structure */
64struct current_bss_params {
65 /** bssid */
66 u8 bssid[ETH_ALEN];
67 /** ssid */
68 u8 ssid[IW_ESSID_MAX_SIZE + 1];
69 u8 ssid_len;
70
71 /** band */
72 u8 band;
73 /** channel */
74 u8 channel;
75 /** zero-terminated array of supported data rates */
76 u8 rates[MAX_RATES + 1];
77};
78 13
79/** sleep_params */ 14/** sleep_params */
80struct sleep_params { 15struct sleep_params {
@@ -100,95 +35,96 @@ struct lbs_mesh_stats {
100 35
101/** Private structure for the MV device */ 36/** Private structure for the MV device */
102struct lbs_private { 37struct lbs_private {
103 int mesh_open;
104 int mesh_fw_ver;
105 int infra_open;
106 int mesh_autostart_enabled;
107 38
108 char name[DEV_NAME_LEN]; 39 /* Basic networking */
109
110 void *card;
111 struct net_device *dev; 40 struct net_device *dev;
41 u32 connect_status;
42 int infra_open;
43 struct work_struct mcast_work;
44 u32 nr_of_multicastmacaddr;
45 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
112 46
47 /* CFG80211 */
48 struct wireless_dev *wdev;
49
50 /* Mesh */
113 struct net_device *mesh_dev; /* Virtual device */ 51 struct net_device *mesh_dev; /* Virtual device */
52 u32 mesh_connect_status;
53 struct lbs_mesh_stats mstats;
54 int mesh_open;
55 int mesh_fw_ver;
56 int mesh_autostart_enabled;
57 uint16_t mesh_tlv;
58 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
59 u8 mesh_ssid_len;
60 struct work_struct sync_channel;
61
62 /* Monitor mode */
114 struct net_device *rtap_net_dev; 63 struct net_device *rtap_net_dev;
64 u32 monitormode;
115 65
116 struct iw_statistics wstats; 66 /* Debugfs */
117 struct lbs_mesh_stats mstats;
118 struct dentry *debugfs_dir; 67 struct dentry *debugfs_dir;
119 struct dentry *debugfs_debug; 68 struct dentry *debugfs_debug;
120 struct dentry *debugfs_files[6]; 69 struct dentry *debugfs_files[6];
121
122 struct dentry *events_dir; 70 struct dentry *events_dir;
123 struct dentry *debugfs_events_files[6]; 71 struct dentry *debugfs_events_files[6];
124
125 struct dentry *regs_dir; 72 struct dentry *regs_dir;
126 struct dentry *debugfs_regs_files[6]; 73 struct dentry *debugfs_regs_files[6];
127 74
75 /* Hardware debugging */
128 u32 mac_offset; 76 u32 mac_offset;
129 u32 bbp_offset; 77 u32 bbp_offset;
130 u32 rf_offset; 78 u32 rf_offset;
79 struct lbs_offset_value offsetvalue;
131 80
132 /* Download sent: 81 /* Power management */
133 bit0 1/0=data_sent/data_tx_done, 82 u16 psmode;
134 bit1 1/0=cmd_sent/cmd_tx_done, 83 u32 psstate;
135 all other bits reserved 0 */ 84 u8 needtowakeup;
136 u8 dnld_sent;
137
138 /** thread to service interrupts */
139 struct task_struct *main_thread;
140 wait_queue_head_t waitq;
141 struct workqueue_struct *work_thread;
142 85
143 struct work_struct mcast_work; 86 /* Deep sleep */
87 int is_deep_sleep;
88 int is_auto_deep_sleep_enabled;
89 int wakeup_dev_required;
90 int is_activity_detected;
91 int auto_deep_sleep_timeout; /* in ms */
92 wait_queue_head_t ds_awake_q;
93 struct timer_list auto_deepsleep_timer;
144 94
145 /** Scanning */ 95 /* Hardware access */
146 struct delayed_work scan_work; 96 void *card;
147 struct delayed_work assoc_work; 97 u8 fw_ready;
148 struct work_struct sync_channel; 98 u8 surpriseremoved;
149 /* remember which channel was scanned last, != 0 if currently scanning */
150 int scan_channel;
151 u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
152 u8 scan_ssid_len;
153
154 /** Hardware access */
155 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 99 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
156 void (*reset_card) (struct lbs_private *priv); 100 void (*reset_card) (struct lbs_private *priv);
101 int (*enter_deep_sleep) (struct lbs_private *priv);
102 int (*exit_deep_sleep) (struct lbs_private *priv);
103 int (*reset_deep_sleep_wakeup) (struct lbs_private *priv);
157 104
158 /* Wake On LAN */ 105 /* Adapter info (from EEPROM) */
159 uint32_t wol_criteria;
160 uint8_t wol_gpio;
161 uint8_t wol_gap;
162
163 /** Wlan adapter data structure*/
164 /** STATUS variables */
165 u32 fwrelease; 106 u32 fwrelease;
166 u32 fwcapinfo; 107 u32 fwcapinfo;
108 u16 regioncode;
109 u8 current_addr[ETH_ALEN];
167 110
168 struct mutex lock; 111 /* Command download */
169 112 u8 dnld_sent;
170 /* TX packet ready to be sent... */ 113 /* bit0 1/0=data_sent/data_tx_done,
171 int tx_pending_len; /* -1 while building packet */ 114 bit1 1/0=cmd_sent/cmd_tx_done,
172 115 all other bits reserved 0 */
173 u8 tx_pending_buf[LBS_UPLD_SIZE];
174 /* protected by hard_start_xmit serialization */
175
176 /** command-related variables */
177 u16 seqnum; 116 u16 seqnum;
178
179 struct cmd_ctrl_node *cmd_array; 117 struct cmd_ctrl_node *cmd_array;
180 /** Current command */
181 struct cmd_ctrl_node *cur_cmd; 118 struct cmd_ctrl_node *cur_cmd;
182 int cur_cmd_retcode; 119 struct list_head cmdfreeq; /* free command buffers */
183 /** command Queues */ 120 struct list_head cmdpendingq; /* pending command buffers */
184 /** Free command buffers */
185 struct list_head cmdfreeq;
186 /** Pending command buffers */
187 struct list_head cmdpendingq;
188
189 wait_queue_head_t cmd_pending; 121 wait_queue_head_t cmd_pending;
122 struct timer_list command_timer;
123 int nr_retries;
124 int cmd_timed_out;
190 125
191 /* Command responses sent from the hardware to the driver */ 126 /* Command responses sent from the hardware to the driver */
127 int cur_cmd_retcode;
192 u8 resp_idx; 128 u8 resp_idx;
193 u8 resp_buf[2][LBS_UPLD_SIZE]; 129 u8 resp_buf[2][LBS_UPLD_SIZE];
194 u32 resp_len[2]; 130 u32 resp_len[2];
@@ -196,95 +132,76 @@ struct lbs_private {
196 /* Events sent from hardware to driver */ 132 /* Events sent from hardware to driver */
197 struct kfifo *event_fifo; 133 struct kfifo *event_fifo;
198 134
199 /* nickname */ 135 /** thread to service interrupts */
200 u8 nodename[16]; 136 struct task_struct *main_thread;
201 137 wait_queue_head_t waitq;
202 /** spin locks */ 138 struct workqueue_struct *work_thread;
203 spinlock_t driver_lock;
204
205 /** Timers */
206 struct timer_list command_timer;
207 int nr_retries;
208 int cmd_timed_out;
209
210 /** current ssid/bssid related parameters*/
211 struct current_bss_params curbssparams;
212
213 uint16_t mesh_tlv;
214 u8 mesh_ssid[IW_ESSID_MAX_SIZE + 1];
215 u8 mesh_ssid_len;
216
217 /* IW_MODE_* */
218 u8 mode;
219
220 /* Scan results list */
221 struct list_head network_list;
222 struct list_head network_free_list;
223 struct bss_descriptor *networks;
224
225 u16 beacon_period;
226 u8 beacon_enable;
227 u8 adhoccreate;
228
229 /** capability Info used in Association, start, join */
230 u16 capability;
231
232 /** MAC address information */
233 u8 current_addr[ETH_ALEN];
234 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
235 u32 nr_of_multicastmacaddr;
236 139
237 /** 802.11 statistics */ 140 /** Encryption stuff */
238// struct cmd_DS_802_11_GET_STAT wlan802_11Stat; 141 struct lbs_802_11_security secinfo;
142 struct enc_key wpa_mcast_key;
143 struct enc_key wpa_unicast_key;
144 u8 wpa_ie[MAX_WPA_IE_LEN];
145 u8 wpa_ie_len;
146 u16 wep_tx_keyidx;
147 struct enc_key wep_keys[4];
239 148
240 uint16_t enablehwauto; 149 /* Wake On LAN */
241 uint16_t ratebitmap; 150 uint32_t wol_criteria;
151 uint8_t wol_gpio;
152 uint8_t wol_gap;
242 153
154 /* Transmitting */
155 int tx_pending_len; /* -1 while building packet */
156 u8 tx_pending_buf[LBS_UPLD_SIZE];
157 /* protected by hard_start_xmit serialization */
243 u8 txretrycount; 158 u8 txretrycount;
244
245 /** Tx-related variables (for single packet tx) */
246 struct sk_buff *currenttxskb; 159 struct sk_buff *currenttxskb;
247 160
248 /** NIC Operation characteristics */ 161 /* Locks */
162 struct mutex lock;
163 spinlock_t driver_lock;
164
165 /* NIC/link operation characteristics */
249 u16 mac_control; 166 u16 mac_control;
250 u32 connect_status; 167 u8 radio_on;
251 u32 mesh_connect_status; 168 u8 channel;
252 u16 regioncode;
253 s16 txpower_cur; 169 s16 txpower_cur;
254 s16 txpower_min; 170 s16 txpower_min;
255 s16 txpower_max; 171 s16 txpower_max;
256 172
257 /** POWER MANAGEMENT AND PnP SUPPORT */ 173 /** Scanning */
258 u8 surpriseremoved; 174 struct delayed_work scan_work;
259 175 int scan_channel;
260 u16 psmode; /* Wlan802_11PowermodeCAM=disable 176 /* remember which channel was scanned last, != 0 if currently scanning */
261 Wlan802_11PowermodeMAX_PSP=enable */ 177 u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
262 u32 psstate; 178 u8 scan_ssid_len;
263 u8 needtowakeup;
264 179
180 /* Associating */
181 struct delayed_work assoc_work;
182 struct current_bss_params curbssparams;
183 u8 mode;
184 struct list_head network_list;
185 struct list_head network_free_list;
186 struct bss_descriptor *networks;
265 struct assoc_request * pending_assoc_req; 187 struct assoc_request * pending_assoc_req;
266 struct assoc_request * in_progress_assoc_req; 188 struct assoc_request * in_progress_assoc_req;
189 u16 capability;
190 uint16_t enablehwauto;
191 uint16_t ratebitmap;
267 192
268 /** Encryption parameter */ 193 /* ADHOC */
269 struct lbs_802_11_security secinfo; 194 u16 beacon_period;
270 195 u8 beacon_enable;
271 /** WEP keys */ 196 u8 adhoccreate;
272 struct enc_key wep_keys[4];
273 u16 wep_tx_keyidx;
274
275 /** WPA keys */
276 struct enc_key wpa_mcast_key;
277 struct enc_key wpa_unicast_key;
278
279/*
280 * In theory, the IE is limited to the IE length, 255,
281 * but in practice 64 bytes are enough.
282 */
283#define MAX_WPA_IE_LEN 64
284 197
285 /** WPA Information Elements*/ 198 /* WEXT */
286 u8 wpa_ie[MAX_WPA_IE_LEN]; 199 char name[DEV_NAME_LEN];
287 u8 wpa_ie_len; 200 u8 nodename[16];
201 struct iw_statistics wstats;
202 u8 cur_rate;
203#define MAX_REGION_CHANNEL_NUM 2
204 struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
288 205
289 /** Requested Signal Strength*/ 206 /** Requested Signal Strength*/
290 u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG]; 207 u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
@@ -294,116 +211,8 @@ struct lbs_private {
294 u8 rawNF[DEFAULT_DATA_AVG_FACTOR]; 211 u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
295 u16 nextSNRNF; 212 u16 nextSNRNF;
296 u16 numSNRNF; 213 u16 numSNRNF;
297
298 u8 radio_on;
299
300 /** data rate stuff */
301 u8 cur_rate;
302
303 /** RF calibration data */
304
305#define MAX_REGION_CHANNEL_NUM 2
306 /** region channel data */
307 struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
308
309 struct region_channel universal_channel[MAX_REGION_CHANNEL_NUM];
310
311 /** 11D and Domain Regulatory Data */
312 struct lbs_802_11d_domain_reg domainreg;
313 struct parsed_region_chan_11d parsed_region_chan;
314
315 /** FSM variable for 11d support */
316 u32 enable11d;
317
318 /** MISCELLANEOUS */
319 struct lbs_offset_value offsetvalue;
320
321 u32 monitormode;
322 u8 fw_ready;
323}; 214};
324 215
325extern struct cmd_confirm_sleep confirm_sleep; 216extern struct cmd_confirm_sleep confirm_sleep;
326 217
327/**
328 * @brief Structure used to store information for each beacon/probe response
329 */
330struct bss_descriptor {
331 u8 bssid[ETH_ALEN];
332
333 u8 ssid[IW_ESSID_MAX_SIZE + 1];
334 u8 ssid_len;
335
336 u16 capability;
337 u32 rssi;
338 u32 channel;
339 u16 beaconperiod;
340 __le16 atimwindow;
341
342 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
343 u8 mode;
344
345 /* zero-terminated array of supported data rates */
346 u8 rates[MAX_RATES + 1];
347
348 unsigned long last_scanned;
349
350 union ieee_phy_param_set phy;
351 union ieee_ss_param_set ss;
352
353 struct ieee_ie_country_info_full_set countryinfo;
354
355 u8 wpa_ie[MAX_WPA_IE_LEN];
356 size_t wpa_ie_len;
357 u8 rsn_ie[MAX_WPA_IE_LEN];
358 size_t rsn_ie_len;
359
360 u8 mesh;
361
362 struct list_head list;
363};
364
365/** Association request
366 *
367 * Encapsulates all the options that describe a specific assocation request
368 * or configuration of the wireless card's radio, mode, and security settings.
369 */
370struct assoc_request {
371#define ASSOC_FLAG_SSID 1
372#define ASSOC_FLAG_CHANNEL 2
373#define ASSOC_FLAG_BAND 3
374#define ASSOC_FLAG_MODE 4
375#define ASSOC_FLAG_BSSID 5
376#define ASSOC_FLAG_WEP_KEYS 6
377#define ASSOC_FLAG_WEP_TX_KEYIDX 7
378#define ASSOC_FLAG_WPA_MCAST_KEY 8
379#define ASSOC_FLAG_WPA_UCAST_KEY 9
380#define ASSOC_FLAG_SECINFO 10
381#define ASSOC_FLAG_WPA_IE 11
382 unsigned long flags;
383
384 u8 ssid[IW_ESSID_MAX_SIZE + 1];
385 u8 ssid_len;
386 u8 channel;
387 u8 band;
388 u8 mode;
389 u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
390
391 /** WEP keys */
392 struct enc_key wep_keys[4];
393 u16 wep_tx_keyidx;
394
395 /** WPA keys */
396 struct enc_key wpa_mcast_key;
397 struct enc_key wpa_unicast_key;
398
399 struct lbs_802_11_security secinfo;
400
401 /** WPA Information Elements*/
402 u8 wpa_ie[MAX_WPA_IE_LEN];
403 u8 wpa_ie_len;
404
405 /* BSS to associate with for infrastructure of Ad-Hoc join */
406 struct bss_descriptor bss;
407};
408
409#endif 218#endif
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index fe8f0cb737bc..3809c0b49464 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -1,201 +1,190 @@
1/** 1/**
2 * This file contains definitions of WLAN commands. 2 * This file function prototypes, data structure
3 * and definitions for all the host/station commands
3 */ 4 */
4 5
5#ifndef _LBS_HOST_H_ 6#ifndef _LBS_HOST_H_
6#define _LBS_HOST_H_ 7#define _LBS_HOST_H_
7 8
8/** PUBLIC DEFINITIONS */ 9#include "types.h"
9#define DEFAULT_AD_HOC_CHANNEL 6 10#include "defs.h"
10#define DEFAULT_AD_HOC_CHANNEL_A 36
11 11
12#define CMD_OPTION_WAITFORRSP 0x0002 12#define DEFAULT_AD_HOC_CHANNEL 6
13
14#define CMD_OPTION_WAITFORRSP 0x0002
13 15
14/** Host command IDs */ 16/** Host command IDs */
15 17
16/* Return command are almost always the same as the host command, but with 18/* Return command are almost always the same as the host command, but with
17 * bit 15 set high. There are a few exceptions, though... 19 * bit 15 set high. There are a few exceptions, though...
18 */ 20 */
19#define CMD_RET(cmd) (0x8000 | cmd) 21#define CMD_RET(cmd) (0x8000 | cmd)
20 22
21/* Return command convention exceptions: */ 23/* Return command convention exceptions: */
22#define CMD_RET_802_11_ASSOCIATE 0x8012 24#define CMD_RET_802_11_ASSOCIATE 0x8012
23 25
24/* Command codes */ 26/* Command codes */
25#define CMD_GET_HW_SPEC 0x0003 27#define CMD_GET_HW_SPEC 0x0003
26#define CMD_EEPROM_UPDATE 0x0004 28#define CMD_EEPROM_UPDATE 0x0004
27#define CMD_802_11_RESET 0x0005 29#define CMD_802_11_RESET 0x0005
28#define CMD_802_11_SCAN 0x0006 30#define CMD_802_11_SCAN 0x0006
29#define CMD_802_11_GET_LOG 0x000b 31#define CMD_802_11_GET_LOG 0x000b
30#define CMD_MAC_MULTICAST_ADR 0x0010 32#define CMD_MAC_MULTICAST_ADR 0x0010
31#define CMD_802_11_AUTHENTICATE 0x0011 33#define CMD_802_11_AUTHENTICATE 0x0011
32#define CMD_802_11_EEPROM_ACCESS 0x0059 34#define CMD_802_11_EEPROM_ACCESS 0x0059
33#define CMD_802_11_ASSOCIATE 0x0050 35#define CMD_802_11_ASSOCIATE 0x0050
34#define CMD_802_11_SET_WEP 0x0013 36#define CMD_802_11_SET_WEP 0x0013
35#define CMD_802_11_GET_STAT 0x0014 37#define CMD_802_11_GET_STAT 0x0014
36#define CMD_802_3_GET_STAT 0x0015 38#define CMD_802_3_GET_STAT 0x0015
37#define CMD_802_11_SNMP_MIB 0x0016 39#define CMD_802_11_SNMP_MIB 0x0016
38#define CMD_MAC_REG_MAP 0x0017 40#define CMD_MAC_REG_MAP 0x0017
39#define CMD_BBP_REG_MAP 0x0018 41#define CMD_BBP_REG_MAP 0x0018
40#define CMD_MAC_REG_ACCESS 0x0019 42#define CMD_MAC_REG_ACCESS 0x0019
41#define CMD_BBP_REG_ACCESS 0x001a 43#define CMD_BBP_REG_ACCESS 0x001a
42#define CMD_RF_REG_ACCESS 0x001b 44#define CMD_RF_REG_ACCESS 0x001b
43#define CMD_802_11_RADIO_CONTROL 0x001c 45#define CMD_802_11_RADIO_CONTROL 0x001c
44#define CMD_802_11_RF_CHANNEL 0x001d 46#define CMD_802_11_RF_CHANNEL 0x001d
45#define CMD_802_11_RF_TX_POWER 0x001e 47#define CMD_802_11_RF_TX_POWER 0x001e
46#define CMD_802_11_RSSI 0x001f 48#define CMD_802_11_RSSI 0x001f
47#define CMD_802_11_RF_ANTENNA 0x0020 49#define CMD_802_11_RF_ANTENNA 0x0020
48#define CMD_802_11_PS_MODE 0x0021 50#define CMD_802_11_PS_MODE 0x0021
49#define CMD_802_11_DATA_RATE 0x0022 51#define CMD_802_11_DATA_RATE 0x0022
50#define CMD_RF_REG_MAP 0x0023 52#define CMD_RF_REG_MAP 0x0023
51#define CMD_802_11_DEAUTHENTICATE 0x0024 53#define CMD_802_11_DEAUTHENTICATE 0x0024
52#define CMD_802_11_REASSOCIATE 0x0025 54#define CMD_802_11_REASSOCIATE 0x0025
53#define CMD_MAC_CONTROL 0x0028 55#define CMD_MAC_CONTROL 0x0028
54#define CMD_802_11_AD_HOC_START 0x002b 56#define CMD_802_11_AD_HOC_START 0x002b
55#define CMD_802_11_AD_HOC_JOIN 0x002c 57#define CMD_802_11_AD_HOC_JOIN 0x002c
56#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e 58#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e
57#define CMD_802_11_ENABLE_RSN 0x002f 59#define CMD_802_11_ENABLE_RSN 0x002f
58#define CMD_802_11_SET_AFC 0x003c 60#define CMD_802_11_SET_AFC 0x003c
59#define CMD_802_11_GET_AFC 0x003d 61#define CMD_802_11_GET_AFC 0x003d
60#define CMD_802_11_AD_HOC_STOP 0x0040 62#define CMD_802_11_DEEP_SLEEP 0x003e
61#define CMD_802_11_HOST_SLEEP_CFG 0x0043 63#define CMD_802_11_AD_HOC_STOP 0x0040
62#define CMD_802_11_WAKEUP_CONFIRM 0x0044 64#define CMD_802_11_HOST_SLEEP_CFG 0x0043
63#define CMD_802_11_HOST_SLEEP_ACTIVATE 0x0045 65#define CMD_802_11_WAKEUP_CONFIRM 0x0044
64#define CMD_802_11_BEACON_STOP 0x0049 66#define CMD_802_11_HOST_SLEEP_ACTIVATE 0x0045
65#define CMD_802_11_MAC_ADDRESS 0x004d 67#define CMD_802_11_BEACON_STOP 0x0049
66#define CMD_802_11_LED_GPIO_CTRL 0x004e 68#define CMD_802_11_MAC_ADDRESS 0x004d
67#define CMD_802_11_EEPROM_ACCESS 0x0059 69#define CMD_802_11_LED_GPIO_CTRL 0x004e
68#define CMD_802_11_BAND_CONFIG 0x0058 70#define CMD_802_11_EEPROM_ACCESS 0x0059
69#define CMD_GSPI_BUS_CONFIG 0x005a 71#define CMD_802_11_BAND_CONFIG 0x0058
70#define CMD_802_11D_DOMAIN_INFO 0x005b 72#define CMD_GSPI_BUS_CONFIG 0x005a
71#define CMD_802_11_KEY_MATERIAL 0x005e 73#define CMD_802_11D_DOMAIN_INFO 0x005b
72#define CMD_802_11_SLEEP_PARAMS 0x0066 74#define CMD_802_11_KEY_MATERIAL 0x005e
73#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 75#define CMD_802_11_SLEEP_PARAMS 0x0066
74#define CMD_802_11_SLEEP_PERIOD 0x0068 76#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067
75#define CMD_802_11_TPC_CFG 0x0072 77#define CMD_802_11_SLEEP_PERIOD 0x0068
76#define CMD_802_11_PA_CFG 0x0073 78#define CMD_802_11_TPC_CFG 0x0072
77#define CMD_802_11_FW_WAKE_METHOD 0x0074 79#define CMD_802_11_PA_CFG 0x0073
78#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 80#define CMD_802_11_FW_WAKE_METHOD 0x0074
79#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 81#define CMD_802_11_SUBSCRIBE_EVENT 0x0075
80#define CMD_802_11_TX_RATE_QUERY 0x007f 82#define CMD_802_11_RATE_ADAPT_RATESET 0x0076
81#define CMD_GET_TSF 0x0080 83#define CMD_802_11_TX_RATE_QUERY 0x007f
82#define CMD_BT_ACCESS 0x0087 84#define CMD_GET_TSF 0x0080
83#define CMD_FWT_ACCESS 0x0095 85#define CMD_BT_ACCESS 0x0087
84#define CMD_802_11_MONITOR_MODE 0x0098 86#define CMD_FWT_ACCESS 0x0095
85#define CMD_MESH_ACCESS 0x009b 87#define CMD_802_11_MONITOR_MODE 0x0098
86#define CMD_MESH_CONFIG_OLD 0x00a3 88#define CMD_MESH_ACCESS 0x009b
87#define CMD_MESH_CONFIG 0x00ac 89#define CMD_MESH_CONFIG_OLD 0x00a3
88#define CMD_SET_BOOT2_VER 0x00a5 90#define CMD_MESH_CONFIG 0x00ac
89#define CMD_FUNC_INIT 0x00a9 91#define CMD_SET_BOOT2_VER 0x00a5
90#define CMD_FUNC_SHUTDOWN 0x00aa 92#define CMD_FUNC_INIT 0x00a9
91#define CMD_802_11_BEACON_CTRL 0x00b0 93#define CMD_FUNC_SHUTDOWN 0x00aa
94#define CMD_802_11_BEACON_CTRL 0x00b0
92 95
93/* For the IEEE Power Save */ 96/* For the IEEE Power Save */
94#define CMD_SUBCMD_ENTER_PS 0x0030 97#define CMD_SUBCMD_ENTER_PS 0x0030
95#define CMD_SUBCMD_EXIT_PS 0x0031 98#define CMD_SUBCMD_EXIT_PS 0x0031
96#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 99#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034
97#define CMD_SUBCMD_FULL_POWERDOWN 0x0035 100#define CMD_SUBCMD_FULL_POWERDOWN 0x0035
98#define CMD_SUBCMD_FULL_POWERUP 0x0036 101#define CMD_SUBCMD_FULL_POWERUP 0x0036
99 102
100#define CMD_ENABLE_RSN 0x0001 103#define CMD_ENABLE_RSN 0x0001
101#define CMD_DISABLE_RSN 0x0000 104#define CMD_DISABLE_RSN 0x0000
102 105
103#define CMD_ACT_GET 0x0000 106#define CMD_ACT_GET 0x0000
104#define CMD_ACT_SET 0x0001 107#define CMD_ACT_SET 0x0001
105#define CMD_ACT_GET_AES 0x0002
106#define CMD_ACT_SET_AES 0x0003
107#define CMD_ACT_REMOVE_AES 0x0004
108 108
109/* Define action or option for CMD_802_11_SET_WEP */ 109/* Define action or option for CMD_802_11_SET_WEP */
110#define CMD_ACT_ADD 0x0002 110#define CMD_ACT_ADD 0x0002
111#define CMD_ACT_REMOVE 0x0004 111#define CMD_ACT_REMOVE 0x0004
112#define CMD_ACT_USE_DEFAULT 0x0008
113
114#define CMD_TYPE_WEP_40_BIT 0x01
115#define CMD_TYPE_WEP_104_BIT 0x02
116 112
117#define CMD_NUM_OF_WEP_KEYS 4 113#define CMD_TYPE_WEP_40_BIT 0x01
114#define CMD_TYPE_WEP_104_BIT 0x02
118 115
119#define CMD_WEP_KEY_INDEX_MASK 0x3fff 116#define CMD_NUM_OF_WEP_KEYS 4
120 117
121/* Define action or option for CMD_802_11_RESET */ 118#define CMD_WEP_KEY_INDEX_MASK 0x3fff
122#define CMD_ACT_HALT 0x0003
123 119
124/* Define action or option for CMD_802_11_SCAN */ 120/* Define action or option for CMD_802_11_SCAN */
125#define CMD_BSS_TYPE_BSS 0x0001 121#define CMD_BSS_TYPE_BSS 0x0001
126#define CMD_BSS_TYPE_IBSS 0x0002 122#define CMD_BSS_TYPE_IBSS 0x0002
127#define CMD_BSS_TYPE_ANY 0x0003 123#define CMD_BSS_TYPE_ANY 0x0003
128 124
129/* Define action or option for CMD_802_11_SCAN */ 125/* Define action or option for CMD_802_11_SCAN */
130#define CMD_SCAN_TYPE_ACTIVE 0x0000 126#define CMD_SCAN_TYPE_ACTIVE 0x0000
131#define CMD_SCAN_TYPE_PASSIVE 0x0001 127#define CMD_SCAN_TYPE_PASSIVE 0x0001
132 128
133#define CMD_SCAN_RADIO_TYPE_BG 0 129#define CMD_SCAN_RADIO_TYPE_BG 0
134 130
135#define CMD_SCAN_PROBE_DELAY_TIME 0 131#define CMD_SCAN_PROBE_DELAY_TIME 0
136 132
137/* Define action or option for CMD_MAC_CONTROL */ 133/* Define action or option for CMD_MAC_CONTROL */
138#define CMD_ACT_MAC_RX_ON 0x0001 134#define CMD_ACT_MAC_RX_ON 0x0001
139#define CMD_ACT_MAC_TX_ON 0x0002 135#define CMD_ACT_MAC_TX_ON 0x0002
140#define CMD_ACT_MAC_LOOPBACK_ON 0x0004 136#define CMD_ACT_MAC_LOOPBACK_ON 0x0004
141#define CMD_ACT_MAC_WEP_ENABLE 0x0008 137#define CMD_ACT_MAC_WEP_ENABLE 0x0008
142#define CMD_ACT_MAC_INT_ENABLE 0x0010 138#define CMD_ACT_MAC_INT_ENABLE 0x0010
143#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020 139#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020
144#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040 140#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040
145#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 141#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
146#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 142#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
147#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 143#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400
148 144
149/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */ 145/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
150#define CMD_SUBSCRIBE_RSSI_LOW 0x0001 146#define CMD_SUBSCRIBE_RSSI_LOW 0x0001
151#define CMD_SUBSCRIBE_SNR_LOW 0x0002 147#define CMD_SUBSCRIBE_SNR_LOW 0x0002
152#define CMD_SUBSCRIBE_FAILCOUNT 0x0004 148#define CMD_SUBSCRIBE_FAILCOUNT 0x0004
153#define CMD_SUBSCRIBE_BCNMISS 0x0008 149#define CMD_SUBSCRIBE_BCNMISS 0x0008
154#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010 150#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010
155#define CMD_SUBSCRIBE_SNR_HIGH 0x0020 151#define CMD_SUBSCRIBE_SNR_HIGH 0x0020
156 152
157#define RADIO_PREAMBLE_LONG 0x00 153#define RADIO_PREAMBLE_LONG 0x00
158#define RADIO_PREAMBLE_SHORT 0x02 154#define RADIO_PREAMBLE_SHORT 0x02
159#define RADIO_PREAMBLE_AUTO 0x04 155#define RADIO_PREAMBLE_AUTO 0x04
160 156
161/* Define action or option for CMD_802_11_RF_CHANNEL */ 157/* Define action or option for CMD_802_11_RF_CHANNEL */
162#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 158#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00
163#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 159#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01
164 160
165/* Define action or option for CMD_802_11_DATA_RATE */ 161/* Define action or option for CMD_802_11_DATA_RATE */
166#define CMD_ACT_SET_TX_AUTO 0x0000 162#define CMD_ACT_SET_TX_AUTO 0x0000
167#define CMD_ACT_SET_TX_FIX_RATE 0x0001 163#define CMD_ACT_SET_TX_FIX_RATE 0x0001
168#define CMD_ACT_GET_TX_RATE 0x0002 164#define CMD_ACT_GET_TX_RATE 0x0002
169
170#define CMD_ACT_SET_RX 0x0001
171#define CMD_ACT_SET_TX 0x0002
172#define CMD_ACT_SET_BOTH 0x0003
173#define CMD_ACT_GET_RX 0x0004
174#define CMD_ACT_GET_TX 0x0008
175#define CMD_ACT_GET_BOTH 0x000c
176 165
177/* Define action or option for CMD_802_11_PS_MODE */ 166/* Define action or option for CMD_802_11_PS_MODE */
178#define CMD_TYPE_CAM 0x0000 167#define CMD_TYPE_CAM 0x0000
179#define CMD_TYPE_MAX_PSP 0x0001 168#define CMD_TYPE_MAX_PSP 0x0001
180#define CMD_TYPE_FAST_PSP 0x0002 169#define CMD_TYPE_FAST_PSP 0x0002
181 170
182/* Options for CMD_802_11_FW_WAKE_METHOD */ 171/* Options for CMD_802_11_FW_WAKE_METHOD */
183#define CMD_WAKE_METHOD_UNCHANGED 0x0000 172#define CMD_WAKE_METHOD_UNCHANGED 0x0000
184#define CMD_WAKE_METHOD_COMMAND_INT 0x0001 173#define CMD_WAKE_METHOD_COMMAND_INT 0x0001
185#define CMD_WAKE_METHOD_GPIO 0x0002 174#define CMD_WAKE_METHOD_GPIO 0x0002
186 175
187/* Object IDs for CMD_802_11_SNMP_MIB */ 176/* Object IDs for CMD_802_11_SNMP_MIB */
188#define SNMP_MIB_OID_BSS_TYPE 0x0000 177#define SNMP_MIB_OID_BSS_TYPE 0x0000
189#define SNMP_MIB_OID_OP_RATE_SET 0x0001 178#define SNMP_MIB_OID_OP_RATE_SET 0x0001
190#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */ 179#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */
191#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */ 180#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */
192#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */ 181#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */
193#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005 182#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005
194#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006 183#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006
195#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007 184#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007
196#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008 185#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008
197#define SNMP_MIB_OID_11D_ENABLE 0x0009 186#define SNMP_MIB_OID_11D_ENABLE 0x0009
198#define SNMP_MIB_OID_11H_ENABLE 0x000A 187#define SNMP_MIB_OID_11H_ENABLE 0x000A
199 188
200/* Define action or option for CMD_BT_ACCESS */ 189/* Define action or option for CMD_BT_ACCESS */
201enum cmd_bt_access_opts { 190enum cmd_bt_access_opts {
@@ -302,4 +291,672 @@ enum cmd_mesh_config_types {
302#define MACREG_INT_CODE_MESH_AUTO_STARTED 35 291#define MACREG_INT_CODE_MESH_AUTO_STARTED 35
303#define MACREG_INT_CODE_FIRMWARE_READY 48 292#define MACREG_INT_CODE_FIRMWARE_READY 48
304 293
294
295/* 802.11-related definitions */
296
297/* TxPD descriptor */
298struct txpd {
299 /* union to cope up with later FW revisions */
300 union {
301 /* Current Tx packet status */
302 __le32 tx_status;
303 struct {
304 /* BSS type: client, AP, etc. */
305 u8 bss_type;
306 /* BSS number */
307 u8 bss_num;
308 /* Reserved */
309 __le16 reserved;
310 } bss;
311 } u;
312 /* Tx control */
313 __le32 tx_control;
314 __le32 tx_packet_location;
315 /* Tx packet length */
316 __le16 tx_packet_length;
317 /* First 2 byte of destination MAC address */
318 u8 tx_dest_addr_high[2];
319 /* Last 4 byte of destination MAC address */
320 u8 tx_dest_addr_low[4];
321 /* Pkt Priority */
322 u8 priority;
323 /* Pkt Trasnit Power control */
324 u8 powermgmt;
325 /* Amount of time the packet has been queued (units = 2ms) */
326 u8 pktdelay_2ms;
327 /* reserved */
328 u8 reserved1;
329} __attribute__ ((packed));
330
331/* RxPD Descriptor */
332struct rxpd {
333 /* union to cope up with later FW revisions */
334 union {
335 /* Current Rx packet status */
336 __le16 status;
337 struct {
338 /* BSS type: client, AP, etc. */
339 u8 bss_type;
340 /* BSS number */
341 u8 bss_num;
342 } __attribute__ ((packed)) bss;
343 } __attribute__ ((packed)) u;
344
345 /* SNR */
346 u8 snr;
347
348 /* Tx control */
349 u8 rx_control;
350
351 /* Pkt length */
352 __le16 pkt_len;
353
354 /* Noise Floor */
355 u8 nf;
356
357 /* Rx Packet Rate */
358 u8 rx_rate;
359
360 /* Pkt addr */
361 __le32 pkt_ptr;
362
363 /* Next Rx RxPD addr */
364 __le32 next_rxpd_ptr;
365
366 /* Pkt Priority */
367 u8 priority;
368 u8 reserved[3];
369} __attribute__ ((packed));
370
371struct cmd_header {
372 __le16 command;
373 __le16 size;
374 __le16 seqnum;
375 __le16 result;
376} __attribute__ ((packed));
377
378/* Generic structure to hold all key types. */
379struct enc_key {
380 u16 len;
381 u16 flags; /* KEY_INFO_* from defs.h */
382 u16 type; /* KEY_TYPE_* from defs.h */
383 u8 key[32];
384};
385
386/* lbs_offset_value */
387struct lbs_offset_value {
388 u32 offset;
389 u32 value;
390} __attribute__ ((packed));
391
392/*
393 * Define data structure for CMD_GET_HW_SPEC
394 * This structure defines the response for the GET_HW_SPEC command
395 */
396struct cmd_ds_get_hw_spec {
397 struct cmd_header hdr;
398
399 /* HW Interface version number */
400 __le16 hwifversion;
401 /* HW version number */
402 __le16 version;
403 /* Max number of TxPD FW can handle */
404 __le16 nr_txpd;
405 /* Max no of Multicast address */
406 __le16 nr_mcast_adr;
407 /* MAC address */
408 u8 permanentaddr[6];
409
410 /* region Code */
411 __le16 regioncode;
412
413 /* Number of antenna used */
414 __le16 nr_antenna;
415
416 /* FW release number, example 0x01030304 = 2.3.4p1 */
417 __le32 fwrelease;
418
419 /* Base Address of TxPD queue */
420 __le32 wcb_base;
421 /* Read Pointer of RxPd queue */
422 __le32 rxpd_rdptr;
423
424 /* Write Pointer of RxPd queue */
425 __le32 rxpd_wrptr;
426
427 /*FW/HW capability */
428 __le32 fwcapinfo;
429} __attribute__ ((packed));
430
431struct cmd_ds_802_11_subscribe_event {
432 struct cmd_header hdr;
433
434 __le16 action;
435 __le16 events;
436
437 /* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
438 * number of TLVs. From the v5.1 manual, those TLVs would add up to
439 * 40 bytes. However, future firmware might add additional TLVs, so I
440 * bump this up a bit.
441 */
442 uint8_t tlv[128];
443} __attribute__ ((packed));
444
445/*
446 * This scan handle Country Information IE(802.11d compliant)
447 * Define data structure for CMD_802_11_SCAN
448 */
449struct cmd_ds_802_11_scan {
450 struct cmd_header hdr;
451
452 uint8_t bsstype;
453 uint8_t bssid[ETH_ALEN];
454 uint8_t tlvbuffer[0];
455} __attribute__ ((packed));
456
457struct cmd_ds_802_11_scan_rsp {
458 struct cmd_header hdr;
459
460 __le16 bssdescriptsize;
461 uint8_t nr_sets;
462 uint8_t bssdesc_and_tlvbuffer[0];
463} __attribute__ ((packed));
464
465struct cmd_ds_802_11_get_log {
466 struct cmd_header hdr;
467
468 __le32 mcasttxframe;
469 __le32 failed;
470 __le32 retry;
471 __le32 multiretry;
472 __le32 framedup;
473 __le32 rtssuccess;
474 __le32 rtsfailure;
475 __le32 ackfailure;
476 __le32 rxfrag;
477 __le32 mcastrxframe;
478 __le32 fcserror;
479 __le32 txframe;
480 __le32 wepundecryptable;
481} __attribute__ ((packed));
482
483struct cmd_ds_mac_control {
484 struct cmd_header hdr;
485 __le16 action;
486 u16 reserved;
487} __attribute__ ((packed));
488
489struct cmd_ds_mac_multicast_adr {
490 struct cmd_header hdr;
491 __le16 action;
492 __le16 nr_of_adrs;
493 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
494} __attribute__ ((packed));
495
496struct cmd_ds_802_11_authenticate {
497 struct cmd_header hdr;
498
499 u8 bssid[ETH_ALEN];
500 u8 authtype;
501 u8 reserved[10];
502} __attribute__ ((packed));
503
504struct cmd_ds_802_11_deauthenticate {
505 struct cmd_header hdr;
506
507 u8 macaddr[ETH_ALEN];
508 __le16 reasoncode;
509} __attribute__ ((packed));
510
511struct cmd_ds_802_11_associate {
512 struct cmd_header hdr;
513
514 u8 bssid[6];
515 __le16 capability;
516 __le16 listeninterval;
517 __le16 bcnperiod;
518 u8 dtimperiod;
519 u8 iebuf[512]; /* Enough for required and most optional IEs */
520} __attribute__ ((packed));
521
522struct cmd_ds_802_11_associate_response {
523 struct cmd_header hdr;
524
525 __le16 capability;
526 __le16 statuscode;
527 __le16 aid;
528 u8 iebuf[512];
529} __attribute__ ((packed));
530
531struct cmd_ds_802_11_set_wep {
532 struct cmd_header hdr;
533
534 /* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
535 __le16 action;
536
537 /* key Index selected for Tx */
538 __le16 keyindex;
539
540 /* 40, 128bit or TXWEP */
541 uint8_t keytype[4];
542 uint8_t keymaterial[4][16];
543} __attribute__ ((packed));
544
545struct cmd_ds_802_11_snmp_mib {
546 struct cmd_header hdr;
547
548 __le16 action;
549 __le16 oid;
550 __le16 bufsize;
551 u8 value[128];
552} __attribute__ ((packed));
553
554struct cmd_ds_mac_reg_access {
555 __le16 action;
556 __le16 offset;
557 __le32 value;
558} __attribute__ ((packed));
559
560struct cmd_ds_bbp_reg_access {
561 __le16 action;
562 __le16 offset;
563 u8 value;
564 u8 reserved[3];
565} __attribute__ ((packed));
566
567struct cmd_ds_rf_reg_access {
568 __le16 action;
569 __le16 offset;
570 u8 value;
571 u8 reserved[3];
572} __attribute__ ((packed));
573
574struct cmd_ds_802_11_radio_control {
575 struct cmd_header hdr;
576
577 __le16 action;
578 __le16 control;
579} __attribute__ ((packed));
580
581struct cmd_ds_802_11_beacon_control {
582 __le16 action;
583 __le16 beacon_enable;
584 __le16 beacon_period;
585} __attribute__ ((packed));
586
587struct cmd_ds_802_11_sleep_params {
588 struct cmd_header hdr;
589
590 /* ACT_GET/ACT_SET */
591 __le16 action;
592
593 /* Sleep clock error in ppm */
594 __le16 error;
595
596 /* Wakeup offset in usec */
597 __le16 offset;
598
599 /* Clock stabilization time in usec */
600 __le16 stabletime;
601
602 /* control periodic calibration */
603 uint8_t calcontrol;
604
605 /* control the use of external sleep clock */
606 uint8_t externalsleepclk;
607
608 /* reserved field, should be set to zero */
609 __le16 reserved;
610} __attribute__ ((packed));
611
612struct cmd_ds_802_11_rf_channel {
613 struct cmd_header hdr;
614
615 __le16 action;
616 __le16 channel;
617 __le16 rftype; /* unused */
618 __le16 reserved; /* unused */
619 u8 channellist[32]; /* unused */
620} __attribute__ ((packed));
621
622struct cmd_ds_802_11_rssi {
623 /* weighting factor */
624 __le16 N;
625
626 __le16 reserved_0;
627 __le16 reserved_1;
628 __le16 reserved_2;
629} __attribute__ ((packed));
630
631struct cmd_ds_802_11_rssi_rsp {
632 __le16 SNR;
633 __le16 noisefloor;
634 __le16 avgSNR;
635 __le16 avgnoisefloor;
636} __attribute__ ((packed));
637
638struct cmd_ds_802_11_mac_address {
639 struct cmd_header hdr;
640
641 __le16 action;
642 u8 macadd[ETH_ALEN];
643} __attribute__ ((packed));
644
645struct cmd_ds_802_11_rf_tx_power {
646 struct cmd_header hdr;
647
648 __le16 action;
649 __le16 curlevel;
650 s8 maxlevel;
651 s8 minlevel;
652} __attribute__ ((packed));
653
654struct cmd_ds_802_11_monitor_mode {
655 __le16 action;
656 __le16 mode;
657} __attribute__ ((packed));
658
659struct cmd_ds_set_boot2_ver {
660 struct cmd_header hdr;
661
662 __le16 action;
663 __le16 version;
664} __attribute__ ((packed));
665
666struct cmd_ds_802_11_fw_wake_method {
667 struct cmd_header hdr;
668
669 __le16 action;
670 __le16 method;
671} __attribute__ ((packed));
672
673struct cmd_ds_802_11_ps_mode {
674 __le16 action;
675 __le16 nullpktinterval;
676 __le16 multipledtim;
677 __le16 reserved;
678 __le16 locallisteninterval;
679} __attribute__ ((packed));
680
681struct cmd_confirm_sleep {
682 struct cmd_header hdr;
683
684 __le16 action;
685 __le16 nullpktinterval;
686 __le16 multipledtim;
687 __le16 reserved;
688 __le16 locallisteninterval;
689} __attribute__ ((packed));
690
691struct cmd_ds_802_11_data_rate {
692 struct cmd_header hdr;
693
694 __le16 action;
695 __le16 reserved;
696 u8 rates[MAX_RATES];
697} __attribute__ ((packed));
698
699struct cmd_ds_802_11_rate_adapt_rateset {
700 struct cmd_header hdr;
701 __le16 action;
702 __le16 enablehwauto;
703 __le16 bitmap;
704} __attribute__ ((packed));
705
706struct cmd_ds_802_11_ad_hoc_start {
707 struct cmd_header hdr;
708
709 u8 ssid[IEEE80211_MAX_SSID_LEN];
710 u8 bsstype;
711 __le16 beaconperiod;
712 u8 dtimperiod; /* Reserved on v9 and later */
713 struct ieee_ie_ibss_param_set ibss;
714 u8 reserved1[4];
715 struct ieee_ie_ds_param_set ds;
716 u8 reserved2[4];
717 __le16 probedelay; /* Reserved on v9 and later */
718 __le16 capability;
719 u8 rates[MAX_RATES];
720 u8 tlv_memory_size_pad[100];
721} __attribute__ ((packed));
722
723struct cmd_ds_802_11_ad_hoc_result {
724 struct cmd_header hdr;
725
726 u8 pad[3];
727 u8 bssid[ETH_ALEN];
728} __attribute__ ((packed));
729
730struct adhoc_bssdesc {
731 u8 bssid[ETH_ALEN];
732 u8 ssid[IEEE80211_MAX_SSID_LEN];
733 u8 type;
734 __le16 beaconperiod;
735 u8 dtimperiod;
736 __le64 timestamp;
737 __le64 localtime;
738 struct ieee_ie_ds_param_set ds;
739 u8 reserved1[4];
740 struct ieee_ie_ibss_param_set ibss;
741 u8 reserved2[4];
742 __le16 capability;
743 u8 rates[MAX_RATES];
744
745 /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
746 * Adhoc join command and will cause a binary layout mismatch with
747 * the firmware
748 */
749} __attribute__ ((packed));
750
751struct cmd_ds_802_11_ad_hoc_join {
752 struct cmd_header hdr;
753
754 struct adhoc_bssdesc bss;
755 __le16 failtimeout; /* Reserved on v9 and later */
756 __le16 probedelay; /* Reserved on v9 and later */
757} __attribute__ ((packed));
758
759struct cmd_ds_802_11_ad_hoc_stop {
760 struct cmd_header hdr;
761} __attribute__ ((packed));
762
763struct cmd_ds_802_11_enable_rsn {
764 struct cmd_header hdr;
765
766 __le16 action;
767 __le16 enable;
768} __attribute__ ((packed));
769
770struct MrvlIEtype_keyParamSet {
771 /* type ID */
772 __le16 type;
773
774 /* length of Payload */
775 __le16 length;
776
777 /* type of key: WEP=0, TKIP=1, AES=2 */
778 __le16 keytypeid;
779
780 /* key control Info specific to a keytypeid */
781 __le16 keyinfo;
782
783 /* length of key */
784 __le16 keylen;
785
786 /* key material of size keylen */
787 u8 key[32];
788} __attribute__ ((packed));
789
790#define MAX_WOL_RULES 16
791
792struct host_wol_rule {
793 uint8_t rule_no;
794 uint8_t rule_ops;
795 __le16 sig_offset;
796 __le16 sig_length;
797 __le16 reserve;
798 __be32 sig_mask;
799 __be32 signature;
800} __attribute__ ((packed));
801
802struct wol_config {
803 uint8_t action;
804 uint8_t pattern;
805 uint8_t no_rules_in_cmd;
806 uint8_t result;
807 struct host_wol_rule rule[MAX_WOL_RULES];
808} __attribute__ ((packed));
809
810struct cmd_ds_host_sleep {
811 struct cmd_header hdr;
812 __le32 criteria;
813 uint8_t gpio;
814 uint16_t gap;
815 struct wol_config wol_conf;
816} __attribute__ ((packed));
817
818
819
820struct cmd_ds_802_11_key_material {
821 struct cmd_header hdr;
822
823 __le16 action;
824 struct MrvlIEtype_keyParamSet keyParamSet[2];
825} __attribute__ ((packed));
826
827struct cmd_ds_802_11_eeprom_access {
828 struct cmd_header hdr;
829 __le16 action;
830 __le16 offset;
831 __le16 len;
832 /* firmware says it returns a maximum of 20 bytes */
833#define LBS_EEPROM_READ_LEN 20
834 u8 value[LBS_EEPROM_READ_LEN];
835} __attribute__ ((packed));
836
837struct cmd_ds_802_11_tpc_cfg {
838 struct cmd_header hdr;
839
840 __le16 action;
841 uint8_t enable;
842 int8_t P0;
843 int8_t P1;
844 int8_t P2;
845 uint8_t usesnr;
846} __attribute__ ((packed));
847
848
849struct cmd_ds_802_11_pa_cfg {
850 struct cmd_header hdr;
851
852 __le16 action;
853 uint8_t enable;
854 int8_t P0;
855 int8_t P1;
856 int8_t P2;
857} __attribute__ ((packed));
858
859
860struct cmd_ds_802_11_led_ctrl {
861 __le16 action;
862 __le16 numled;
863 u8 data[256];
864} __attribute__ ((packed));
865
866struct cmd_ds_802_11_afc {
867 __le16 afc_auto;
868 union {
869 struct {
870 __le16 threshold;
871 __le16 period;
872 };
873 struct {
874 __le16 timing_offset; /* signed */
875 __le16 carrier_offset; /* signed */
876 };
877 };
878} __attribute__ ((packed));
879
880struct cmd_tx_rate_query {
881 __le16 txrate;
882} __attribute__ ((packed));
883
884struct cmd_ds_get_tsf {
885 __le64 tsfvalue;
886} __attribute__ ((packed));
887
888struct cmd_ds_bt_access {
889 __le16 action;
890 __le32 id;
891 u8 addr1[ETH_ALEN];
892 u8 addr2[ETH_ALEN];
893} __attribute__ ((packed));
894
895struct cmd_ds_fwt_access {
896 __le16 action;
897 __le32 id;
898 u8 valid;
899 u8 da[ETH_ALEN];
900 u8 dir;
901 u8 ra[ETH_ALEN];
902 __le32 ssn;
903 __le32 dsn;
904 __le32 metric;
905 u8 rate;
906 u8 hopcount;
907 u8 ttl;
908 __le32 expiration;
909 u8 sleepmode;
910 __le32 snr;
911 __le32 references;
912 u8 prec[ETH_ALEN];
913} __attribute__ ((packed));
914
915struct cmd_ds_mesh_config {
916 struct cmd_header hdr;
917
918 __le16 action;
919 __le16 channel;
920 __le16 type;
921 __le16 length;
922 u8 data[128]; /* last position reserved */
923} __attribute__ ((packed));
924
925struct cmd_ds_mesh_access {
926 struct cmd_header hdr;
927
928 __le16 action;
929 __le32 data[32]; /* last position reserved */
930} __attribute__ ((packed));
931
932/* Number of stats counters returned by the firmware */
933#define MESH_STATS_NUM 8
934
935struct cmd_ds_command {
936 /* command header */
937 __le16 command;
938 __le16 size;
939 __le16 seqnum;
940 __le16 result;
941
942 /* command Body */
943 union {
944 struct cmd_ds_802_11_ps_mode psmode;
945 struct cmd_ds_802_11_monitor_mode monitor;
946 struct cmd_ds_802_11_rssi rssi;
947 struct cmd_ds_802_11_rssi_rsp rssirsp;
948 struct cmd_ds_mac_reg_access macreg;
949 struct cmd_ds_bbp_reg_access bbpreg;
950 struct cmd_ds_rf_reg_access rfreg;
951
952 struct cmd_ds_802_11_tpc_cfg tpccfg;
953 struct cmd_ds_802_11_afc afc;
954 struct cmd_ds_802_11_led_ctrl ledgpio;
955
956 struct cmd_ds_bt_access bt;
957 struct cmd_ds_fwt_access fwt;
958 struct cmd_ds_802_11_beacon_control bcn_ctrl;
959 } params;
960} __attribute__ ((packed));
961
305#endif 962#endif
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
deleted file mode 100644
index c8a1998d4744..000000000000
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ /dev/null
@@ -1,800 +0,0 @@
1/*
2 * This file contains the function prototypes, data structure
3 * and defines for all the host/station commands
4 */
5#ifndef _LBS_HOSTCMD_H
6#define _LBS_HOSTCMD_H
7
8#include <linux/wireless.h>
9#include "11d.h"
10#include "types.h"
11
12/* 802.11-related definitions */
13
14/* TxPD descriptor */
15struct txpd {
16 /* union to cope up with later FW revisions */
17 union {
18 /* Current Tx packet status */
19 __le32 tx_status;
20 struct {
21 /* BSS type: client, AP, etc. */
22 u8 bss_type;
23 /* BSS number */
24 u8 bss_num;
25 /* Reserved */
26 __le16 reserved;
27 } bss;
28 } u;
29 /* Tx control */
30 __le32 tx_control;
31 __le32 tx_packet_location;
32 /* Tx packet length */
33 __le16 tx_packet_length;
34 /* First 2 byte of destination MAC address */
35 u8 tx_dest_addr_high[2];
36 /* Last 4 byte of destination MAC address */
37 u8 tx_dest_addr_low[4];
38 /* Pkt Priority */
39 u8 priority;
40 /* Pkt Trasnit Power control */
41 u8 powermgmt;
42 /* Amount of time the packet has been queued in the driver (units = 2ms) */
43 u8 pktdelay_2ms;
44 /* reserved */
45 u8 reserved1;
46} __attribute__ ((packed));
47
48/* RxPD Descriptor */
49struct rxpd {
50 /* union to cope up with later FW revisions */
51 union {
52 /* Current Rx packet status */
53 __le16 status;
54 struct {
55 /* BSS type: client, AP, etc. */
56 u8 bss_type;
57 /* BSS number */
58 u8 bss_num;
59 } __attribute__ ((packed)) bss;
60 } __attribute__ ((packed)) u;
61
62 /* SNR */
63 u8 snr;
64
65 /* Tx control */
66 u8 rx_control;
67
68 /* Pkt length */
69 __le16 pkt_len;
70
71 /* Noise Floor */
72 u8 nf;
73
74 /* Rx Packet Rate */
75 u8 rx_rate;
76
77 /* Pkt addr */
78 __le32 pkt_ptr;
79
80 /* Next Rx RxPD addr */
81 __le32 next_rxpd_ptr;
82
83 /* Pkt Priority */
84 u8 priority;
85 u8 reserved[3];
86} __attribute__ ((packed));
87
88struct cmd_header {
89 __le16 command;
90 __le16 size;
91 __le16 seqnum;
92 __le16 result;
93} __attribute__ ((packed));
94
95struct cmd_ctrl_node {
96 struct list_head list;
97 int result;
98 /* command response */
99 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *);
100 unsigned long callback_arg;
101 /* command data */
102 struct cmd_header *cmdbuf;
103 /* wait queue */
104 u16 cmdwaitqwoken;
105 wait_queue_head_t cmdwait_q;
106};
107
108/* Generic structure to hold all key types. */
109struct enc_key {
110 u16 len;
111 u16 flags; /* KEY_INFO_* from defs.h */
112 u16 type; /* KEY_TYPE_* from defs.h */
113 u8 key[32];
114};
115
116/* lbs_offset_value */
117struct lbs_offset_value {
118 u32 offset;
119 u32 value;
120} __attribute__ ((packed));
121
122/* Define general data structure */
123/* cmd_DS_GEN */
124struct cmd_ds_gen {
125 __le16 command;
126 __le16 size;
127 __le16 seqnum;
128 __le16 result;
129 void *cmdresp[0];
130} __attribute__ ((packed));
131
132#define S_DS_GEN sizeof(struct cmd_ds_gen)
133
134
135/*
136 * Define data structure for CMD_GET_HW_SPEC
137 * This structure defines the response for the GET_HW_SPEC command
138 */
139struct cmd_ds_get_hw_spec {
140 struct cmd_header hdr;
141
142 /* HW Interface version number */
143 __le16 hwifversion;
144 /* HW version number */
145 __le16 version;
146 /* Max number of TxPD FW can handle */
147 __le16 nr_txpd;
148 /* Max no of Multicast address */
149 __le16 nr_mcast_adr;
150 /* MAC address */
151 u8 permanentaddr[6];
152
153 /* region Code */
154 __le16 regioncode;
155
156 /* Number of antenna used */
157 __le16 nr_antenna;
158
159 /* FW release number, example 0x01030304 = 2.3.4p1 */
160 __le32 fwrelease;
161
162 /* Base Address of TxPD queue */
163 __le32 wcb_base;
164 /* Read Pointer of RxPd queue */
165 __le32 rxpd_rdptr;
166
167 /* Write Pointer of RxPd queue */
168 __le32 rxpd_wrptr;
169
170 /*FW/HW capability */
171 __le32 fwcapinfo;
172} __attribute__ ((packed));
173
174struct cmd_ds_802_11_subscribe_event {
175 struct cmd_header hdr;
176
177 __le16 action;
178 __le16 events;
179
180 /* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
181 * number of TLVs. From the v5.1 manual, those TLVs would add up to
182 * 40 bytes. However, future firmware might add additional TLVs, so I
183 * bump this up a bit.
184 */
185 uint8_t tlv[128];
186} __attribute__ ((packed));
187
188/*
189 * This scan handle Country Information IE(802.11d compliant)
190 * Define data structure for CMD_802_11_SCAN
191 */
192struct cmd_ds_802_11_scan {
193 struct cmd_header hdr;
194
195 uint8_t bsstype;
196 uint8_t bssid[ETH_ALEN];
197 uint8_t tlvbuffer[0];
198#if 0
199 mrvlietypes_ssidparamset_t ssidParamSet;
200 mrvlietypes_chanlistparamset_t ChanListParamSet;
201 mrvlietypes_ratesparamset_t OpRateSet;
202#endif
203} __attribute__ ((packed));
204
205struct cmd_ds_802_11_scan_rsp {
206 struct cmd_header hdr;
207
208 __le16 bssdescriptsize;
209 uint8_t nr_sets;
210 uint8_t bssdesc_and_tlvbuffer[0];
211} __attribute__ ((packed));
212
213struct cmd_ds_802_11_get_log {
214 struct cmd_header hdr;
215
216 __le32 mcasttxframe;
217 __le32 failed;
218 __le32 retry;
219 __le32 multiretry;
220 __le32 framedup;
221 __le32 rtssuccess;
222 __le32 rtsfailure;
223 __le32 ackfailure;
224 __le32 rxfrag;
225 __le32 mcastrxframe;
226 __le32 fcserror;
227 __le32 txframe;
228 __le32 wepundecryptable;
229} __attribute__ ((packed));
230
231struct cmd_ds_mac_control {
232 struct cmd_header hdr;
233 __le16 action;
234 u16 reserved;
235} __attribute__ ((packed));
236
237struct cmd_ds_mac_multicast_adr {
238 struct cmd_header hdr;
239 __le16 action;
240 __le16 nr_of_adrs;
241 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
242} __attribute__ ((packed));
243
244struct cmd_ds_gspi_bus_config {
245 struct cmd_header hdr;
246 __le16 action;
247 __le16 bus_delay_mode;
248 __le16 host_time_delay_to_read_port;
249 __le16 host_time_delay_to_read_register;
250} __attribute__ ((packed));
251
252struct cmd_ds_802_11_authenticate {
253 struct cmd_header hdr;
254
255 u8 bssid[ETH_ALEN];
256 u8 authtype;
257 u8 reserved[10];
258} __attribute__ ((packed));
259
260struct cmd_ds_802_11_deauthenticate {
261 struct cmd_header hdr;
262
263 u8 macaddr[ETH_ALEN];
264 __le16 reasoncode;
265} __attribute__ ((packed));
266
267struct cmd_ds_802_11_associate {
268 struct cmd_header hdr;
269
270 u8 bssid[6];
271 __le16 capability;
272 __le16 listeninterval;
273 __le16 bcnperiod;
274 u8 dtimperiod;
275 u8 iebuf[512]; /* Enough for required and most optional IEs */
276} __attribute__ ((packed));
277
278struct cmd_ds_802_11_associate_response {
279 struct cmd_header hdr;
280
281 __le16 capability;
282 __le16 statuscode;
283 __le16 aid;
284 u8 iebuf[512];
285} __attribute__ ((packed));
286
287struct cmd_ds_802_11_set_wep {
288 struct cmd_header hdr;
289
290 /* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
291 __le16 action;
292
293 /* key Index selected for Tx */
294 __le16 keyindex;
295
296 /* 40, 128bit or TXWEP */
297 uint8_t keytype[4];
298 uint8_t keymaterial[4][16];
299} __attribute__ ((packed));
300
301struct cmd_ds_802_3_get_stat {
302 __le32 xmitok;
303 __le32 rcvok;
304 __le32 xmiterror;
305 __le32 rcverror;
306 __le32 rcvnobuffer;
307 __le32 rcvcrcerror;
308} __attribute__ ((packed));
309
310struct cmd_ds_802_11_get_stat {
311 __le32 txfragmentcnt;
312 __le32 mcasttxframecnt;
313 __le32 failedcnt;
314 __le32 retrycnt;
315 __le32 Multipleretrycnt;
316 __le32 rtssuccesscnt;
317 __le32 rtsfailurecnt;
318 __le32 ackfailurecnt;
319 __le32 frameduplicatecnt;
320 __le32 rxfragmentcnt;
321 __le32 mcastrxframecnt;
322 __le32 fcserrorcnt;
323 __le32 bcasttxframecnt;
324 __le32 bcastrxframecnt;
325 __le32 txbeacon;
326 __le32 rxbeacon;
327 __le32 wepundecryptable;
328} __attribute__ ((packed));
329
330struct cmd_ds_802_11_snmp_mib {
331 struct cmd_header hdr;
332
333 __le16 action;
334 __le16 oid;
335 __le16 bufsize;
336 u8 value[128];
337} __attribute__ ((packed));
338
339struct cmd_ds_mac_reg_map {
340 __le16 buffersize;
341 u8 regmap[128];
342 __le16 reserved;
343} __attribute__ ((packed));
344
345struct cmd_ds_bbp_reg_map {
346 __le16 buffersize;
347 u8 regmap[128];
348 __le16 reserved;
349} __attribute__ ((packed));
350
351struct cmd_ds_rf_reg_map {
352 __le16 buffersize;
353 u8 regmap[64];
354 __le16 reserved;
355} __attribute__ ((packed));
356
357struct cmd_ds_mac_reg_access {
358 __le16 action;
359 __le16 offset;
360 __le32 value;
361} __attribute__ ((packed));
362
363struct cmd_ds_bbp_reg_access {
364 __le16 action;
365 __le16 offset;
366 u8 value;
367 u8 reserved[3];
368} __attribute__ ((packed));
369
370struct cmd_ds_rf_reg_access {
371 __le16 action;
372 __le16 offset;
373 u8 value;
374 u8 reserved[3];
375} __attribute__ ((packed));
376
377struct cmd_ds_802_11_radio_control {
378 struct cmd_header hdr;
379
380 __le16 action;
381 __le16 control;
382} __attribute__ ((packed));
383
384struct cmd_ds_802_11_beacon_control {
385 __le16 action;
386 __le16 beacon_enable;
387 __le16 beacon_period;
388} __attribute__ ((packed));
389
390struct cmd_ds_802_11_sleep_params {
391 struct cmd_header hdr;
392
393 /* ACT_GET/ACT_SET */
394 __le16 action;
395
396 /* Sleep clock error in ppm */
397 __le16 error;
398
399 /* Wakeup offset in usec */
400 __le16 offset;
401
402 /* Clock stabilization time in usec */
403 __le16 stabletime;
404
405 /* control periodic calibration */
406 uint8_t calcontrol;
407
408 /* control the use of external sleep clock */
409 uint8_t externalsleepclk;
410
411 /* reserved field, should be set to zero */
412 __le16 reserved;
413} __attribute__ ((packed));
414
415struct cmd_ds_802_11_inactivity_timeout {
416 struct cmd_header hdr;
417
418 /* ACT_GET/ACT_SET */
419 __le16 action;
420
421 /* Inactivity timeout in msec */
422 __le16 timeout;
423} __attribute__ ((packed));
424
425struct cmd_ds_802_11_rf_channel {
426 struct cmd_header hdr;
427
428 __le16 action;
429 __le16 channel;
430 __le16 rftype; /* unused */
431 __le16 reserved; /* unused */
432 u8 channellist[32]; /* unused */
433} __attribute__ ((packed));
434
435struct cmd_ds_802_11_rssi {
436 /* weighting factor */
437 __le16 N;
438
439 __le16 reserved_0;
440 __le16 reserved_1;
441 __le16 reserved_2;
442} __attribute__ ((packed));
443
444struct cmd_ds_802_11_rssi_rsp {
445 __le16 SNR;
446 __le16 noisefloor;
447 __le16 avgSNR;
448 __le16 avgnoisefloor;
449} __attribute__ ((packed));
450
451struct cmd_ds_802_11_mac_address {
452 struct cmd_header hdr;
453
454 __le16 action;
455 u8 macadd[ETH_ALEN];
456} __attribute__ ((packed));
457
458struct cmd_ds_802_11_rf_tx_power {
459 struct cmd_header hdr;
460
461 __le16 action;
462 __le16 curlevel;
463 s8 maxlevel;
464 s8 minlevel;
465} __attribute__ ((packed));
466
467struct cmd_ds_802_11_rf_antenna {
468 __le16 action;
469
470 /* Number of antennas or 0xffff(diversity) */
471 __le16 antennamode;
472
473} __attribute__ ((packed));
474
475struct cmd_ds_802_11_monitor_mode {
476 __le16 action;
477 __le16 mode;
478} __attribute__ ((packed));
479
480struct cmd_ds_set_boot2_ver {
481 struct cmd_header hdr;
482
483 __le16 action;
484 __le16 version;
485} __attribute__ ((packed));
486
487struct cmd_ds_802_11_fw_wake_method {
488 struct cmd_header hdr;
489
490 __le16 action;
491 __le16 method;
492} __attribute__ ((packed));
493
494struct cmd_ds_802_11_sleep_period {
495 struct cmd_header hdr;
496
497 __le16 action;
498 __le16 period;
499} __attribute__ ((packed));
500
501struct cmd_ds_802_11_ps_mode {
502 __le16 action;
503 __le16 nullpktinterval;
504 __le16 multipledtim;
505 __le16 reserved;
506 __le16 locallisteninterval;
507} __attribute__ ((packed));
508
509struct cmd_confirm_sleep {
510 struct cmd_header hdr;
511
512 __le16 action;
513 __le16 nullpktinterval;
514 __le16 multipledtim;
515 __le16 reserved;
516 __le16 locallisteninterval;
517} __attribute__ ((packed));
518
519struct cmd_ds_802_11_data_rate {
520 struct cmd_header hdr;
521
522 __le16 action;
523 __le16 reserved;
524 u8 rates[MAX_RATES];
525} __attribute__ ((packed));
526
527struct cmd_ds_802_11_rate_adapt_rateset {
528 struct cmd_header hdr;
529 __le16 action;
530 __le16 enablehwauto;
531 __le16 bitmap;
532} __attribute__ ((packed));
533
534struct cmd_ds_802_11_ad_hoc_start {
535 struct cmd_header hdr;
536
537 u8 ssid[IW_ESSID_MAX_SIZE];
538 u8 bsstype;
539 __le16 beaconperiod;
540 u8 dtimperiod; /* Reserved on v9 and later */
541 struct ieee_ie_ibss_param_set ibss;
542 u8 reserved1[4];
543 struct ieee_ie_ds_param_set ds;
544 u8 reserved2[4];
545 __le16 probedelay; /* Reserved on v9 and later */
546 __le16 capability;
547 u8 rates[MAX_RATES];
548 u8 tlv_memory_size_pad[100];
549} __attribute__ ((packed));
550
551struct cmd_ds_802_11_ad_hoc_result {
552 struct cmd_header hdr;
553
554 u8 pad[3];
555 u8 bssid[ETH_ALEN];
556} __attribute__ ((packed));
557
558struct adhoc_bssdesc {
559 u8 bssid[ETH_ALEN];
560 u8 ssid[IW_ESSID_MAX_SIZE];
561 u8 type;
562 __le16 beaconperiod;
563 u8 dtimperiod;
564 __le64 timestamp;
565 __le64 localtime;
566 struct ieee_ie_ds_param_set ds;
567 u8 reserved1[4];
568 struct ieee_ie_ibss_param_set ibss;
569 u8 reserved2[4];
570 __le16 capability;
571 u8 rates[MAX_RATES];
572
573 /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
574 * Adhoc join command and will cause a binary layout mismatch with
575 * the firmware
576 */
577} __attribute__ ((packed));
578
579struct cmd_ds_802_11_ad_hoc_join {
580 struct cmd_header hdr;
581
582 struct adhoc_bssdesc bss;
583 __le16 failtimeout; /* Reserved on v9 and later */
584 __le16 probedelay; /* Reserved on v9 and later */
585} __attribute__ ((packed));
586
587struct cmd_ds_802_11_ad_hoc_stop {
588 struct cmd_header hdr;
589} __attribute__ ((packed));
590
591struct cmd_ds_802_11_enable_rsn {
592 struct cmd_header hdr;
593
594 __le16 action;
595 __le16 enable;
596} __attribute__ ((packed));
597
598struct MrvlIEtype_keyParamSet {
599 /* type ID */
600 __le16 type;
601
602 /* length of Payload */
603 __le16 length;
604
605 /* type of key: WEP=0, TKIP=1, AES=2 */
606 __le16 keytypeid;
607
608 /* key control Info specific to a keytypeid */
609 __le16 keyinfo;
610
611 /* length of key */
612 __le16 keylen;
613
614 /* key material of size keylen */
615 u8 key[32];
616} __attribute__ ((packed));
617
618#define MAX_WOL_RULES 16
619
620struct host_wol_rule {
621 uint8_t rule_no;
622 uint8_t rule_ops;
623 __le16 sig_offset;
624 __le16 sig_length;
625 __le16 reserve;
626 __be32 sig_mask;
627 __be32 signature;
628} __attribute__ ((packed));
629
630struct wol_config {
631 uint8_t action;
632 uint8_t pattern;
633 uint8_t no_rules_in_cmd;
634 uint8_t result;
635 struct host_wol_rule rule[MAX_WOL_RULES];
636} __attribute__ ((packed));
637
638struct cmd_ds_host_sleep {
639 struct cmd_header hdr;
640 __le32 criteria;
641 uint8_t gpio;
642 uint16_t gap;
643 struct wol_config wol_conf;
644} __attribute__ ((packed));
645
646
647
648struct cmd_ds_802_11_key_material {
649 struct cmd_header hdr;
650
651 __le16 action;
652 struct MrvlIEtype_keyParamSet keyParamSet[2];
653} __attribute__ ((packed));
654
655struct cmd_ds_802_11_eeprom_access {
656 struct cmd_header hdr;
657 __le16 action;
658 __le16 offset;
659 __le16 len;
660 /* firmware says it returns a maximum of 20 bytes */
661#define LBS_EEPROM_READ_LEN 20
662 u8 value[LBS_EEPROM_READ_LEN];
663} __attribute__ ((packed));
664
665struct cmd_ds_802_11_tpc_cfg {
666 struct cmd_header hdr;
667
668 __le16 action;
669 uint8_t enable;
670 int8_t P0;
671 int8_t P1;
672 int8_t P2;
673 uint8_t usesnr;
674} __attribute__ ((packed));
675
676
677struct cmd_ds_802_11_pa_cfg {
678 struct cmd_header hdr;
679
680 __le16 action;
681 uint8_t enable;
682 int8_t P0;
683 int8_t P1;
684 int8_t P2;
685} __attribute__ ((packed));
686
687
688struct cmd_ds_802_11_led_ctrl {
689 __le16 action;
690 __le16 numled;
691 u8 data[256];
692} __attribute__ ((packed));
693
694struct cmd_ds_802_11_afc {
695 __le16 afc_auto;
696 union {
697 struct {
698 __le16 threshold;
699 __le16 period;
700 };
701 struct {
702 __le16 timing_offset; /* signed */
703 __le16 carrier_offset; /* signed */
704 };
705 };
706} __attribute__ ((packed));
707
708struct cmd_tx_rate_query {
709 __le16 txrate;
710} __attribute__ ((packed));
711
712struct cmd_ds_get_tsf {
713 __le64 tsfvalue;
714} __attribute__ ((packed));
715
716struct cmd_ds_bt_access {
717 __le16 action;
718 __le32 id;
719 u8 addr1[ETH_ALEN];
720 u8 addr2[ETH_ALEN];
721} __attribute__ ((packed));
722
723struct cmd_ds_fwt_access {
724 __le16 action;
725 __le32 id;
726 u8 valid;
727 u8 da[ETH_ALEN];
728 u8 dir;
729 u8 ra[ETH_ALEN];
730 __le32 ssn;
731 __le32 dsn;
732 __le32 metric;
733 u8 rate;
734 u8 hopcount;
735 u8 ttl;
736 __le32 expiration;
737 u8 sleepmode;
738 __le32 snr;
739 __le32 references;
740 u8 prec[ETH_ALEN];
741} __attribute__ ((packed));
742
743
744struct cmd_ds_mesh_config {
745 struct cmd_header hdr;
746
747 __le16 action;
748 __le16 channel;
749 __le16 type;
750 __le16 length;
751 u8 data[128]; /* last position reserved */
752} __attribute__ ((packed));
753
754
755struct cmd_ds_mesh_access {
756 struct cmd_header hdr;
757
758 __le16 action;
759 __le32 data[32]; /* last position reserved */
760} __attribute__ ((packed));
761
762/* Number of stats counters returned by the firmware */
763#define MESH_STATS_NUM 8
764
765struct cmd_ds_command {
766 /* command header */
767 __le16 command;
768 __le16 size;
769 __le16 seqnum;
770 __le16 result;
771
772 /* command Body */
773 union {
774 struct cmd_ds_802_11_ps_mode psmode;
775 struct cmd_ds_802_11_get_stat gstat;
776 struct cmd_ds_802_3_get_stat gstat_8023;
777 struct cmd_ds_802_11_rf_antenna rant;
778 struct cmd_ds_802_11_monitor_mode monitor;
779 struct cmd_ds_802_11_rssi rssi;
780 struct cmd_ds_802_11_rssi_rsp rssirsp;
781 struct cmd_ds_mac_reg_access macreg;
782 struct cmd_ds_bbp_reg_access bbpreg;
783 struct cmd_ds_rf_reg_access rfreg;
784
785 struct cmd_ds_802_11d_domain_info domaininfo;
786 struct cmd_ds_802_11d_domain_info domaininforesp;
787
788 struct cmd_ds_802_11_tpc_cfg tpccfg;
789 struct cmd_ds_802_11_afc afc;
790 struct cmd_ds_802_11_led_ctrl ledgpio;
791
792 struct cmd_tx_rate_query txrate;
793 struct cmd_ds_bt_access bt;
794 struct cmd_ds_fwt_access fwt;
795 struct cmd_ds_get_tsf gettsf;
796 struct cmd_ds_802_11_beacon_control bcn_ctrl;
797 } params;
798} __attribute__ ((packed));
799
800#endif
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 62381768f2d5..875516db319c 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -48,6 +48,7 @@
48MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>"); 48MODULE_AUTHOR("Holger Schurig <hs4233@mail.mn-solutions.de>");
49MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards"); 49MODULE_DESCRIPTION("Driver for Marvell 83xx compact flash WLAN cards");
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_FIRMWARE("libertas_cs_helper.fw");
51 52
52 53
53 54
@@ -946,6 +947,9 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
946 card->priv = priv; 947 card->priv = priv;
947 priv->card = card; 948 priv->card = card;
948 priv->hw_host_to_card = if_cs_host_to_card; 949 priv->hw_host_to_card = if_cs_host_to_card;
950 priv->enter_deep_sleep = NULL;
951 priv->exit_deep_sleep = NULL;
952 priv->reset_deep_sleep_wakeup = NULL;
949 priv->fw_ready = 1; 953 priv->fw_ready = 1;
950 954
951 /* Now actually get the IRQ */ 955 /* Now actually get the IRQ */
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 485a8d406525..09fcfad742e7 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -99,6 +99,12 @@ static struct if_sdio_model if_sdio_models[] = {
99 .firmware = "sd8688.bin", 99 .firmware = "sd8688.bin",
100 }, 100 },
101}; 101};
102MODULE_FIRMWARE("sd8385_helper.bin");
103MODULE_FIRMWARE("sd8385.bin");
104MODULE_FIRMWARE("sd8686_helper.bin");
105MODULE_FIRMWARE("sd8686.bin");
106MODULE_FIRMWARE("sd8688_helper.bin");
107MODULE_FIRMWARE("sd8688.bin");
102 108
103struct if_sdio_packet { 109struct if_sdio_packet {
104 struct if_sdio_packet *next; 110 struct if_sdio_packet *next;
@@ -831,6 +837,58 @@ out:
831 return ret; 837 return ret;
832} 838}
833 839
840static int if_sdio_enter_deep_sleep(struct lbs_private *priv)
841{
842 int ret = -1;
843 struct cmd_header cmd;
844
845 memset(&cmd, 0, sizeof(cmd));
846
847 lbs_deb_sdio("send DEEP_SLEEP command\n");
848 ret = __lbs_cmd(priv, CMD_802_11_DEEP_SLEEP, &cmd, sizeof(cmd),
849 lbs_cmd_copyback, (unsigned long) &cmd);
850 if (ret)
851 lbs_pr_err("DEEP_SLEEP cmd failed\n");
852
853 mdelay(200);
854 return ret;
855}
856
857static int if_sdio_exit_deep_sleep(struct lbs_private *priv)
858{
859 struct if_sdio_card *card = priv->card;
860 int ret = -1;
861
862 lbs_deb_enter(LBS_DEB_SDIO);
863 sdio_claim_host(card->func);
864
865 sdio_writeb(card->func, HOST_POWER_UP, CONFIGURATION_REG, &ret);
866 if (ret)
867 lbs_pr_err("sdio_writeb failed!\n");
868
869 sdio_release_host(card->func);
870 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
871 return ret;
872}
873
874static int if_sdio_reset_deep_sleep_wakeup(struct lbs_private *priv)
875{
876 struct if_sdio_card *card = priv->card;
877 int ret = -1;
878
879 lbs_deb_enter(LBS_DEB_SDIO);
880 sdio_claim_host(card->func);
881
882 sdio_writeb(card->func, 0, CONFIGURATION_REG, &ret);
883 if (ret)
884 lbs_pr_err("sdio_writeb failed!\n");
885
886 sdio_release_host(card->func);
887 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
888 return ret;
889
890}
891
834/*******************************************************************/ 892/*******************************************************************/
835/* SDIO callbacks */ 893/* SDIO callbacks */
836/*******************************************************************/ 894/*******************************************************************/
@@ -859,6 +917,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
859 * Ignore the define name, this really means the card has 917 * Ignore the define name, this really means the card has
860 * successfully received the command. 918 * successfully received the command.
861 */ 919 */
920 card->priv->is_activity_detected = 1;
862 if (cause & IF_SDIO_H_INT_DNLD) 921 if (cause & IF_SDIO_H_INT_DNLD)
863 lbs_host_to_card_done(card->priv); 922 lbs_host_to_card_done(card->priv);
864 923
@@ -998,6 +1057,9 @@ static int if_sdio_probe(struct sdio_func *func,
998 1057
999 priv->card = card; 1058 priv->card = card;
1000 priv->hw_host_to_card = if_sdio_host_to_card; 1059 priv->hw_host_to_card = if_sdio_host_to_card;
1060 priv->enter_deep_sleep = if_sdio_enter_deep_sleep;
1061 priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
1062 priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
1001 1063
1002 priv->fw_ready = 1; 1064 priv->fw_ready = 1;
1003 1065
diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h
index 60c9b2fcef03..12179c1dc9c9 100644
--- a/drivers/net/wireless/libertas/if_sdio.h
+++ b/drivers/net/wireless/libertas/if_sdio.h
@@ -51,5 +51,6 @@
51#define IF_SDIO_EVENT 0x80fc 51#define IF_SDIO_EVENT 0x80fc
52 52
53#define IF_SDIO_BLOCK_SIZE 256 53#define IF_SDIO_BLOCK_SIZE 256
54 54#define CONFIGURATION_REG 0x03
55#define HOST_POWER_UP (0x1U << 1)
55#endif 56#endif
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 5b3672c4d0cc..bf4bfbae6227 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -32,12 +32,6 @@
32#include "dev.h" 32#include "dev.h"
33#include "if_spi.h" 33#include "if_spi.h"
34 34
35struct if_spi_packet {
36 struct list_head list;
37 u16 blen;
38 u8 buffer[0] __attribute__((aligned(4)));
39};
40
41struct if_spi_card { 35struct if_spi_card {
42 struct spi_device *spi; 36 struct spi_device *spi;
43 struct lbs_private *priv; 37 struct lbs_private *priv;
@@ -66,33 +60,10 @@ struct if_spi_card {
66 struct semaphore spi_thread_terminated; 60 struct semaphore spi_thread_terminated;
67 61
68 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; 62 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
69
70 /* A buffer of incoming packets from libertas core.
71 * Since we can't sleep in hw_host_to_card, we have to buffer
72 * them. */
73 struct list_head cmd_packet_list;
74 struct list_head data_packet_list;
75
76 /* Protects cmd_packet_list and data_packet_list */
77 spinlock_t buffer_lock;
78}; 63};
79 64
80static void free_if_spi_card(struct if_spi_card *card) 65static void free_if_spi_card(struct if_spi_card *card)
81{ 66{
82 struct list_head *cursor, *next;
83 struct if_spi_packet *packet;
84
85 BUG_ON(card->run_thread);
86 list_for_each_safe(cursor, next, &card->cmd_packet_list) {
87 packet = container_of(cursor, struct if_spi_packet, list);
88 list_del(&packet->list);
89 kfree(packet);
90 }
91 list_for_each_safe(cursor, next, &card->data_packet_list) {
92 packet = container_of(cursor, struct if_spi_packet, list);
93 list_del(&packet->list);
94 kfree(packet);
95 }
96 spi_set_drvdata(card->spi, NULL); 67 spi_set_drvdata(card->spi, NULL);
97 kfree(card); 68 kfree(card);
98} 69}
@@ -774,40 +745,6 @@ out:
774 return err; 745 return err;
775} 746}
776 747
777/* Move data or a command from the host to the card. */
778static void if_spi_h2c(struct if_spi_card *card,
779 struct if_spi_packet *packet, int type)
780{
781 int err = 0;
782 u16 int_type, port_reg;
783
784 switch (type) {
785 case MVMS_DAT:
786 int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
787 port_reg = IF_SPI_DATA_RDWRPORT_REG;
788 break;
789 case MVMS_CMD:
790 int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
791 port_reg = IF_SPI_CMD_RDWRPORT_REG;
792 break;
793 default:
794 lbs_pr_err("can't transfer buffer of type %d\n", type);
795 err = -EINVAL;
796 goto out;
797 }
798
799 /* Write the data to the card */
800 err = spu_write(card, port_reg, packet->buffer, packet->blen);
801 if (err)
802 goto out;
803
804out:
805 kfree(packet);
806
807 if (err)
808 lbs_pr_err("%s: error %d\n", __func__, err);
809}
810
811/* Inform the host about a card event */ 748/* Inform the host about a card event */
812static void if_spi_e2h(struct if_spi_card *card) 749static void if_spi_e2h(struct if_spi_card *card)
813{ 750{
@@ -837,8 +774,6 @@ static int lbs_spi_thread(void *data)
837 int err; 774 int err;
838 struct if_spi_card *card = data; 775 struct if_spi_card *card = data;
839 u16 hiStatus; 776 u16 hiStatus;
840 unsigned long flags;
841 struct if_spi_packet *packet;
842 777
843 while (1) { 778 while (1) {
844 /* Wait to be woken up by one of two things. First, our ISR 779 /* Wait to be woken up by one of two things. First, our ISR
@@ -877,43 +812,9 @@ static int lbs_spi_thread(void *data)
877 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || 812 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
878 (card->priv->psstate != PS_STATE_FULL_POWER && 813 (card->priv->psstate != PS_STATE_FULL_POWER &&
879 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { 814 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
880 /* This means two things. First of all,
881 * if there was a previous command sent, the card has
882 * successfully received it.
883 * Secondly, it is now ready to download another
884 * command.
885 */
886 lbs_host_to_card_done(card->priv); 815 lbs_host_to_card_done(card->priv);
887
888 /* Do we have any command packets from the host to
889 * send? */
890 packet = NULL;
891 spin_lock_irqsave(&card->buffer_lock, flags);
892 if (!list_empty(&card->cmd_packet_list)) {
893 packet = (struct if_spi_packet *)(card->
894 cmd_packet_list.next);
895 list_del(&packet->list);
896 }
897 spin_unlock_irqrestore(&card->buffer_lock, flags);
898
899 if (packet)
900 if_spi_h2c(card, packet, MVMS_CMD);
901 } 816 }
902 if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
903 /* Do we have any data packets from the host to
904 * send? */
905 packet = NULL;
906 spin_lock_irqsave(&card->buffer_lock, flags);
907 if (!list_empty(&card->data_packet_list)) {
908 packet = (struct if_spi_packet *)(card->
909 data_packet_list.next);
910 list_del(&packet->list);
911 }
912 spin_unlock_irqrestore(&card->buffer_lock, flags);
913 817
914 if (packet)
915 if_spi_h2c(card, packet, MVMS_DAT);
916 }
917 if (hiStatus & IF_SPI_HIST_CARD_EVENT) 818 if (hiStatus & IF_SPI_HIST_CARD_EVENT)
918 if_spi_e2h(card); 819 if_spi_e2h(card);
919 820
@@ -942,40 +843,18 @@ static int if_spi_host_to_card(struct lbs_private *priv,
942 u8 type, u8 *buf, u16 nb) 843 u8 type, u8 *buf, u16 nb)
943{ 844{
944 int err = 0; 845 int err = 0;
945 unsigned long flags;
946 struct if_spi_card *card = priv->card; 846 struct if_spi_card *card = priv->card;
947 struct if_spi_packet *packet;
948 u16 blen;
949 847
950 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb); 848 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
951 849
952 if (nb == 0) { 850 nb = ALIGN(nb, 4);
953 lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
954 err = -EINVAL;
955 goto out;
956 }
957 blen = ALIGN(nb, 4);
958 packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
959 if (!packet) {
960 err = -ENOMEM;
961 goto out;
962 }
963 packet->blen = blen;
964 memcpy(packet->buffer, buf, nb);
965 memset(packet->buffer + nb, 0, blen - nb);
966 851
967 switch (type) { 852 switch (type) {
968 case MVMS_CMD: 853 case MVMS_CMD:
969 priv->dnld_sent = DNLD_CMD_SENT; 854 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
970 spin_lock_irqsave(&card->buffer_lock, flags);
971 list_add_tail(&packet->list, &card->cmd_packet_list);
972 spin_unlock_irqrestore(&card->buffer_lock, flags);
973 break; 855 break;
974 case MVMS_DAT: 856 case MVMS_DAT:
975 priv->dnld_sent = DNLD_DATA_SENT; 857 err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
976 spin_lock_irqsave(&card->buffer_lock, flags);
977 list_add_tail(&packet->list, &card->data_packet_list);
978 spin_unlock_irqrestore(&card->buffer_lock, flags);
979 break; 858 break;
980 default: 859 default:
981 lbs_pr_err("can't transfer buffer of type %d", type); 860 lbs_pr_err("can't transfer buffer of type %d", type);
@@ -983,9 +862,6 @@ static int if_spi_host_to_card(struct lbs_private *priv,
983 break; 862 break;
984 } 863 }
985 864
986 /* Wake up the spi thread */
987 up(&card->spi_ready);
988out:
989 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err); 865 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
990 return err; 866 return err;
991} 867}
@@ -1026,6 +902,10 @@ static int if_spi_calculate_fw_names(u16 card_id,
1026 chip_id_to_device_name[i].name); 902 chip_id_to_device_name[i].name);
1027 return 0; 903 return 0;
1028} 904}
905MODULE_FIRMWARE("libertas/gspi8385_hlp.bin");
906MODULE_FIRMWARE("libertas/gspi8385.bin");
907MODULE_FIRMWARE("libertas/gspi8686_hlp.bin");
908MODULE_FIRMWARE("libertas/gspi8686.bin");
1029 909
1030static int __devinit if_spi_probe(struct spi_device *spi) 910static int __devinit if_spi_probe(struct spi_device *spi)
1031{ 911{
@@ -1062,9 +942,6 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1062 942
1063 sema_init(&card->spi_ready, 0); 943 sema_init(&card->spi_ready, 0);
1064 sema_init(&card->spi_thread_terminated, 0); 944 sema_init(&card->spi_thread_terminated, 0);
1065 INIT_LIST_HEAD(&card->cmd_packet_list);
1066 INIT_LIST_HEAD(&card->data_packet_list);
1067 spin_lock_init(&card->buffer_lock);
1068 945
1069 /* Initialize the SPI Interface Unit */ 946 /* Initialize the SPI Interface Unit */
1070 err = spu_init(card, pdata->use_dummy_writes); 947 err = spu_init(card, pdata->use_dummy_writes);
@@ -1117,6 +994,9 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1117 card->priv = priv; 994 card->priv = priv;
1118 priv->card = card; 995 priv->card = card;
1119 priv->hw_host_to_card = if_spi_host_to_card; 996 priv->hw_host_to_card = if_spi_host_to_card;
997 priv->enter_deep_sleep = NULL;
998 priv->exit_deep_sleep = NULL;
999 priv->reset_deep_sleep_wakeup = NULL;
1120 priv->fw_ready = 1; 1000 priv->fw_ready = 1;
1121 1001
1122 /* Initialize interrupt handling stuff. */ 1002 /* Initialize interrupt handling stuff. */
@@ -1138,6 +1018,9 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1138 goto terminate_thread; 1018 goto terminate_thread;
1139 } 1019 }
1140 1020
1021 /* poke the IRQ handler so that we don't miss the first interrupt */
1022 up(&card->spi_ready);
1023
1141 /* Start the card. 1024 /* Start the card.
1142 * This will call register_netdev, and we'll start 1025 * This will call register_netdev, and we'll start
1143 * getting interrupts... */ 1026 * getting interrupts... */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 3fac4efa5ac8..65e174595d12 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -28,6 +28,8 @@
28static char *lbs_fw_name = "usb8388.bin"; 28static char *lbs_fw_name = "usb8388.bin";
29module_param_named(fw_name, lbs_fw_name, charp, 0644); 29module_param_named(fw_name, lbs_fw_name, charp, 0644);
30 30
31MODULE_FIRMWARE("usb8388.bin");
32
31static struct usb_device_id if_usb_table[] = { 33static struct usb_device_id if_usb_table[] = {
32 /* Enter the device signature inside */ 34 /* Enter the device signature inside */
33 { USB_DEVICE(0x1286, 0x2001) }, 35 { USB_DEVICE(0x1286, 0x2001) },
@@ -300,6 +302,9 @@ static int if_usb_probe(struct usb_interface *intf,
300 cardp->priv->fw_ready = 1; 302 cardp->priv->fw_ready = 1;
301 303
302 priv->hw_host_to_card = if_usb_host_to_card; 304 priv->hw_host_to_card = if_usb_host_to_card;
305 priv->enter_deep_sleep = NULL;
306 priv->exit_deep_sleep = NULL;
307 priv->reset_deep_sleep_wakeup = NULL;
303#ifdef CONFIG_OLPC 308#ifdef CONFIG_OLPC
304 if (machine_is_olpc()) 309 if (machine_is_olpc())
305 priv->reset_card = if_usb_reset_olpc_card; 310 priv->reset_card = if_usb_reset_olpc_card;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 87b4e497faa2..01f46cf288d7 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -14,11 +14,13 @@
14#include <linux/stddef.h> 14#include <linux/stddef.h>
15#include <linux/ieee80211.h> 15#include <linux/ieee80211.h>
16#include <net/iw_handler.h> 16#include <net/iw_handler.h>
17#include <net/cfg80211.h>
17 18
18#include "host.h" 19#include "host.h"
19#include "decl.h" 20#include "decl.h"
20#include "dev.h" 21#include "dev.h"
21#include "wext.h" 22#include "wext.h"
23#include "cfg.h"
22#include "debugfs.h" 24#include "debugfs.h"
23#include "scan.h" 25#include "scan.h"
24#include "assoc.h" 26#include "assoc.h"
@@ -43,119 +45,6 @@ module_param_named(libertas_debug, lbs_debug, int, 0644);
43struct cmd_confirm_sleep confirm_sleep; 45struct cmd_confirm_sleep confirm_sleep;
44 46
45 47
46#define LBS_TX_PWR_DEFAULT 20 /*100mW */
47#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
48#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
49#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
50#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
51
52/* Format { channel, frequency (MHz), maxtxpower } */
53/* band: 'B/G', region: USA FCC/Canada IC */
54static struct chan_freq_power channel_freq_power_US_BG[] = {
55 {1, 2412, LBS_TX_PWR_US_DEFAULT},
56 {2, 2417, LBS_TX_PWR_US_DEFAULT},
57 {3, 2422, LBS_TX_PWR_US_DEFAULT},
58 {4, 2427, LBS_TX_PWR_US_DEFAULT},
59 {5, 2432, LBS_TX_PWR_US_DEFAULT},
60 {6, 2437, LBS_TX_PWR_US_DEFAULT},
61 {7, 2442, LBS_TX_PWR_US_DEFAULT},
62 {8, 2447, LBS_TX_PWR_US_DEFAULT},
63 {9, 2452, LBS_TX_PWR_US_DEFAULT},
64 {10, 2457, LBS_TX_PWR_US_DEFAULT},
65 {11, 2462, LBS_TX_PWR_US_DEFAULT}
66};
67
68/* band: 'B/G', region: Europe ETSI */
69static struct chan_freq_power channel_freq_power_EU_BG[] = {
70 {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
71 {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
72 {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
73 {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
74 {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
75 {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
76 {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
77 {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
78 {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
79 {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
80 {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
81 {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
82 {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
83};
84
85/* band: 'B/G', region: Spain */
86static struct chan_freq_power channel_freq_power_SPN_BG[] = {
87 {10, 2457, LBS_TX_PWR_DEFAULT},
88 {11, 2462, LBS_TX_PWR_DEFAULT}
89};
90
91/* band: 'B/G', region: France */
92static struct chan_freq_power channel_freq_power_FR_BG[] = {
93 {10, 2457, LBS_TX_PWR_FR_DEFAULT},
94 {11, 2462, LBS_TX_PWR_FR_DEFAULT},
95 {12, 2467, LBS_TX_PWR_FR_DEFAULT},
96 {13, 2472, LBS_TX_PWR_FR_DEFAULT}
97};
98
99/* band: 'B/G', region: Japan */
100static struct chan_freq_power channel_freq_power_JPN_BG[] = {
101 {1, 2412, LBS_TX_PWR_JP_DEFAULT},
102 {2, 2417, LBS_TX_PWR_JP_DEFAULT},
103 {3, 2422, LBS_TX_PWR_JP_DEFAULT},
104 {4, 2427, LBS_TX_PWR_JP_DEFAULT},
105 {5, 2432, LBS_TX_PWR_JP_DEFAULT},
106 {6, 2437, LBS_TX_PWR_JP_DEFAULT},
107 {7, 2442, LBS_TX_PWR_JP_DEFAULT},
108 {8, 2447, LBS_TX_PWR_JP_DEFAULT},
109 {9, 2452, LBS_TX_PWR_JP_DEFAULT},
110 {10, 2457, LBS_TX_PWR_JP_DEFAULT},
111 {11, 2462, LBS_TX_PWR_JP_DEFAULT},
112 {12, 2467, LBS_TX_PWR_JP_DEFAULT},
113 {13, 2472, LBS_TX_PWR_JP_DEFAULT},
114 {14, 2484, LBS_TX_PWR_JP_DEFAULT}
115};
116
117/**
118 * the structure for channel, frequency and power
119 */
120struct region_cfp_table {
121 u8 region;
122 struct chan_freq_power *cfp_BG;
123 int cfp_no_BG;
124};
125
126/**
127 * the structure for the mapping between region and CFP
128 */
129static struct region_cfp_table region_cfp_table[] = {
130 {0x10, /*US FCC */
131 channel_freq_power_US_BG,
132 ARRAY_SIZE(channel_freq_power_US_BG),
133 }
134 ,
135 {0x20, /*CANADA IC */
136 channel_freq_power_US_BG,
137 ARRAY_SIZE(channel_freq_power_US_BG),
138 }
139 ,
140 {0x30, /*EU*/ channel_freq_power_EU_BG,
141 ARRAY_SIZE(channel_freq_power_EU_BG),
142 }
143 ,
144 {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
145 ARRAY_SIZE(channel_freq_power_SPN_BG),
146 }
147 ,
148 {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
149 ARRAY_SIZE(channel_freq_power_FR_BG),
150 }
151 ,
152 {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
153 ARRAY_SIZE(channel_freq_power_JPN_BG),
154 }
155 ,
156/*Add new region here */
157};
158
159/** 48/**
160 * the table to keep region code 49 * the table to keep region code
161 */ 50 */
@@ -163,13 +52,6 @@ u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
163 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; 52 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 };
164 53
165/** 54/**
166 * 802.11b/g supported bitrates (in 500Kb/s units)
167 */
168u8 lbs_bg_rates[MAX_RATES] =
169 { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
1700x00, 0x00 };
171
172/**
173 * FW rate table. FW refers to rates by their index in this table, not by the 55 * FW rate table. FW refers to rates by their index in this table, not by the
174 * rate value itself. Values of 0x00 are 56 * rate value itself. Values of 0x00 are
175 * reserved positions. 57 * reserved positions.
@@ -403,7 +285,7 @@ static ssize_t lbs_mesh_set(struct device *dev,
403 return count; 285 return count;
404 if (enable) 286 if (enable)
405 action = CMD_ACT_MESH_CONFIG_START; 287 action = CMD_ACT_MESH_CONFIG_START;
406 ret = lbs_mesh_config(priv, action, priv->curbssparams.channel); 288 ret = lbs_mesh_config(priv, action, priv->channel);
407 if (ret) 289 if (ret)
408 return ret; 290 return ret;
409 291
@@ -574,8 +456,10 @@ void lbs_host_to_card_done(struct lbs_private *priv)
574 priv->dnld_sent = DNLD_RES_RECEIVED; 456 priv->dnld_sent = DNLD_RES_RECEIVED;
575 457
576 /* Wake main thread if commands are pending */ 458 /* Wake main thread if commands are pending */
577 if (!priv->cur_cmd || priv->tx_pending_len > 0) 459 if (!priv->cur_cmd || priv->tx_pending_len > 0) {
578 wake_up_interruptible(&priv->waitq); 460 if (!priv->wakeup_dev_required)
461 wake_up_interruptible(&priv->waitq);
462 }
579 463
580 spin_unlock_irqrestore(&priv->driver_lock, flags); 464 spin_unlock_irqrestore(&priv->driver_lock, flags);
581 lbs_deb_leave(LBS_DEB_THREAD); 465 lbs_deb_leave(LBS_DEB_THREAD);
@@ -770,7 +654,8 @@ static int lbs_thread(void *data)
770 shouldsleep = 0; /* We have a command response */ 654 shouldsleep = 0; /* We have a command response */
771 else if (priv->cur_cmd) 655 else if (priv->cur_cmd)
772 shouldsleep = 1; /* Can't send a command; one already running */ 656 shouldsleep = 1; /* Can't send a command; one already running */
773 else if (!list_empty(&priv->cmdpendingq)) 657 else if (!list_empty(&priv->cmdpendingq) &&
658 !(priv->wakeup_dev_required))
774 shouldsleep = 0; /* We have a command to send */ 659 shouldsleep = 0; /* We have a command to send */
775 else if (__kfifo_len(priv->event_fifo)) 660 else if (__kfifo_len(priv->event_fifo))
776 shouldsleep = 0; /* We have an event to process */ 661 shouldsleep = 0; /* We have an event to process */
@@ -822,6 +707,26 @@ static int lbs_thread(void *data)
822 } 707 }
823 spin_unlock_irq(&priv->driver_lock); 708 spin_unlock_irq(&priv->driver_lock);
824 709
710 /* Process hardware events, e.g. card removed, link lost */
711 spin_lock_irq(&priv->driver_lock);
712 while (__kfifo_len(priv->event_fifo)) {
713 u32 event;
714 __kfifo_get(priv->event_fifo, (unsigned char *) &event,
715 sizeof(event));
716 spin_unlock_irq(&priv->driver_lock);
717 lbs_process_event(priv, event);
718 spin_lock_irq(&priv->driver_lock);
719 }
720 spin_unlock_irq(&priv->driver_lock);
721
722 if (priv->wakeup_dev_required) {
723 lbs_deb_thread("Waking up device...\n");
724 /* Wake up device */
725 if (priv->exit_deep_sleep(priv))
726 lbs_deb_thread("Wakeup device failed\n");
727 continue;
728 }
729
825 /* command timeout stuff */ 730 /* command timeout stuff */
826 if (priv->cmd_timed_out && priv->cur_cmd) { 731 if (priv->cmd_timed_out && priv->cur_cmd) {
827 struct cmd_ctrl_node *cmdnode = priv->cur_cmd; 732 struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
@@ -849,18 +754,7 @@ static int lbs_thread(void *data)
849 } 754 }
850 priv->cmd_timed_out = 0; 755 priv->cmd_timed_out = 0;
851 756
852 /* Process hardware events, e.g. card removed, link lost */
853 spin_lock_irq(&priv->driver_lock);
854 while (__kfifo_len(priv->event_fifo)) {
855 u32 event;
856 757
857 __kfifo_get(priv->event_fifo, (unsigned char *) &event,
858 sizeof(event));
859 spin_unlock_irq(&priv->driver_lock);
860 lbs_process_event(priv, event);
861 spin_lock_irq(&priv->driver_lock);
862 }
863 spin_unlock_irq(&priv->driver_lock);
864 758
865 if (!priv->fw_ready) 759 if (!priv->fw_ready)
866 continue; 760 continue;
@@ -894,6 +788,9 @@ static int lbs_thread(void *data)
894 (priv->psstate == PS_STATE_PRE_SLEEP)) 788 (priv->psstate == PS_STATE_PRE_SLEEP))
895 continue; 789 continue;
896 790
791 if (priv->is_deep_sleep)
792 continue;
793
897 /* Execute the next command */ 794 /* Execute the next command */
898 if (!priv->dnld_sent && !priv->cur_cmd) 795 if (!priv->dnld_sent && !priv->cur_cmd)
899 lbs_execute_next_command(priv); 796 lbs_execute_next_command(priv);
@@ -928,6 +825,7 @@ static int lbs_thread(void *data)
928 } 825 }
929 826
930 del_timer(&priv->command_timer); 827 del_timer(&priv->command_timer);
828 del_timer(&priv->auto_deepsleep_timer);
931 wake_up_all(&priv->cmd_pending); 829 wake_up_all(&priv->cmd_pending);
932 830
933 lbs_deb_leave(LBS_DEB_THREAD); 831 lbs_deb_leave(LBS_DEB_THREAD);
@@ -1050,6 +948,62 @@ out:
1050 lbs_deb_leave(LBS_DEB_CMD); 948 lbs_deb_leave(LBS_DEB_CMD);
1051} 949}
1052 950
951/**
952 * This function put the device back to deep sleep mode when timer expires
953 * and no activity (command, event, data etc.) is detected.
954 */
955static void auto_deepsleep_timer_fn(unsigned long data)
956{
957 struct lbs_private *priv = (struct lbs_private *)data;
958 int ret;
959
960 lbs_deb_enter(LBS_DEB_CMD);
961
962 if (priv->is_activity_detected) {
963 priv->is_activity_detected = 0;
964 } else {
965 if (priv->is_auto_deep_sleep_enabled &&
966 (!priv->wakeup_dev_required) &&
967 (priv->connect_status != LBS_CONNECTED)) {
968 lbs_deb_main("Entering auto deep sleep mode...\n");
969 ret = lbs_prepare_and_send_command(priv,
970 CMD_802_11_DEEP_SLEEP, 0,
971 0, 0, NULL);
972 if (ret)
973 lbs_pr_err("Enter Deep Sleep command failed\n");
974 }
975 }
976 mod_timer(&priv->auto_deepsleep_timer , jiffies +
977 (priv->auto_deep_sleep_timeout * HZ)/1000);
978 lbs_deb_leave(LBS_DEB_CMD);
979}
980
981int lbs_enter_auto_deep_sleep(struct lbs_private *priv)
982{
983 lbs_deb_enter(LBS_DEB_SDIO);
984
985 priv->is_auto_deep_sleep_enabled = 1;
986 if (priv->is_deep_sleep)
987 priv->wakeup_dev_required = 1;
988 mod_timer(&priv->auto_deepsleep_timer ,
989 jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000);
990
991 lbs_deb_leave(LBS_DEB_SDIO);
992 return 0;
993}
994
995int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
996{
997 lbs_deb_enter(LBS_DEB_SDIO);
998
999 priv->is_auto_deep_sleep_enabled = 0;
1000 priv->auto_deep_sleep_timeout = 0;
1001 del_timer(&priv->auto_deepsleep_timer);
1002
1003 lbs_deb_leave(LBS_DEB_SDIO);
1004 return 0;
1005}
1006
1053static void lbs_sync_channel_worker(struct work_struct *work) 1007static void lbs_sync_channel_worker(struct work_struct *work)
1054{ 1008{
1055 struct lbs_private *priv = container_of(work, struct lbs_private, 1009 struct lbs_private *priv = container_of(work, struct lbs_private,
@@ -1092,18 +1046,24 @@ static int lbs_init_adapter(struct lbs_private *priv)
1092 priv->mesh_connect_status = LBS_DISCONNECTED; 1046 priv->mesh_connect_status = LBS_DISCONNECTED;
1093 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 1047 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1094 priv->mode = IW_MODE_INFRA; 1048 priv->mode = IW_MODE_INFRA;
1095 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; 1049 priv->channel = DEFAULT_AD_HOC_CHANNEL;
1096 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 1050 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
1097 priv->radio_on = 1; 1051 priv->radio_on = 1;
1098 priv->enablehwauto = 1; 1052 priv->enablehwauto = 1;
1099 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; 1053 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
1100 priv->psmode = LBS802_11POWERMODECAM; 1054 priv->psmode = LBS802_11POWERMODECAM;
1101 priv->psstate = PS_STATE_FULL_POWER; 1055 priv->psstate = PS_STATE_FULL_POWER;
1056 priv->is_deep_sleep = 0;
1057 priv->is_auto_deep_sleep_enabled = 0;
1058 priv->wakeup_dev_required = 0;
1059 init_waitqueue_head(&priv->ds_awake_q);
1102 1060
1103 mutex_init(&priv->lock); 1061 mutex_init(&priv->lock);
1104 1062
1105 setup_timer(&priv->command_timer, command_timer_fn, 1063 setup_timer(&priv->command_timer, command_timer_fn,
1106 (unsigned long)priv); 1064 (unsigned long)priv);
1065 setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
1066 (unsigned long)priv);
1107 1067
1108 INIT_LIST_HEAD(&priv->cmdfreeq); 1068 INIT_LIST_HEAD(&priv->cmdfreeq);
1109 INIT_LIST_HEAD(&priv->cmdpendingq); 1069 INIT_LIST_HEAD(&priv->cmdpendingq);
@@ -1142,6 +1102,7 @@ static void lbs_free_adapter(struct lbs_private *priv)
1142 if (priv->event_fifo) 1102 if (priv->event_fifo)
1143 kfifo_free(priv->event_fifo); 1103 kfifo_free(priv->event_fifo);
1144 del_timer(&priv->command_timer); 1104 del_timer(&priv->command_timer);
1105 del_timer(&priv->auto_deepsleep_timer);
1145 kfree(priv->networks); 1106 kfree(priv->networks);
1146 priv->networks = NULL; 1107 priv->networks = NULL;
1147 1108
@@ -1168,31 +1129,41 @@ static const struct net_device_ops lbs_netdev_ops = {
1168 */ 1129 */
1169struct lbs_private *lbs_add_card(void *card, struct device *dmdev) 1130struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1170{ 1131{
1171 struct net_device *dev = NULL; 1132 struct net_device *dev;
1133 struct wireless_dev *wdev;
1172 struct lbs_private *priv = NULL; 1134 struct lbs_private *priv = NULL;
1173 1135
1174 lbs_deb_enter(LBS_DEB_MAIN); 1136 lbs_deb_enter(LBS_DEB_MAIN);
1175 1137
1176 /* Allocate an Ethernet device and register it */ 1138 /* Allocate an Ethernet device and register it */
1177 dev = alloc_etherdev(sizeof(struct lbs_private)); 1139 wdev = lbs_cfg_alloc(dmdev);
1178 if (!dev) { 1140 if (IS_ERR(wdev)) {
1179 lbs_pr_err("init wlanX device failed\n"); 1141 lbs_pr_err("cfg80211 init failed\n");
1180 goto done; 1142 goto done;
1181 } 1143 }
1182 priv = netdev_priv(dev); 1144 /* TODO? */
1183 dev->ml_priv = priv; 1145 wdev->iftype = NL80211_IFTYPE_STATION;
1146 priv = wdev_priv(wdev);
1147 priv->wdev = wdev;
1184 1148
1185 if (lbs_init_adapter(priv)) { 1149 if (lbs_init_adapter(priv)) {
1186 lbs_pr_err("failed to initialize adapter structure.\n"); 1150 lbs_pr_err("failed to initialize adapter structure.\n");
1187 goto err_init_adapter; 1151 goto err_wdev;
1188 } 1152 }
1189 1153
1154 //TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
1155 dev = alloc_netdev(0, "wlan%d", ether_setup);
1156 if (!dev) {
1157 dev_err(dmdev, "no memory for network device instance\n");
1158 goto err_adapter;
1159 }
1160
1161 dev->ieee80211_ptr = wdev;
1162 dev->ml_priv = priv;
1163 SET_NETDEV_DEV(dev, dmdev);
1164 wdev->netdev = dev;
1190 priv->dev = dev; 1165 priv->dev = dev;
1191 priv->card = card;
1192 priv->mesh_open = 0;
1193 priv->infra_open = 0;
1194 1166
1195 /* Setup the OS Interface to our functions */
1196 dev->netdev_ops = &lbs_netdev_ops; 1167 dev->netdev_ops = &lbs_netdev_ops;
1197 dev->watchdog_timeo = 5 * HZ; 1168 dev->watchdog_timeo = 5 * HZ;
1198 dev->ethtool_ops = &lbs_ethtool_ops; 1169 dev->ethtool_ops = &lbs_ethtool_ops;
@@ -1201,7 +1172,14 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1201#endif 1172#endif
1202 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1173 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1203 1174
1204 SET_NETDEV_DEV(dev, dmdev); 1175
1176 // TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ??
1177
1178
1179 priv->card = card;
1180 priv->mesh_open = 0;
1181 priv->infra_open = 0;
1182
1205 1183
1206 priv->rtap_net_dev = NULL; 1184 priv->rtap_net_dev = NULL;
1207 strcpy(dev->name, "wlan%d"); 1185 strcpy(dev->name, "wlan%d");
@@ -1211,7 +1189,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1211 priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main"); 1189 priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
1212 if (IS_ERR(priv->main_thread)) { 1190 if (IS_ERR(priv->main_thread)) {
1213 lbs_deb_thread("Error creating main thread.\n"); 1191 lbs_deb_thread("Error creating main thread.\n");
1214 goto err_init_adapter; 1192 goto err_ndev;
1215 } 1193 }
1216 1194
1217 priv->work_thread = create_singlethread_workqueue("lbs_worker"); 1195 priv->work_thread = create_singlethread_workqueue("lbs_worker");
@@ -1228,9 +1206,15 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1228 1206
1229 goto done; 1207 goto done;
1230 1208
1231err_init_adapter: 1209 err_ndev:
1232 lbs_free_adapter(priv);
1233 free_netdev(dev); 1210 free_netdev(dev);
1211
1212 err_adapter:
1213 lbs_free_adapter(priv);
1214
1215 err_wdev:
1216 lbs_cfg_free(priv);
1217
1234 priv = NULL; 1218 priv = NULL;
1235 1219
1236done: 1220done:
@@ -1243,7 +1227,6 @@ EXPORT_SYMBOL_GPL(lbs_add_card);
1243void lbs_remove_card(struct lbs_private *priv) 1227void lbs_remove_card(struct lbs_private *priv)
1244{ 1228{
1245 struct net_device *dev = priv->dev; 1229 struct net_device *dev = priv->dev;
1246 union iwreq_data wrqu;
1247 1230
1248 lbs_deb_enter(LBS_DEB_MAIN); 1231 lbs_deb_enter(LBS_DEB_MAIN);
1249 1232
@@ -1268,15 +1251,19 @@ void lbs_remove_card(struct lbs_private *priv)
1268 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); 1251 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
1269 } 1252 }
1270 1253
1271 memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN); 1254 lbs_send_disconnect_notification(priv);
1272 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1255
1273 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 1256 if (priv->is_deep_sleep) {
1257 priv->is_deep_sleep = 0;
1258 wake_up_interruptible(&priv->ds_awake_q);
1259 }
1274 1260
1275 /* Stop the thread servicing the interrupts */ 1261 /* Stop the thread servicing the interrupts */
1276 priv->surpriseremoved = 1; 1262 priv->surpriseremoved = 1;
1277 kthread_stop(priv->main_thread); 1263 kthread_stop(priv->main_thread);
1278 1264
1279 lbs_free_adapter(priv); 1265 lbs_free_adapter(priv);
1266 lbs_cfg_free(priv);
1280 1267
1281 priv->dev = NULL; 1268 priv->dev = NULL;
1282 free_netdev(dev); 1269 free_netdev(dev);
@@ -1298,11 +1285,8 @@ int lbs_start_card(struct lbs_private *priv)
1298 if (ret) 1285 if (ret)
1299 goto done; 1286 goto done;
1300 1287
1301 /* init 802.11d */ 1288 if (lbs_cfg_register(priv)) {
1302 lbs_init_11d(priv); 1289 lbs_pr_err("cannot register device\n");
1303
1304 if (register_netdev(dev)) {
1305 lbs_pr_err("cannot register ethX device\n");
1306 goto done; 1290 goto done;
1307 } 1291 }
1308 1292
@@ -1327,10 +1311,10 @@ int lbs_start_card(struct lbs_private *priv)
1327 1311
1328 priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID; 1312 priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
1329 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1313 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1330 priv->curbssparams.channel)) { 1314 priv->channel)) {
1331 priv->mesh_tlv = TLV_TYPE_MESH_ID; 1315 priv->mesh_tlv = TLV_TYPE_MESH_ID;
1332 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1316 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1333 priv->curbssparams.channel)) 1317 priv->channel))
1334 priv->mesh_tlv = 0; 1318 priv->mesh_tlv = 0;
1335 } 1319 }
1336 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 1320 } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
@@ -1339,7 +1323,7 @@ int lbs_start_card(struct lbs_private *priv)
1339 */ 1323 */
1340 priv->mesh_tlv = TLV_TYPE_MESH_ID; 1324 priv->mesh_tlv = TLV_TYPE_MESH_ID;
1341 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1325 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1342 priv->curbssparams.channel)) 1326 priv->channel))
1343 priv->mesh_tlv = 0; 1327 priv->mesh_tlv = 0;
1344 } 1328 }
1345 if (priv->mesh_tlv) { 1329 if (priv->mesh_tlv) {
@@ -1392,6 +1376,7 @@ void lbs_stop_card(struct lbs_private *priv)
1392 1376
1393 /* Delete the timeout of the currently processing command */ 1377 /* Delete the timeout of the currently processing command */
1394 del_timer_sync(&priv->command_timer); 1378 del_timer_sync(&priv->command_timer);
1379 del_timer_sync(&priv->auto_deepsleep_timer);
1395 1380
1396 /* Flush pending command nodes */ 1381 /* Flush pending command nodes */
1397 spin_lock_irqsave(&priv->driver_lock, flags); 1382 spin_lock_irqsave(&priv->driver_lock, flags);
@@ -1509,68 +1494,6 @@ static void lbs_remove_mesh(struct lbs_private *priv)
1509 lbs_deb_leave(LBS_DEB_MESH); 1494 lbs_deb_leave(LBS_DEB_MESH);
1510} 1495}
1511 1496
1512/**
1513 * @brief This function finds the CFP in
1514 * region_cfp_table based on region and band parameter.
1515 *
1516 * @param region The region code
1517 * @param band The band
1518 * @param cfp_no A pointer to CFP number
1519 * @return A pointer to CFP
1520 */
1521struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
1522{
1523 int i, end;
1524
1525 lbs_deb_enter(LBS_DEB_MAIN);
1526
1527 end = ARRAY_SIZE(region_cfp_table);
1528
1529 for (i = 0; i < end ; i++) {
1530 lbs_deb_main("region_cfp_table[i].region=%d\n",
1531 region_cfp_table[i].region);
1532 if (region_cfp_table[i].region == region) {
1533 *cfp_no = region_cfp_table[i].cfp_no_BG;
1534 lbs_deb_leave(LBS_DEB_MAIN);
1535 return region_cfp_table[i].cfp_BG;
1536 }
1537 }
1538
1539 lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
1540 return NULL;
1541}
1542
1543int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
1544{
1545 int ret = 0;
1546 int i = 0;
1547
1548 struct chan_freq_power *cfp;
1549 int cfp_no;
1550
1551 lbs_deb_enter(LBS_DEB_MAIN);
1552
1553 memset(priv->region_channel, 0, sizeof(priv->region_channel));
1554
1555 cfp = lbs_get_region_cfp_table(region, &cfp_no);
1556 if (cfp != NULL) {
1557 priv->region_channel[i].nrcfp = cfp_no;
1558 priv->region_channel[i].CFP = cfp;
1559 } else {
1560 lbs_deb_main("wrong region code %#x in band B/G\n",
1561 region);
1562 ret = -1;
1563 goto out;
1564 }
1565 priv->region_channel[i].valid = 1;
1566 priv->region_channel[i].region = region;
1567 priv->region_channel[i].band = band;
1568 i++;
1569out:
1570 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
1571 return ret;
1572}
1573
1574void lbs_queue_event(struct lbs_private *priv, u32 event) 1497void lbs_queue_event(struct lbs_private *priv, u32 event)
1575{ 1498{
1576 unsigned long flags; 1499 unsigned long flags;
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index 18fe29faf99b..871f914a75fc 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -187,9 +187,9 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
187 if (ret) 187 if (ret)
188 return ret; 188 return ret;
189 189
190 if (defs.meshie.val.mesh_id_len > IW_ESSID_MAX_SIZE) { 190 if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) {
191 lbs_pr_err("inconsistent mesh ID length"); 191 lbs_pr_err("inconsistent mesh ID length");
192 defs.meshie.val.mesh_id_len = IW_ESSID_MAX_SIZE; 192 defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
193 } 193 }
194 194
195 /* SSID not null terminated: reserve room for \0 + \n */ 195 /* SSID not null terminated: reserve room for \0 + \n */
@@ -214,7 +214,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
214 int len; 214 int len;
215 int ret; 215 int ret;
216 216
217 if (count < 2 || count > IW_ESSID_MAX_SIZE + 1) 217 if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1)
218 return -EINVAL; 218 return -EINVAL;
219 219
220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config)); 220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
@@ -233,7 +233,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
233 /* SSID len */ 233 /* SSID len */
234 ie->val.mesh_id_len = len; 234 ie->val.mesh_id_len = len;
235 /* IE len */ 235 /* IE len */
236 ie->len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len; 236 ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len;
237 237
238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, 238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
239 CMD_TYPE_MESH_SET_MESH_IE); 239 CMD_TYPE_MESH_SET_MESH_IE);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 65f02cc6752f..9f18a19cc49d 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,7 +4,7 @@
4#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7#include "hostcmd.h" 7#include "host.h"
8#include "radiotap.h" 8#include "radiotap.h"
9#include "decl.h" 9#include "decl.h"
10#include "dev.h" 10#include "dev.h"
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 6c95af3023cc..c6a6c042b82f 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -12,18 +12,19 @@
12#include <net/lib80211.h> 12#include <net/lib80211.h>
13 13
14#include "host.h" 14#include "host.h"
15#include "decl.h"
16#include "dev.h" 15#include "dev.h"
17#include "scan.h" 16#include "scan.h"
17#include "assoc.h"
18#include "wext.h"
18#include "cmd.h" 19#include "cmd.h"
19 20
20//! Approximate amount of data needed to pass a scan result back to iwlist 21//! Approximate amount of data needed to pass a scan result back to iwlist
21#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ 22#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \
22 + IW_ESSID_MAX_SIZE \ 23 + IEEE80211_MAX_SSID_LEN \
23 + IW_EV_UINT_LEN \ 24 + IW_EV_UINT_LEN \
24 + IW_EV_FREQ_LEN \ 25 + IW_EV_FREQ_LEN \
25 + IW_EV_QUAL_LEN \ 26 + IW_EV_QUAL_LEN \
26 + IW_ESSID_MAX_SIZE \ 27 + IEEE80211_MAX_SSID_LEN \
27 + IW_EV_PARAM_LEN \ 28 + IW_EV_PARAM_LEN \
28 + 40) /* 40 for WPAIE */ 29 + 40) /* 40 for WPAIE */
29 30
@@ -121,6 +122,189 @@ static inline int is_same_network(struct bss_descriptor *src,
121 122
122 123
123 124
125/*********************************************************************/
126/* */
127/* Region channel support */
128/* */
129/*********************************************************************/
130
131#define LBS_TX_PWR_DEFAULT 20 /*100mW */
132#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
133#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
134#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
135#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
136
137/* Format { channel, frequency (MHz), maxtxpower } */
138/* band: 'B/G', region: USA FCC/Canada IC */
139static struct chan_freq_power channel_freq_power_US_BG[] = {
140 {1, 2412, LBS_TX_PWR_US_DEFAULT},
141 {2, 2417, LBS_TX_PWR_US_DEFAULT},
142 {3, 2422, LBS_TX_PWR_US_DEFAULT},
143 {4, 2427, LBS_TX_PWR_US_DEFAULT},
144 {5, 2432, LBS_TX_PWR_US_DEFAULT},
145 {6, 2437, LBS_TX_PWR_US_DEFAULT},
146 {7, 2442, LBS_TX_PWR_US_DEFAULT},
147 {8, 2447, LBS_TX_PWR_US_DEFAULT},
148 {9, 2452, LBS_TX_PWR_US_DEFAULT},
149 {10, 2457, LBS_TX_PWR_US_DEFAULT},
150 {11, 2462, LBS_TX_PWR_US_DEFAULT}
151};
152
153/* band: 'B/G', region: Europe ETSI */
154static struct chan_freq_power channel_freq_power_EU_BG[] = {
155 {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
156 {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
157 {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
158 {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
159 {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
160 {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
161 {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
162 {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
163 {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
164 {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
165 {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
166 {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
167 {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
168};
169
170/* band: 'B/G', region: Spain */
171static struct chan_freq_power channel_freq_power_SPN_BG[] = {
172 {10, 2457, LBS_TX_PWR_DEFAULT},
173 {11, 2462, LBS_TX_PWR_DEFAULT}
174};
175
176/* band: 'B/G', region: France */
177static struct chan_freq_power channel_freq_power_FR_BG[] = {
178 {10, 2457, LBS_TX_PWR_FR_DEFAULT},
179 {11, 2462, LBS_TX_PWR_FR_DEFAULT},
180 {12, 2467, LBS_TX_PWR_FR_DEFAULT},
181 {13, 2472, LBS_TX_PWR_FR_DEFAULT}
182};
183
184/* band: 'B/G', region: Japan */
185static struct chan_freq_power channel_freq_power_JPN_BG[] = {
186 {1, 2412, LBS_TX_PWR_JP_DEFAULT},
187 {2, 2417, LBS_TX_PWR_JP_DEFAULT},
188 {3, 2422, LBS_TX_PWR_JP_DEFAULT},
189 {4, 2427, LBS_TX_PWR_JP_DEFAULT},
190 {5, 2432, LBS_TX_PWR_JP_DEFAULT},
191 {6, 2437, LBS_TX_PWR_JP_DEFAULT},
192 {7, 2442, LBS_TX_PWR_JP_DEFAULT},
193 {8, 2447, LBS_TX_PWR_JP_DEFAULT},
194 {9, 2452, LBS_TX_PWR_JP_DEFAULT},
195 {10, 2457, LBS_TX_PWR_JP_DEFAULT},
196 {11, 2462, LBS_TX_PWR_JP_DEFAULT},
197 {12, 2467, LBS_TX_PWR_JP_DEFAULT},
198 {13, 2472, LBS_TX_PWR_JP_DEFAULT},
199 {14, 2484, LBS_TX_PWR_JP_DEFAULT}
200};
201
202/**
203 * the structure for channel, frequency and power
204 */
205struct region_cfp_table {
206 u8 region;
207 struct chan_freq_power *cfp_BG;
208 int cfp_no_BG;
209};
210
211/**
212 * the structure for the mapping between region and CFP
213 */
214static struct region_cfp_table region_cfp_table[] = {
215 {0x10, /*US FCC */
216 channel_freq_power_US_BG,
217 ARRAY_SIZE(channel_freq_power_US_BG),
218 }
219 ,
220 {0x20, /*CANADA IC */
221 channel_freq_power_US_BG,
222 ARRAY_SIZE(channel_freq_power_US_BG),
223 }
224 ,
225 {0x30, /*EU*/ channel_freq_power_EU_BG,
226 ARRAY_SIZE(channel_freq_power_EU_BG),
227 }
228 ,
229 {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
230 ARRAY_SIZE(channel_freq_power_SPN_BG),
231 }
232 ,
233 {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
234 ARRAY_SIZE(channel_freq_power_FR_BG),
235 }
236 ,
237 {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
238 ARRAY_SIZE(channel_freq_power_JPN_BG),
239 }
240 ,
241/*Add new region here */
242};
243
244/**
245 * @brief This function finds the CFP in
246 * region_cfp_table based on region and band parameter.
247 *
248 * @param region The region code
249 * @param band The band
250 * @param cfp_no A pointer to CFP number
251 * @return A pointer to CFP
252 */
253static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
254{
255 int i, end;
256
257 lbs_deb_enter(LBS_DEB_MAIN);
258
259 end = ARRAY_SIZE(region_cfp_table);
260
261 for (i = 0; i < end ; i++) {
262 lbs_deb_main("region_cfp_table[i].region=%d\n",
263 region_cfp_table[i].region);
264 if (region_cfp_table[i].region == region) {
265 *cfp_no = region_cfp_table[i].cfp_no_BG;
266 lbs_deb_leave(LBS_DEB_MAIN);
267 return region_cfp_table[i].cfp_BG;
268 }
269 }
270
271 lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
272 return NULL;
273}
274
275int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
276{
277 int ret = 0;
278 int i = 0;
279
280 struct chan_freq_power *cfp;
281 int cfp_no;
282
283 lbs_deb_enter(LBS_DEB_MAIN);
284
285 memset(priv->region_channel, 0, sizeof(priv->region_channel));
286
287 cfp = lbs_get_region_cfp_table(region, &cfp_no);
288 if (cfp != NULL) {
289 priv->region_channel[i].nrcfp = cfp_no;
290 priv->region_channel[i].CFP = cfp;
291 } else {
292 lbs_deb_main("wrong region code %#x in band B/G\n",
293 region);
294 ret = -1;
295 goto out;
296 }
297 priv->region_channel[i].valid = 1;
298 priv->region_channel[i].region = region;
299 priv->region_channel[i].band = band;
300 i++;
301out:
302 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
303 return ret;
304}
305
306
307
124 308
125/*********************************************************************/ 309/*********************************************************************/
126/* */ 310/* */
@@ -161,31 +345,15 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
161 scantype = CMD_SCAN_TYPE_ACTIVE; 345 scantype = CMD_SCAN_TYPE_ACTIVE;
162 346
163 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) { 347 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
164 if (priv->enable11d && (priv->connect_status != LBS_CONNECTED) 348 if (!priv->region_channel[rgnidx].valid)
165 && (priv->mesh_connect_status != LBS_CONNECTED)) { 349 continue;
166 /* Scan all the supported chan for the first scan */ 350 scanregion = &priv->region_channel[rgnidx];
167 if (!priv->universal_channel[rgnidx].valid)
168 continue;
169 scanregion = &priv->universal_channel[rgnidx];
170
171 /* clear the parsed_region_chan for the first scan */
172 memset(&priv->parsed_region_chan, 0x00,
173 sizeof(priv->parsed_region_chan));
174 } else {
175 if (!priv->region_channel[rgnidx].valid)
176 continue;
177 scanregion = &priv->region_channel[rgnidx];
178 }
179 351
180 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) { 352 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
181 struct chanscanparamset *chan = &scanchanlist[chanidx]; 353 struct chanscanparamset *chan = &scanchanlist[chanidx];
182 354
183 cfp = scanregion->CFP + nextchan; 355 cfp = scanregion->CFP + nextchan;
184 356
185 if (priv->enable11d)
186 scantype = lbs_get_scan_type_11d(cfp->channel,
187 &priv->parsed_region_chan);
188
189 if (scanregion->band == BAND_B || scanregion->band == BAND_G) 357 if (scanregion->band == BAND_B || scanregion->band == BAND_G)
190 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG; 358 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
191 359
@@ -519,7 +687,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
519 struct ieee_ie_cf_param_set *cf; 687 struct ieee_ie_cf_param_set *cf;
520 struct ieee_ie_ibss_param_set *ibss; 688 struct ieee_ie_ibss_param_set *ibss;
521 DECLARE_SSID_BUF(ssid); 689 DECLARE_SSID_BUF(ssid);
522 struct ieee_ie_country_info_set *pcountryinfo;
523 uint8_t *pos, *end, *p; 690 uint8_t *pos, *end, *p;
524 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; 691 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
525 uint16_t beaconsize = 0; 692 uint16_t beaconsize = 0;
@@ -642,26 +809,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
642 lbs_deb_scan("got IBSS IE\n"); 809 lbs_deb_scan("got IBSS IE\n");
643 break; 810 break;
644 811
645 case WLAN_EID_COUNTRY:
646 pcountryinfo = (struct ieee_ie_country_info_set *) pos;
647 lbs_deb_scan("got COUNTRY IE\n");
648 if (pcountryinfo->header.len < sizeof(pcountryinfo->countrycode)
649 || pcountryinfo->header.len > 254) {
650 lbs_deb_scan("%s: 11D- Err CountryInfo len %d, min %zd, max 254\n",
651 __func__,
652 pcountryinfo->header.len,
653 sizeof(pcountryinfo->countrycode));
654 ret = -1;
655 goto done;
656 }
657
658 memcpy(&bss->countryinfo, pcountryinfo,
659 pcountryinfo->header.len + 2);
660 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
661 (uint8_t *) pcountryinfo,
662 (int) (pcountryinfo->header.len + 2));
663 break;
664
665 case WLAN_EID_EXT_SUPP_RATES: 812 case WLAN_EID_EXT_SUPP_RATES:
666 /* only process extended supported rate if data rate is 813 /* only process extended supported rate if data rate is
667 * already found. Data rate IE should come before 814 * already found. Data rate IE should come before
@@ -812,7 +959,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
812 /* SSID */ 959 /* SSID */
813 iwe.cmd = SIOCGIWESSID; 960 iwe.cmd = SIOCGIWESSID;
814 iwe.u.data.flags = 1; 961 iwe.u.data.flags = 1;
815 iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IW_ESSID_MAX_SIZE); 962 iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
816 start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid); 963 start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
817 964
818 /* Mode */ 965 /* Mode */
@@ -1022,9 +1169,12 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1022 return -EAGAIN; 1169 return -EAGAIN;
1023 1170
1024 /* Update RSSI if current BSS is a locally created ad-hoc BSS */ 1171 /* Update RSSI if current BSS is a locally created ad-hoc BSS */
1025 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) 1172 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
1026 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 1173 err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
1027 CMD_OPTION_WAITFORRSP, 0, NULL); 1174 CMD_OPTION_WAITFORRSP, 0, NULL);
1175 if (err)
1176 goto out;
1177 }
1028 1178
1029 mutex_lock(&priv->lock); 1179 mutex_lock(&priv->lock);
1030 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { 1180 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
@@ -1058,7 +1208,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1058 1208
1059 dwrq->length = (ev - extra); 1209 dwrq->length = (ev - extra);
1060 dwrq->flags = 0; 1210 dwrq->flags = 0;
1061 1211out:
1062 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err); 1212 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
1063 return err; 1213 return err;
1064} 1214}
@@ -1141,11 +1291,11 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
1141 /* The size of the TLV buffer is equal to the entire command response 1291 /* The size of the TLV buffer is equal to the entire command response
1142 * size (scanrespsize) minus the fixed fields (sizeof()'s), the 1292 * size (scanrespsize) minus the fixed fields (sizeof()'s), the
1143 * BSS Descriptions (bssdescriptsize as bytesLef) and the command 1293 * BSS Descriptions (bssdescriptsize as bytesLef) and the command
1144 * response header (S_DS_GEN) 1294 * response header (sizeof(struct cmd_header))
1145 */ 1295 */
1146 tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize) 1296 tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
1147 + sizeof(scanresp->nr_sets) 1297 + sizeof(scanresp->nr_sets)
1148 + S_DS_GEN); 1298 + sizeof(struct cmd_header));
1149 1299
1150 /* 1300 /*
1151 * Process each scan response returned (scanresp->nr_sets). Save 1301 * Process each scan response returned (scanresp->nr_sets). Save
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
index fab7d5d097fc..8fb1706d7526 100644
--- a/drivers/net/wireless/libertas/scan.h
+++ b/drivers/net/wireless/libertas/scan.h
@@ -9,8 +9,36 @@
9 9
10#include <net/iw_handler.h> 10#include <net/iw_handler.h>
11 11
12struct lbs_private;
13
12#define MAX_NETWORK_COUNT 128 14#define MAX_NETWORK_COUNT 128
13 15
16/** Chan-freq-TxPower mapping table*/
17struct chan_freq_power {
18 /** channel Number */
19 u16 channel;
20 /** frequency of this channel */
21 u32 freq;
22 /** Max allowed Tx power level */
23 u16 maxtxpower;
24 /** TRUE:channel unsupported; FLASE:supported*/
25 u8 unsupported;
26};
27
28/** region-band mapping table*/
29struct region_channel {
30 /** TRUE if this entry is valid */
31 u8 valid;
32 /** region code for US, Japan ... */
33 u8 region;
34 /** band B/G/A, used for BAND_CONFIG cmd */
35 u8 band;
36 /** Actual No. of elements in the array below */
37 u8 nrcfp;
38 /** chan-freq-txpower mapping table*/
39 struct chan_freq_power *CFP;
40};
41
14/** 42/**
15 * @brief Maximum number of channels that can be sent in a setuserscan ioctl 43 * @brief Maximum number of channels that can be sent in a setuserscan ioctl
16 */ 44 */
@@ -18,6 +46,8 @@
18 46
19int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); 47int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
20 48
49int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
50
21int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, 51int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
22 u8 ssid_len); 52 u8 ssid_len);
23 53
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 8c3766a6e8e7..eb856adbf8ea 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -5,7 +5,7 @@
5#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7 7
8#include "hostcmd.h" 8#include "host.h"
9#include "radiotap.h" 9#include "radiotap.h"
10#include "decl.h" 10#include "decl.h"
11#include "defs.h" 11#include "defs.h"
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 99905df65b25..3e72c86ceca8 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -5,8 +5,8 @@
5#define _LBS_TYPES_H_ 5#define _LBS_TYPES_H_
6 6
7#include <linux/if_ether.h> 7#include <linux/if_ether.h>
8#include <linux/ieee80211.h>
8#include <asm/byteorder.h> 9#include <asm/byteorder.h>
9#include <linux/wireless.h>
10 10
11struct ieee_ie_header { 11struct ieee_ie_header {
12 u8 id; 12 u8 id;
@@ -247,7 +247,7 @@ struct mrvl_meshie_val {
247 uint8_t active_metric_id; 247 uint8_t active_metric_id;
248 uint8_t mesh_capability; 248 uint8_t mesh_capability;
249 uint8_t mesh_id_len; 249 uint8_t mesh_id_len;
250 uint8_t mesh_id[IW_ESSID_MAX_SIZE]; 250 uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
251} __attribute__ ((packed)); 251} __attribute__ ((packed));
252 252
253struct mrvl_meshie { 253struct mrvl_meshie {
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index be837a0d2517..a8eb9e1fcf36 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -45,6 +45,63 @@ static inline void lbs_cancel_association_work(struct lbs_private *priv)
45 priv->pending_assoc_req = NULL; 45 priv->pending_assoc_req = NULL;
46} 46}
47 47
48void lbs_send_disconnect_notification(struct lbs_private *priv)
49{
50 union iwreq_data wrqu;
51
52 memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
53 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
54 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
55}
56
57static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
58{
59 union iwreq_data iwrq;
60 u8 buf[50];
61
62 lbs_deb_enter(LBS_DEB_WEXT);
63
64 memset(&iwrq, 0, sizeof(union iwreq_data));
65 memset(buf, 0, sizeof(buf));
66
67 snprintf(buf, sizeof(buf) - 1, "%s", str);
68
69 iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
70
71 /* Send Event to upper layer */
72 lbs_deb_wext("event indication string %s\n", (char *)buf);
73 lbs_deb_wext("event indication length %d\n", iwrq.data.length);
74 lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
75
76 wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
77
78 lbs_deb_leave(LBS_DEB_WEXT);
79}
80
81/**
82 * @brief This function handles MIC failure event.
83 *
84 * @param priv A pointer to struct lbs_private structure
85 * @para event the event id
86 * @return n/a
87 */
88void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
89{
90 char buf[50];
91
92 lbs_deb_enter(LBS_DEB_CMD);
93 memset(buf, 0, sizeof(buf));
94
95 sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
96
97 if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
98 strcat(buf, "unicast ");
99 else
100 strcat(buf, "multicast ");
101
102 lbs_send_iwevcustom_event(priv, buf);
103 lbs_deb_leave(LBS_DEB_CMD);
104}
48 105
49/** 106/**
50 * @brief Find the channel frequency power info with specific channel 107 * @brief Find the channel frequency power info with specific channel
@@ -66,8 +123,6 @@ struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
66 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { 123 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
67 rc = &priv->region_channel[j]; 124 rc = &priv->region_channel[j];
68 125
69 if (priv->enable11d)
70 rc = &priv->universal_channel[j];
71 if (!rc->valid || !rc->CFP) 126 if (!rc->valid || !rc->CFP)
72 continue; 127 continue;
73 if (rc->band != band) 128 if (rc->band != band)
@@ -107,8 +162,6 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(
107 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { 162 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
108 rc = &priv->region_channel[j]; 163 rc = &priv->region_channel[j];
109 164
110 if (priv->enable11d)
111 rc = &priv->universal_channel[j];
112 if (!rc->valid || !rc->CFP) 165 if (!rc->valid || !rc->CFP)
113 continue; 166 continue;
114 if (rc->band != band) 167 if (rc->band != band)
@@ -169,12 +222,12 @@ static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
169 lbs_deb_enter(LBS_DEB_WEXT); 222 lbs_deb_enter(LBS_DEB_WEXT);
170 223
171 cfp = lbs_find_cfp_by_band_and_channel(priv, 0, 224 cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
172 priv->curbssparams.channel); 225 priv->channel);
173 226
174 if (!cfp) { 227 if (!cfp) {
175 if (priv->curbssparams.channel) 228 if (priv->channel)
176 lbs_deb_wext("invalid channel %d\n", 229 lbs_deb_wext("invalid channel %d\n",
177 priv->curbssparams.channel); 230 priv->channel);
178 return -EINVAL; 231 return -EINVAL;
179 } 232 }
180 233
@@ -547,8 +600,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
547 struct chan_freq_power *cfp; 600 struct chan_freq_power *cfp;
548 u8 rates[MAX_RATES + 1]; 601 u8 rates[MAX_RATES + 1];
549 602
550 u8 flag = 0;
551
552 lbs_deb_enter(LBS_DEB_WEXT); 603 lbs_deb_enter(LBS_DEB_WEXT);
553 604
554 dwrq->length = sizeof(struct iw_range); 605 dwrq->length = sizeof(struct iw_range);
@@ -570,52 +621,21 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
570 621
571 range->scan_capa = IW_SCAN_CAPA_ESSID; 622 range->scan_capa = IW_SCAN_CAPA_ESSID;
572 623
573 if (priv->enable11d && 624 for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
574 (priv->connect_status == LBS_CONNECTED || 625 && (j < ARRAY_SIZE(priv->region_channel)); j++) {
575 priv->mesh_connect_status == LBS_CONNECTED)) { 626 cfp = priv->region_channel[j].CFP;
576 u8 chan_no;
577 u8 band;
578
579 struct parsed_region_chan_11d *parsed_region_chan =
580 &priv->parsed_region_chan;
581
582 if (parsed_region_chan == NULL) {
583 lbs_deb_wext("11d: parsed_region_chan is NULL\n");
584 goto out;
585 }
586 band = parsed_region_chan->band;
587 lbs_deb_wext("band %d, nr_char %d\n", band,
588 parsed_region_chan->nr_chan);
589
590 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES) 627 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
591 && (i < parsed_region_chan->nr_chan); i++) { 628 && priv->region_channel[j].valid
592 chan_no = parsed_region_chan->chanpwr[i].chan; 629 && cfp
593 lbs_deb_wext("chan_no %d\n", chan_no); 630 && (i < priv->region_channel[j].nrcfp); i++) {
594 range->freq[range->num_frequency].i = (long)chan_no; 631 range->freq[range->num_frequency].i =
632 (long)cfp->channel;
595 range->freq[range->num_frequency].m = 633 range->freq[range->num_frequency].m =
596 (long)lbs_chan_2_freq(chan_no) * 100000; 634 (long)cfp->freq * 100000;
597 range->freq[range->num_frequency].e = 1; 635 range->freq[range->num_frequency].e = 1;
636 cfp++;
598 range->num_frequency++; 637 range->num_frequency++;
599 } 638 }
600 flag = 1;
601 }
602 if (!flag) {
603 for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
604 && (j < ARRAY_SIZE(priv->region_channel)); j++) {
605 cfp = priv->region_channel[j].CFP;
606 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
607 && priv->region_channel[j].valid
608 && cfp
609 && (i < priv->region_channel[j].nrcfp); i++) {
610 range->freq[range->num_frequency].i =
611 (long)cfp->channel;
612 range->freq[range->num_frequency].m =
613 (long)cfp->freq * 100000;
614 range->freq[range->num_frequency].e = 1;
615 cfp++;
616 range->num_frequency++;
617 }
618 }
619 } 639 }
620 640
621 lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n", 641 lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
@@ -700,7 +720,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
700 | IW_ENC_CAPA_CIPHER_CCMP; 720 | IW_ENC_CAPA_CIPHER_CCMP;
701 } 721 }
702 722
703out:
704 lbs_deb_leave(LBS_DEB_WEXT); 723 lbs_deb_leave(LBS_DEB_WEXT);
705 return 0; 724 return 0;
706} 725}
@@ -709,6 +728,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
709 struct iw_param *vwrq, char *extra) 728 struct iw_param *vwrq, char *extra)
710{ 729{
711 struct lbs_private *priv = dev->ml_priv; 730 struct lbs_private *priv = dev->ml_priv;
731 int ret = 0;
712 732
713 lbs_deb_enter(LBS_DEB_WEXT); 733 lbs_deb_enter(LBS_DEB_WEXT);
714 734
@@ -737,8 +757,54 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
737 "setting power timeout is not supported\n"); 757 "setting power timeout is not supported\n");
738 return -EINVAL; 758 return -EINVAL;
739 } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { 759 } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
740 lbs_deb_wext("setting power period not supported\n"); 760 vwrq->value = vwrq->value / 1000;
741 return -EINVAL; 761 if (!priv->enter_deep_sleep) {
762 lbs_pr_err("deep sleep feature is not implemented "
763 "for this interface driver\n");
764 return -EINVAL;
765 }
766
767 if (priv->connect_status == LBS_CONNECTED) {
768 if ((priv->is_auto_deep_sleep_enabled) &&
769 (vwrq->value == -1000)) {
770 lbs_exit_auto_deep_sleep(priv);
771 return 0;
772 } else {
773 lbs_pr_err("can't use deep sleep cmd in "
774 "connected state\n");
775 return -EINVAL;
776 }
777 }
778
779 if ((vwrq->value < 0) && (vwrq->value != -1000)) {
780 lbs_pr_err("unknown option\n");
781 return -EINVAL;
782 }
783
784 if (vwrq->value > 0) {
785 if (!priv->is_auto_deep_sleep_enabled) {
786 priv->is_activity_detected = 0;
787 priv->auto_deep_sleep_timeout = vwrq->value;
788 lbs_enter_auto_deep_sleep(priv);
789 } else {
790 priv->auto_deep_sleep_timeout = vwrq->value;
791 lbs_deb_debugfs("auto deep sleep: "
792 "already enabled\n");
793 }
794 return 0;
795 } else {
796 if (priv->is_auto_deep_sleep_enabled) {
797 lbs_exit_auto_deep_sleep(priv);
798 /* Try to exit deep sleep if auto */
799 /*deep sleep disabled */
800 ret = lbs_set_deep_sleep(priv, 0);
801 }
802 if (vwrq->value == 0)
803 ret = lbs_set_deep_sleep(priv, 1);
804 else if (vwrq->value == -1000)
805 ret = lbs_set_deep_sleep(priv, 0);
806 return ret;
807 }
742 } 808 }
743 809
744 if (priv->psmode != LBS802_11POWERMODECAM) { 810 if (priv->psmode != LBS802_11POWERMODECAM) {
@@ -752,6 +818,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
752 } 818 }
753 819
754 lbs_deb_leave(LBS_DEB_WEXT); 820 lbs_deb_leave(LBS_DEB_WEXT);
821
755 return 0; 822 return 0;
756} 823}
757 824
@@ -785,7 +852,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
785 u32 rssi_qual; 852 u32 rssi_qual;
786 u32 tx_qual; 853 u32 tx_qual;
787 u32 quality = 0; 854 u32 quality = 0;
788 int stats_valid = 0; 855 int ret, stats_valid = 0;
789 u8 rssi; 856 u8 rssi;
790 u32 tx_retries; 857 u32 tx_retries;
791 struct cmd_ds_802_11_get_log log; 858 struct cmd_ds_802_11_get_log log;
@@ -834,7 +901,9 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
834 901
835 memset(&log, 0, sizeof(log)); 902 memset(&log, 0, sizeof(log));
836 log.hdr.size = cpu_to_le16(sizeof(log)); 903 log.hdr.size = cpu_to_le16(sizeof(log));
837 lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log); 904 ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
905 if (ret)
906 goto out;
838 907
839 tx_retries = le32_to_cpu(log.retry); 908 tx_retries = le32_to_cpu(log.retry);
840 909
@@ -862,8 +931,10 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
862 stats_valid = 1; 931 stats_valid = 1;
863 932
864 /* update stats asynchronously for future calls */ 933 /* update stats asynchronously for future calls */
865 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 934 ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
866 0, 0, NULL); 935 0, 0, NULL);
936 if (ret)
937 lbs_pr_err("RSSI command failed\n");
867out: 938out:
868 if (!stats_valid) { 939 if (!stats_valid) {
869 priv->wstats.miss.beacon = 0; 940 priv->wstats.miss.beacon = 0;
@@ -973,7 +1044,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
973 goto out; 1044 goto out;
974 } 1045 }
975 1046
976 if (fwrq->m != priv->curbssparams.channel) { 1047 if (fwrq->m != priv->channel) {
977 lbs_deb_wext("mesh channel change forces eth disconnect\n"); 1048 lbs_deb_wext("mesh channel change forces eth disconnect\n");
978 if (priv->mode == IW_MODE_INFRA) 1049 if (priv->mode == IW_MODE_INFRA)
979 lbs_cmd_80211_deauthenticate(priv, 1050 lbs_cmd_80211_deauthenticate(priv,
@@ -1000,6 +1071,7 @@ static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1000 u8 rates[MAX_RATES + 1]; 1071 u8 rates[MAX_RATES + 1];
1001 1072
1002 lbs_deb_enter(LBS_DEB_WEXT); 1073 lbs_deb_enter(LBS_DEB_WEXT);
1074
1003 lbs_deb_wext("vwrq->value %d\n", vwrq->value); 1075 lbs_deb_wext("vwrq->value %d\n", vwrq->value);
1004 lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed); 1076 lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);
1005 1077
@@ -1975,7 +2047,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1975{ 2047{
1976 struct lbs_private *priv = dev->ml_priv; 2048 struct lbs_private *priv = dev->ml_priv;
1977 int ret = 0; 2049 int ret = 0;
1978 u8 ssid[IW_ESSID_MAX_SIZE]; 2050 u8 ssid[IEEE80211_MAX_SSID_LEN];
1979 u8 ssid_len = 0; 2051 u8 ssid_len = 0;
1980 struct assoc_request * assoc_req; 2052 struct assoc_request * assoc_req;
1981 int in_ssid_len = dwrq->length; 2053 int in_ssid_len = dwrq->length;
@@ -1989,7 +2061,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1989 } 2061 }
1990 2062
1991 /* Check the size of the string */ 2063 /* Check the size of the string */
1992 if (in_ssid_len > IW_ESSID_MAX_SIZE) { 2064 if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
1993 ret = -E2BIG; 2065 ret = -E2BIG;
1994 goto out; 2066 goto out;
1995 } 2067 }
@@ -2020,7 +2092,7 @@ out:
2020 ret = -ENOMEM; 2092 ret = -ENOMEM;
2021 } else { 2093 } else {
2022 /* Copy the SSID to the association request */ 2094 /* Copy the SSID to the association request */
2023 memcpy(&assoc_req->ssid, &ssid, IW_ESSID_MAX_SIZE); 2095 memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
2024 assoc_req->ssid_len = ssid_len; 2096 assoc_req->ssid_len = ssid_len;
2025 set_bit(ASSOC_FLAG_SSID, &assoc_req->flags); 2097 set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
2026 lbs_postpone_association_work(priv); 2098 lbs_postpone_association_work(priv);
@@ -2071,7 +2143,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2071 } 2143 }
2072 2144
2073 /* Check the size of the string */ 2145 /* Check the size of the string */
2074 if (dwrq->length > IW_ESSID_MAX_SIZE) { 2146 if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
2075 ret = -E2BIG; 2147 ret = -E2BIG;
2076 goto out; 2148 goto out;
2077 } 2149 }
@@ -2086,7 +2158,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2086 } 2158 }
2087 2159
2088 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 2160 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
2089 priv->curbssparams.channel); 2161 priv->channel);
2090 out: 2162 out:
2091 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2163 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2092 return ret; 2164 return ret;
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
index 4c08db497606..7863baf7d234 100644
--- a/drivers/net/wireless/libertas/wext.h
+++ b/drivers/net/wireless/libertas/wext.h
@@ -4,7 +4,15 @@
4#ifndef _LBS_WEXT_H_ 4#ifndef _LBS_WEXT_H_
5#define _LBS_WEXT_H_ 5#define _LBS_WEXT_H_
6 6
7void lbs_send_disconnect_notification(struct lbs_private *priv);
8void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
9
7extern struct iw_handler_def lbs_handler_def; 10extern struct iw_handler_def lbs_handler_def;
8extern struct iw_handler_def mesh_handler_def; 11extern struct iw_handler_def mesh_handler_def;
9 12
13struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
14 struct lbs_private *priv,
15 u8 band,
16 u16 channel);
17
10#endif 18#endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 392337b37b1d..3691c307e674 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -23,6 +23,8 @@
23static char *lbtf_fw_name = "lbtf_usb.bin"; 23static char *lbtf_fw_name = "lbtf_usb.bin";
24module_param_named(fw_name, lbtf_fw_name, charp, 0644); 24module_param_named(fw_name, lbtf_fw_name, charp, 0644);
25 25
26MODULE_FIRMWARE("lbtf_usb.bin");
27
26static struct usb_device_id if_usb_table[] = { 28static struct usb_device_id if_usb_table[] = {
27 /* Enter the device signature inside */ 29 /* Enter the device signature inside */
28 { USB_DEVICE(0x1286, 0x2001) }, 30 { USB_DEVICE(0x1286, 0x2001) },
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 38cfd79e0590..88e41176e7fd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -284,7 +284,7 @@ struct mac80211_hwsim_data {
284 struct ieee80211_channel *channel; 284 struct ieee80211_channel *channel;
285 unsigned long beacon_int; /* in jiffies unit */ 285 unsigned long beacon_int; /* in jiffies unit */
286 unsigned int rx_filter; 286 unsigned int rx_filter;
287 int started; 287 bool started, idle;
288 struct timer_list beacon_timer; 288 struct timer_list beacon_timer;
289 enum ps_mode { 289 enum ps_mode {
290 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL 290 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -365,6 +365,49 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
365} 365}
366 366
367 367
368static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr)
369{
370 struct mac80211_hwsim_data *data = hw->priv;
371 struct sk_buff *skb;
372 struct hwsim_radiotap_hdr *hdr;
373 u16 flags;
374 struct ieee80211_hdr *hdr11;
375
376 if (!netif_running(hwsim_mon))
377 return;
378
379 skb = dev_alloc_skb(100);
380 if (skb == NULL)
381 return;
382
383 hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr));
384 hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
385 hdr->hdr.it_pad = 0;
386 hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
387 hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
388 (1 << IEEE80211_RADIOTAP_CHANNEL));
389 hdr->rt_flags = 0;
390 hdr->rt_rate = 0;
391 hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
392 flags = IEEE80211_CHAN_2GHZ;
393 hdr->rt_chbitmask = cpu_to_le16(flags);
394
395 hdr11 = (struct ieee80211_hdr *) skb_put(skb, 10);
396 hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
397 IEEE80211_STYPE_ACK);
398 hdr11->duration_id = cpu_to_le16(0);
399 memcpy(hdr11->addr1, addr, ETH_ALEN);
400
401 skb->dev = hwsim_mon;
402 skb_set_mac_header(skb, 0);
403 skb->ip_summed = CHECKSUM_UNNECESSARY;
404 skb->pkt_type = PACKET_OTHERHOST;
405 skb->protocol = htons(ETH_P_802_2);
406 memset(skb->cb, 0, sizeof(skb->cb));
407 netif_rx(skb);
408}
409
410
368static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, 411static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
369 struct sk_buff *skb) 412 struct sk_buff *skb)
370{ 413{
@@ -402,6 +445,12 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
402 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 445 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
403 struct ieee80211_rx_status rx_status; 446 struct ieee80211_rx_status rx_status;
404 447
448 if (data->idle) {
449 printk(KERN_DEBUG "%s: Trying to TX when idle - reject\n",
450 wiphy_name(hw->wiphy));
451 return false;
452 }
453
405 memset(&rx_status, 0, sizeof(rx_status)); 454 memset(&rx_status, 0, sizeof(rx_status));
406 /* TODO: set mactime */ 455 /* TODO: set mactime */
407 rx_status.freq = data->channel->center_freq; 456 rx_status.freq = data->channel->center_freq;
@@ -428,7 +477,8 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
428 if (data == data2) 477 if (data == data2)
429 continue; 478 continue;
430 479
431 if (!data2->started || !hwsim_ps_rx_ok(data2, skb) || 480 if (data2->idle || !data2->started ||
481 !hwsim_ps_rx_ok(data2, skb) ||
432 !data->channel || !data2->channel || 482 !data->channel || !data2->channel ||
433 data->channel->center_freq != data2->channel->center_freq || 483 data->channel->center_freq != data2->channel->center_freq ||
434 !(data->group & data2->group)) 484 !(data->group & data2->group))
@@ -464,6 +514,10 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
464 } 514 }
465 515
466 ack = mac80211_hwsim_tx_frame(hw, skb); 516 ack = mac80211_hwsim_tx_frame(hw, skb);
517 if (ack && skb->len >= 16) {
518 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
519 mac80211_hwsim_monitor_ack(hw, hdr->addr2);
520 }
467 521
468 txi = IEEE80211_SKB_CB(skb); 522 txi = IEEE80211_SKB_CB(skb);
469 523
@@ -571,6 +625,8 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
571 !!(conf->flags & IEEE80211_CONF_IDLE), 625 !!(conf->flags & IEEE80211_CONF_IDLE),
572 !!(conf->flags & IEEE80211_CONF_PS)); 626 !!(conf->flags & IEEE80211_CONF_PS));
573 627
628 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
629
574 data->channel = conf->channel; 630 data->channel = conf->channel;
575 if (!data->started || !data->beacon_int) 631 if (!data->started || !data->beacon_int)
576 del_timer(&data->beacon_timer); 632 del_timer(&data->beacon_timer);
@@ -1045,19 +1101,20 @@ static int __init init_mac80211_hwsim(void)
1045 sband->channels = data->channels_2ghz; 1101 sband->channels = data->channels_2ghz;
1046 sband->n_channels = 1102 sband->n_channels =
1047 ARRAY_SIZE(hwsim_channels_2ghz); 1103 ARRAY_SIZE(hwsim_channels_2ghz);
1104 sband->bitrates = data->rates;
1105 sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
1048 break; 1106 break;
1049 case IEEE80211_BAND_5GHZ: 1107 case IEEE80211_BAND_5GHZ:
1050 sband->channels = data->channels_5ghz; 1108 sband->channels = data->channels_5ghz;
1051 sband->n_channels = 1109 sband->n_channels =
1052 ARRAY_SIZE(hwsim_channels_5ghz); 1110 ARRAY_SIZE(hwsim_channels_5ghz);
1111 sband->bitrates = data->rates + 4;
1112 sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
1053 break; 1113 break;
1054 default: 1114 default:
1055 break; 1115 break;
1056 } 1116 }
1057 1117
1058 sband->bitrates = data->rates;
1059 sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
1060
1061 sband->ht_cap.ht_supported = true; 1118 sband->ht_cap.ht_supported = true;
1062 sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 1119 sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
1063 IEEE80211_HT_CAP_GRN_FLD | 1120 IEEE80211_HT_CAP_GRN_FLD |
@@ -1089,46 +1146,46 @@ static int __init init_mac80211_hwsim(void)
1089 break; 1146 break;
1090 case HWSIM_REGTEST_WORLD_ROAM: 1147 case HWSIM_REGTEST_WORLD_ROAM:
1091 if (i == 0) { 1148 if (i == 0) {
1092 hw->wiphy->custom_regulatory = true; 1149 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1093 wiphy_apply_custom_regulatory(hw->wiphy, 1150 wiphy_apply_custom_regulatory(hw->wiphy,
1094 &hwsim_world_regdom_custom_01); 1151 &hwsim_world_regdom_custom_01);
1095 } 1152 }
1096 break; 1153 break;
1097 case HWSIM_REGTEST_CUSTOM_WORLD: 1154 case HWSIM_REGTEST_CUSTOM_WORLD:
1098 hw->wiphy->custom_regulatory = true; 1155 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1099 wiphy_apply_custom_regulatory(hw->wiphy, 1156 wiphy_apply_custom_regulatory(hw->wiphy,
1100 &hwsim_world_regdom_custom_01); 1157 &hwsim_world_regdom_custom_01);
1101 break; 1158 break;
1102 case HWSIM_REGTEST_CUSTOM_WORLD_2: 1159 case HWSIM_REGTEST_CUSTOM_WORLD_2:
1103 if (i == 0) { 1160 if (i == 0) {
1104 hw->wiphy->custom_regulatory = true; 1161 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1105 wiphy_apply_custom_regulatory(hw->wiphy, 1162 wiphy_apply_custom_regulatory(hw->wiphy,
1106 &hwsim_world_regdom_custom_01); 1163 &hwsim_world_regdom_custom_01);
1107 } else if (i == 1) { 1164 } else if (i == 1) {
1108 hw->wiphy->custom_regulatory = true; 1165 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1109 wiphy_apply_custom_regulatory(hw->wiphy, 1166 wiphy_apply_custom_regulatory(hw->wiphy,
1110 &hwsim_world_regdom_custom_02); 1167 &hwsim_world_regdom_custom_02);
1111 } 1168 }
1112 break; 1169 break;
1113 case HWSIM_REGTEST_STRICT_ALL: 1170 case HWSIM_REGTEST_STRICT_ALL:
1114 hw->wiphy->strict_regulatory = true; 1171 hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
1115 break; 1172 break;
1116 case HWSIM_REGTEST_STRICT_FOLLOW: 1173 case HWSIM_REGTEST_STRICT_FOLLOW:
1117 case HWSIM_REGTEST_STRICT_AND_DRIVER_REG: 1174 case HWSIM_REGTEST_STRICT_AND_DRIVER_REG:
1118 if (i == 0) 1175 if (i == 0)
1119 hw->wiphy->strict_regulatory = true; 1176 hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
1120 break; 1177 break;
1121 case HWSIM_REGTEST_ALL: 1178 case HWSIM_REGTEST_ALL:
1122 if (i == 0) { 1179 if (i == 0) {
1123 hw->wiphy->custom_regulatory = true; 1180 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1124 wiphy_apply_custom_regulatory(hw->wiphy, 1181 wiphy_apply_custom_regulatory(hw->wiphy,
1125 &hwsim_world_regdom_custom_01); 1182 &hwsim_world_regdom_custom_01);
1126 } else if (i == 1) { 1183 } else if (i == 1) {
1127 hw->wiphy->custom_regulatory = true; 1184 hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
1128 wiphy_apply_custom_regulatory(hw->wiphy, 1185 wiphy_apply_custom_regulatory(hw->wiphy,
1129 &hwsim_world_regdom_custom_02); 1186 &hwsim_world_regdom_custom_02);
1130 } else if (i == 4) 1187 } else if (i == 4)
1131 hw->wiphy->strict_regulatory = true; 1188 hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY;
1132 break; 1189 break;
1133 default: 1190 default:
1134 break; 1191 break;
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 746532ebe5a8..0cb5ecc822a8 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/sched.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/pci.h> 18#include <linux/pci.h>
@@ -27,18 +28,6 @@
27#define MWL8K_NAME KBUILD_MODNAME 28#define MWL8K_NAME KBUILD_MODNAME
28#define MWL8K_VERSION "0.10" 29#define MWL8K_VERSION "0.10"
29 30
30MODULE_DESCRIPTION(MWL8K_DESC);
31MODULE_VERSION(MWL8K_VERSION);
32MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
33MODULE_LICENSE("GPL");
34
35static DEFINE_PCI_DEVICE_TABLE(mwl8k_table) = {
36 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = 8687, },
37 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = 8687, },
38 { }
39};
40MODULE_DEVICE_TABLE(pci, mwl8k_table);
41
42/* Register definitions */ 31/* Register definitions */
43#define MWL8K_HIU_GEN_PTR 0x00000c10 32#define MWL8K_HIU_GEN_PTR 0x00000c10
44#define MWL8K_MODE_STA 0x0000005a 33#define MWL8K_MODE_STA 0x0000005a
@@ -88,72 +77,89 @@ MODULE_DEVICE_TABLE(pci, mwl8k_table);
88 MWL8K_A2H_INT_RX_READY | \ 77 MWL8K_A2H_INT_RX_READY | \
89 MWL8K_A2H_INT_TX_DONE) 78 MWL8K_A2H_INT_TX_DONE)
90 79
91/* WME stream classes */
92#define WME_AC_BE 0 /* best effort */
93#define WME_AC_BK 1 /* background */
94#define WME_AC_VI 2 /* video */
95#define WME_AC_VO 3 /* voice */
96
97#define MWL8K_RX_QUEUES 1 80#define MWL8K_RX_QUEUES 1
98#define MWL8K_TX_QUEUES 4 81#define MWL8K_TX_QUEUES 4
99 82
83struct rxd_ops {
84 int rxd_size;
85 void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
86 void (*rxd_refill)(void *rxd, dma_addr_t addr, int len);
87 int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status);
88};
89
90struct mwl8k_device_info {
91 char *part_name;
92 char *helper_image;
93 char *fw_image;
94 struct rxd_ops *rxd_ops;
95 u16 modes;
96};
97
100struct mwl8k_rx_queue { 98struct mwl8k_rx_queue {
101 int rx_desc_count; 99 int rxd_count;
102 100
103 /* hw receives here */ 101 /* hw receives here */
104 int rx_head; 102 int head;
105 103
106 /* refill descs here */ 104 /* refill descs here */
107 int rx_tail; 105 int tail;
108 106
109 struct mwl8k_rx_desc *rx_desc_area; 107 void *rxd;
110 dma_addr_t rx_desc_dma; 108 dma_addr_t rxd_dma;
111 struct sk_buff **rx_skb; 109 struct {
110 struct sk_buff *skb;
111 DECLARE_PCI_UNMAP_ADDR(dma)
112 } *buf;
112}; 113};
113 114
114struct mwl8k_tx_queue { 115struct mwl8k_tx_queue {
115 /* hw transmits here */ 116 /* hw transmits here */
116 int tx_head; 117 int head;
117 118
118 /* sw appends here */ 119 /* sw appends here */
119 int tx_tail; 120 int tail;
120 121
121 struct ieee80211_tx_queue_stats tx_stats; 122 struct ieee80211_tx_queue_stats stats;
122 struct mwl8k_tx_desc *tx_desc_area; 123 struct mwl8k_tx_desc *txd;
123 dma_addr_t tx_desc_dma; 124 dma_addr_t txd_dma;
124 struct sk_buff **tx_skb; 125 struct sk_buff **skb;
125}; 126};
126 127
127/* Pointers to the firmware data and meta information about it. */ 128/* Pointers to the firmware data and meta information about it. */
128struct mwl8k_firmware { 129struct mwl8k_firmware {
129 /* Microcode */
130 struct firmware *ucode;
131
132 /* Boot helper code */ 130 /* Boot helper code */
133 struct firmware *helper; 131 struct firmware *helper;
132
133 /* Microcode */
134 struct firmware *ucode;
134}; 135};
135 136
136struct mwl8k_priv { 137struct mwl8k_priv {
138 void __iomem *sram;
137 void __iomem *regs; 139 void __iomem *regs;
138 struct ieee80211_hw *hw; 140 struct ieee80211_hw *hw;
139 141
140 struct pci_dev *pdev; 142 struct pci_dev *pdev;
141 u8 name[16]; 143
144 struct mwl8k_device_info *device_info;
145 bool ap_fw;
146 struct rxd_ops *rxd_ops;
142 147
143 /* firmware files and meta data */ 148 /* firmware files and meta data */
144 struct mwl8k_firmware fw; 149 struct mwl8k_firmware fw;
145 u32 part_num;
146 150
147 /* firmware access */ 151 /* firmware access */
148 struct mutex fw_mutex; 152 struct mutex fw_mutex;
149 struct task_struct *fw_mutex_owner; 153 struct task_struct *fw_mutex_owner;
150 int fw_mutex_depth; 154 int fw_mutex_depth;
151 struct completion *tx_wait;
152 struct completion *hostcmd_wait; 155 struct completion *hostcmd_wait;
153 156
154 /* lock held over TX and TX reap */ 157 /* lock held over TX and TX reap */
155 spinlock_t tx_lock; 158 spinlock_t tx_lock;
156 159
160 /* TX quiesce completion, protected by fw_mutex and tx_lock */
161 struct completion *tx_wait;
162
157 struct ieee80211_vif *vif; 163 struct ieee80211_vif *vif;
158 164
159 struct ieee80211_channel *current_channel; 165 struct ieee80211_channel *current_channel;
@@ -178,10 +184,11 @@ struct mwl8k_priv {
178 /* PHY parameters */ 184 /* PHY parameters */
179 struct ieee80211_supported_band band; 185 struct ieee80211_supported_band band;
180 struct ieee80211_channel channels[14]; 186 struct ieee80211_channel channels[14];
181 struct ieee80211_rate rates[12]; 187 struct ieee80211_rate rates[13];
182 188
183 bool radio_on; 189 bool radio_on;
184 bool radio_short_preamble; 190 bool radio_short_preamble;
191 bool sniffer_enabled;
185 bool wmm_enabled; 192 bool wmm_enabled;
186 193
187 /* XXX need to convert this to handle multiple interfaces */ 194 /* XXX need to convert this to handle multiple interfaces */
@@ -199,9 +206,6 @@ struct mwl8k_priv {
199 206
200 /* Tasklet to reclaim TX descriptors and buffers after tx */ 207 /* Tasklet to reclaim TX descriptors and buffers after tx */
201 struct tasklet_struct tx_reclaim_task; 208 struct tasklet_struct tx_reclaim_task;
202
203 /* Work thread to serialize configuration requests */
204 struct workqueue_struct *config_wq;
205}; 209};
206 210
207/* Per interface specific private data */ 211/* Per interface specific private data */
@@ -220,7 +224,7 @@ struct mwl8k_vif {
220 * Subset of supported legacy rates. 224 * Subset of supported legacy rates.
221 * Intersection of AP and STA supported rates. 225 * Intersection of AP and STA supported rates.
222 */ 226 */
223 struct ieee80211_rate legacy_rates[12]; 227 struct ieee80211_rate legacy_rates[13];
224 228
225 /* number of supported legacy rates */ 229 /* number of supported legacy rates */
226 u8 legacy_nrates; 230 u8 legacy_nrates;
@@ -252,9 +256,10 @@ static const struct ieee80211_rate mwl8k_rates[] = {
252 { .bitrate = 10, .hw_value = 2, }, 256 { .bitrate = 10, .hw_value = 2, },
253 { .bitrate = 20, .hw_value = 4, }, 257 { .bitrate = 20, .hw_value = 4, },
254 { .bitrate = 55, .hw_value = 11, }, 258 { .bitrate = 55, .hw_value = 11, },
259 { .bitrate = 110, .hw_value = 22, },
260 { .bitrate = 220, .hw_value = 44, },
255 { .bitrate = 60, .hw_value = 12, }, 261 { .bitrate = 60, .hw_value = 12, },
256 { .bitrate = 90, .hw_value = 18, }, 262 { .bitrate = 90, .hw_value = 18, },
257 { .bitrate = 110, .hw_value = 22, },
258 { .bitrate = 120, .hw_value = 24, }, 263 { .bitrate = 120, .hw_value = 24, },
259 { .bitrate = 180, .hw_value = 36, }, 264 { .bitrate = 180, .hw_value = 36, },
260 { .bitrate = 240, .hw_value = 48, }, 265 { .bitrate = 240, .hw_value = 48, },
@@ -270,10 +275,12 @@ static const struct ieee80211_rate mwl8k_rates[] = {
270/* Firmware command codes */ 275/* Firmware command codes */
271#define MWL8K_CMD_CODE_DNLD 0x0001 276#define MWL8K_CMD_CODE_DNLD 0x0001
272#define MWL8K_CMD_GET_HW_SPEC 0x0003 277#define MWL8K_CMD_GET_HW_SPEC 0x0003
278#define MWL8K_CMD_SET_HW_SPEC 0x0004
273#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010 279#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
274#define MWL8K_CMD_GET_STAT 0x0014 280#define MWL8K_CMD_GET_STAT 0x0014
275#define MWL8K_CMD_RADIO_CONTROL 0x001c 281#define MWL8K_CMD_RADIO_CONTROL 0x001c
276#define MWL8K_CMD_RF_TX_POWER 0x001e 282#define MWL8K_CMD_RF_TX_POWER 0x001e
283#define MWL8K_CMD_RF_ANTENNA 0x0020
277#define MWL8K_CMD_SET_PRE_SCAN 0x0107 284#define MWL8K_CMD_SET_PRE_SCAN 0x0107
278#define MWL8K_CMD_SET_POST_SCAN 0x0108 285#define MWL8K_CMD_SET_POST_SCAN 0x0108
279#define MWL8K_CMD_SET_RF_CHANNEL 0x010a 286#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -287,6 +294,7 @@ static const struct ieee80211_rate mwl8k_rates[] = {
287#define MWL8K_CMD_MIMO_CONFIG 0x0125 294#define MWL8K_CMD_MIMO_CONFIG 0x0125
288#define MWL8K_CMD_USE_FIXED_RATE 0x0126 295#define MWL8K_CMD_USE_FIXED_RATE 0x0126
289#define MWL8K_CMD_ENABLE_SNIFFER 0x0150 296#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
297#define MWL8K_CMD_SET_MAC_ADDR 0x0202
290#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 298#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
291#define MWL8K_CMD_UPDATE_STADB 0x1123 299#define MWL8K_CMD_UPDATE_STADB 0x1123
292 300
@@ -299,10 +307,12 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
299 switch (cmd & ~0x8000) { 307 switch (cmd & ~0x8000) {
300 MWL8K_CMDNAME(CODE_DNLD); 308 MWL8K_CMDNAME(CODE_DNLD);
301 MWL8K_CMDNAME(GET_HW_SPEC); 309 MWL8K_CMDNAME(GET_HW_SPEC);
310 MWL8K_CMDNAME(SET_HW_SPEC);
302 MWL8K_CMDNAME(MAC_MULTICAST_ADR); 311 MWL8K_CMDNAME(MAC_MULTICAST_ADR);
303 MWL8K_CMDNAME(GET_STAT); 312 MWL8K_CMDNAME(GET_STAT);
304 MWL8K_CMDNAME(RADIO_CONTROL); 313 MWL8K_CMDNAME(RADIO_CONTROL);
305 MWL8K_CMDNAME(RF_TX_POWER); 314 MWL8K_CMDNAME(RF_TX_POWER);
315 MWL8K_CMDNAME(RF_ANTENNA);
306 MWL8K_CMDNAME(SET_PRE_SCAN); 316 MWL8K_CMDNAME(SET_PRE_SCAN);
307 MWL8K_CMDNAME(SET_POST_SCAN); 317 MWL8K_CMDNAME(SET_POST_SCAN);
308 MWL8K_CMDNAME(SET_RF_CHANNEL); 318 MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -316,6 +326,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
316 MWL8K_CMDNAME(MIMO_CONFIG); 326 MWL8K_CMDNAME(MIMO_CONFIG);
317 MWL8K_CMDNAME(USE_FIXED_RATE); 327 MWL8K_CMDNAME(USE_FIXED_RATE);
318 MWL8K_CMDNAME(ENABLE_SNIFFER); 328 MWL8K_CMDNAME(ENABLE_SNIFFER);
329 MWL8K_CMDNAME(SET_MAC_ADDR);
319 MWL8K_CMDNAME(SET_RATEADAPT_MODE); 330 MWL8K_CMDNAME(SET_RATEADAPT_MODE);
320 MWL8K_CMDNAME(UPDATE_STADB); 331 MWL8K_CMDNAME(UPDATE_STADB);
321 default: 332 default:
@@ -353,41 +364,35 @@ static void mwl8k_release_firmware(struct mwl8k_priv *priv)
353 364
354/* Request fw image */ 365/* Request fw image */
355static int mwl8k_request_fw(struct mwl8k_priv *priv, 366static int mwl8k_request_fw(struct mwl8k_priv *priv,
356 const char *fname, struct firmware **fw) 367 const char *fname, struct firmware **fw)
357{ 368{
358 /* release current image */ 369 /* release current image */
359 if (*fw != NULL) 370 if (*fw != NULL)
360 mwl8k_release_fw(fw); 371 mwl8k_release_fw(fw);
361 372
362 return request_firmware((const struct firmware **)fw, 373 return request_firmware((const struct firmware **)fw,
363 fname, &priv->pdev->dev); 374 fname, &priv->pdev->dev);
364} 375}
365 376
366static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num) 377static int mwl8k_request_firmware(struct mwl8k_priv *priv)
367{ 378{
368 u8 filename[64]; 379 struct mwl8k_device_info *di = priv->device_info;
369 int rc; 380 int rc;
370 381
371 priv->part_num = part_num; 382 if (di->helper_image != NULL) {
372 383 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper);
373 snprintf(filename, sizeof(filename), 384 if (rc) {
374 "mwl8k/helper_%u.fw", priv->part_num); 385 printk(KERN_ERR "%s: Error requesting helper "
375 386 "firmware file %s\n", pci_name(priv->pdev),
376 rc = mwl8k_request_fw(priv, filename, &priv->fw.helper); 387 di->helper_image);
377 if (rc) { 388 return rc;
378 printk(KERN_ERR 389 }
379 "%s Error requesting helper firmware file %s\n",
380 pci_name(priv->pdev), filename);
381 return rc;
382 } 390 }
383 391
384 snprintf(filename, sizeof(filename), 392 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode);
385 "mwl8k/fmimage_%u.fw", priv->part_num);
386
387 rc = mwl8k_request_fw(priv, filename, &priv->fw.ucode);
388 if (rc) { 393 if (rc) {
389 printk(KERN_ERR "%s Error requesting firmware file %s\n", 394 printk(KERN_ERR "%s: Error requesting firmware file %s\n",
390 pci_name(priv->pdev), filename); 395 pci_name(priv->pdev), di->fw_image);
391 mwl8k_release_fw(&priv->fw.helper); 396 mwl8k_release_fw(&priv->fw.helper);
392 return rc; 397 return rc;
393 } 398 }
@@ -395,6 +400,9 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num)
395 return 0; 400 return 0;
396} 401}
397 402
403MODULE_FIRMWARE("mwl8k/helper_8687.fw");
404MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
405
398struct mwl8k_cmd_pkt { 406struct mwl8k_cmd_pkt {
399 __le16 code; 407 __le16 code;
400 __le16 length; 408 __le16 length;
@@ -434,6 +442,7 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
434 break; 442 break;
435 } 443 }
436 444
445 cond_resched();
437 udelay(1); 446 udelay(1);
438 } while (--loops); 447 } while (--loops);
439 448
@@ -542,43 +551,62 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
542 return rc; 551 return rc;
543} 552}
544 553
545static int mwl8k_load_firmware(struct mwl8k_priv *priv) 554static int mwl8k_load_firmware(struct ieee80211_hw *hw)
546{ 555{
547 int loops, rc; 556 struct mwl8k_priv *priv = hw->priv;
557 struct firmware *fw = priv->fw.ucode;
558 struct mwl8k_device_info *di = priv->device_info;
559 int rc;
560 int loops;
548 561
549 const u8 *ucode = priv->fw.ucode->data; 562 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
550 size_t ucode_len = priv->fw.ucode->size; 563 struct firmware *helper = priv->fw.helper;
551 const u8 *helper = priv->fw.helper->data;
552 size_t helper_len = priv->fw.helper->size;
553 564
554 if (!memcmp(ucode, "\x01\x00\x00\x00", 4)) { 565 if (helper == NULL) {
555 rc = mwl8k_load_fw_image(priv, helper, helper_len); 566 printk(KERN_ERR "%s: helper image needed but none "
567 "given\n", pci_name(priv->pdev));
568 return -EINVAL;
569 }
570
571 rc = mwl8k_load_fw_image(priv, helper->data, helper->size);
556 if (rc) { 572 if (rc) {
557 printk(KERN_ERR "%s: unable to load firmware " 573 printk(KERN_ERR "%s: unable to load firmware "
558 "helper image\n", pci_name(priv->pdev)); 574 "helper image\n", pci_name(priv->pdev));
559 return rc; 575 return rc;
560 } 576 }
561 msleep(1); 577 msleep(1);
562 578
563 rc = mwl8k_feed_fw_image(priv, ucode, ucode_len); 579 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
564 } else { 580 } else {
565 rc = mwl8k_load_fw_image(priv, ucode, ucode_len); 581 rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
566 } 582 }
567 583
568 if (rc) { 584 if (rc) {
569 printk(KERN_ERR "%s: unable to load firmware data\n", 585 printk(KERN_ERR "%s: unable to load firmware image\n",
570 pci_name(priv->pdev)); 586 pci_name(priv->pdev));
571 return rc; 587 return rc;
572 } 588 }
573 589
574 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR); 590 if (di->modes & BIT(NL80211_IFTYPE_AP))
591 iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
592 else
593 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
575 msleep(1); 594 msleep(1);
576 595
577 loops = 200000; 596 loops = 200000;
578 do { 597 do {
579 if (ioread32(priv->regs + MWL8K_HIU_INT_CODE) 598 u32 ready_code;
580 == MWL8K_FWSTA_READY) 599
600 ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
601 if (ready_code == MWL8K_FWAP_READY) {
602 priv->ap_fw = 1;
603 break;
604 } else if (ready_code == MWL8K_FWSTA_READY) {
605 priv->ap_fw = 0;
581 break; 606 break;
607 }
608
609 cond_resched();
582 udelay(1); 610 udelay(1);
583 } while (--loops); 611 } while (--loops);
584 612
@@ -605,7 +633,7 @@ struct ewc_ht_info {
605/* Peer Entry flags - used to define the type of the peer node */ 633/* Peer Entry flags - used to define the type of the peer node */
606#define MWL8K_PEER_TYPE_ACCESSPOINT 2 634#define MWL8K_PEER_TYPE_ACCESSPOINT 2
607 635
608#define MWL8K_IEEE_LEGACY_DATA_RATES 12 636#define MWL8K_IEEE_LEGACY_DATA_RATES 13
609#define MWL8K_MCS_BITMAP_SIZE 16 637#define MWL8K_MCS_BITMAP_SIZE 16
610 638
611struct peer_capability_info { 639struct peer_capability_info {
@@ -731,16 +759,96 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
731 759
732 760
733/* 761/*
734 * Packet reception. 762 * Packet reception for 88w8366.
735 */ 763 */
736#define MWL8K_RX_CTRL_OWNED_BY_HOST 0x02 764struct mwl8k_rxd_8366 {
765 __le16 pkt_len;
766 __u8 sq2;
767 __u8 rate;
768 __le32 pkt_phys_addr;
769 __le32 next_rxd_phys_addr;
770 __le16 qos_control;
771 __le16 htsig2;
772 __le32 hw_rssi_info;
773 __le32 hw_noise_floor_info;
774 __u8 noise_floor;
775 __u8 pad0[3];
776 __u8 rssi;
777 __u8 rx_status;
778 __u8 channel;
779 __u8 rx_ctrl;
780} __attribute__((packed));
781
782#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80
783
784static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
785{
786 struct mwl8k_rxd_8366 *rxd = _rxd;
787
788 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
789 rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST;
790}
791
792static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
793{
794 struct mwl8k_rxd_8366 *rxd = _rxd;
795
796 rxd->pkt_len = cpu_to_le16(len);
797 rxd->pkt_phys_addr = cpu_to_le32(addr);
798 wmb();
799 rxd->rx_ctrl = 0;
800}
801
802static int
803mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status)
804{
805 struct mwl8k_rxd_8366 *rxd = _rxd;
806
807 if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST))
808 return -1;
809 rmb();
810
811 memset(status, 0, sizeof(*status));
812
813 status->signal = -rxd->rssi;
814 status->noise = -rxd->noise_floor;
737 815
738struct mwl8k_rx_desc { 816 if (rxd->rate & 0x80) {
817 status->flag |= RX_FLAG_HT;
818 status->rate_idx = rxd->rate & 0x7f;
819 } else {
820 int i;
821
822 for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
823 if (mwl8k_rates[i].hw_value == rxd->rate) {
824 status->rate_idx = i;
825 break;
826 }
827 }
828 }
829
830 status->band = IEEE80211_BAND_2GHZ;
831 status->freq = ieee80211_channel_to_frequency(rxd->channel);
832
833 return le16_to_cpu(rxd->pkt_len);
834}
835
836static struct rxd_ops rxd_8366_ops = {
837 .rxd_size = sizeof(struct mwl8k_rxd_8366),
838 .rxd_init = mwl8k_rxd_8366_init,
839 .rxd_refill = mwl8k_rxd_8366_refill,
840 .rxd_process = mwl8k_rxd_8366_process,
841};
842
843/*
844 * Packet reception for 88w8687.
845 */
846struct mwl8k_rxd_8687 {
739 __le16 pkt_len; 847 __le16 pkt_len;
740 __u8 link_quality; 848 __u8 link_quality;
741 __u8 noise_level; 849 __u8 noise_level;
742 __le32 pkt_phys_addr; 850 __le32 pkt_phys_addr;
743 __le32 next_rx_desc_phys_addr; 851 __le32 next_rxd_phys_addr;
744 __le16 qos_control; 852 __le16 qos_control;
745 __le16 rate_info; 853 __le16 rate_info;
746 __le32 pad0[4]; 854 __le32 pad0[4];
@@ -752,6 +860,76 @@ struct mwl8k_rx_desc {
752 __u8 pad2[2]; 860 __u8 pad2[2];
753} __attribute__((packed)); 861} __attribute__((packed));
754 862
863#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000
864#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
865#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
866#define MWL8K_8687_RATE_INFO_40MHZ 0x0004
867#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002
868#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001
869
870#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02
871
872static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr)
873{
874 struct mwl8k_rxd_8687 *rxd = _rxd;
875
876 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
877 rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST;
878}
879
880static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
881{
882 struct mwl8k_rxd_8687 *rxd = _rxd;
883
884 rxd->pkt_len = cpu_to_le16(len);
885 rxd->pkt_phys_addr = cpu_to_le32(addr);
886 wmb();
887 rxd->rx_ctrl = 0;
888}
889
890static int
891mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status)
892{
893 struct mwl8k_rxd_8687 *rxd = _rxd;
894 u16 rate_info;
895
896 if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST))
897 return -1;
898 rmb();
899
900 rate_info = le16_to_cpu(rxd->rate_info);
901
902 memset(status, 0, sizeof(*status));
903
904 status->signal = -rxd->rssi;
905 status->noise = -rxd->noise_level;
906 status->qual = rxd->link_quality;
907 status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
908 status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
909
910 if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE)
911 status->flag |= RX_FLAG_SHORTPRE;
912 if (rate_info & MWL8K_8687_RATE_INFO_40MHZ)
913 status->flag |= RX_FLAG_40MHZ;
914 if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI)
915 status->flag |= RX_FLAG_SHORT_GI;
916 if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT)
917 status->flag |= RX_FLAG_HT;
918
919 status->band = IEEE80211_BAND_2GHZ;
920 status->freq = ieee80211_channel_to_frequency(rxd->channel);
921
922 return le16_to_cpu(rxd->pkt_len);
923}
924
925static struct rxd_ops rxd_8687_ops = {
926 .rxd_size = sizeof(struct mwl8k_rxd_8687),
927 .rxd_init = mwl8k_rxd_8687_init,
928 .rxd_refill = mwl8k_rxd_8687_refill,
929 .rxd_process = mwl8k_rxd_8687_process,
930};
931
932
755#define MWL8K_RX_DESCS 256 933#define MWL8K_RX_DESCS 256
756#define MWL8K_RX_MAXSZ 3800 934#define MWL8K_RX_MAXSZ 3800
757 935
@@ -762,43 +940,44 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
762 int size; 940 int size;
763 int i; 941 int i;
764 942
765 rxq->rx_desc_count = 0; 943 rxq->rxd_count = 0;
766 rxq->rx_head = 0; 944 rxq->head = 0;
767 rxq->rx_tail = 0; 945 rxq->tail = 0;
768 946
769 size = MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc); 947 size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size;
770 948
771 rxq->rx_desc_area = 949 rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
772 pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma); 950 if (rxq->rxd == NULL) {
773 if (rxq->rx_desc_area == NULL) {
774 printk(KERN_ERR "%s: failed to alloc RX descriptors\n", 951 printk(KERN_ERR "%s: failed to alloc RX descriptors\n",
775 priv->name); 952 wiphy_name(hw->wiphy));
776 return -ENOMEM; 953 return -ENOMEM;
777 } 954 }
778 memset(rxq->rx_desc_area, 0, size); 955 memset(rxq->rxd, 0, size);
779 956
780 rxq->rx_skb = kmalloc(MWL8K_RX_DESCS * 957 rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
781 sizeof(*rxq->rx_skb), GFP_KERNEL); 958 if (rxq->buf == NULL) {
782 if (rxq->rx_skb == NULL) {
783 printk(KERN_ERR "%s: failed to alloc RX skbuff list\n", 959 printk(KERN_ERR "%s: failed to alloc RX skbuff list\n",
784 priv->name); 960 wiphy_name(hw->wiphy));
785 pci_free_consistent(priv->pdev, size, 961 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
786 rxq->rx_desc_area, rxq->rx_desc_dma);
787 return -ENOMEM; 962 return -ENOMEM;
788 } 963 }
789 memset(rxq->rx_skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->rx_skb)); 964 memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
790 965
791 for (i = 0; i < MWL8K_RX_DESCS; i++) { 966 for (i = 0; i < MWL8K_RX_DESCS; i++) {
792 struct mwl8k_rx_desc *rx_desc; 967 int desc_size;
968 void *rxd;
793 int nexti; 969 int nexti;
970 dma_addr_t next_dma_addr;
794 971
795 rx_desc = rxq->rx_desc_area + i; 972 desc_size = priv->rxd_ops->rxd_size;
796 nexti = (i + 1) % MWL8K_RX_DESCS; 973 rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size);
797 974
798 rx_desc->next_rx_desc_phys_addr = 975 nexti = i + 1;
799 cpu_to_le32(rxq->rx_desc_dma 976 if (nexti == MWL8K_RX_DESCS)
800 + nexti * sizeof(*rx_desc)); 977 nexti = 0;
801 rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST; 978 next_dma_addr = rxq->rxd_dma + (nexti * desc_size);
979
980 priv->rxd_ops->rxd_init(rxd, next_dma_addr);
802 } 981 }
803 982
804 return 0; 983 return 0;
@@ -811,27 +990,28 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
811 int refilled; 990 int refilled;
812 991
813 refilled = 0; 992 refilled = 0;
814 while (rxq->rx_desc_count < MWL8K_RX_DESCS && limit--) { 993 while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
815 struct sk_buff *skb; 994 struct sk_buff *skb;
995 dma_addr_t addr;
816 int rx; 996 int rx;
997 void *rxd;
817 998
818 skb = dev_alloc_skb(MWL8K_RX_MAXSZ); 999 skb = dev_alloc_skb(MWL8K_RX_MAXSZ);
819 if (skb == NULL) 1000 if (skb == NULL)
820 break; 1001 break;
821 1002
822 rxq->rx_desc_count++; 1003 addr = pci_map_single(priv->pdev, skb->data,
823 1004 MWL8K_RX_MAXSZ, DMA_FROM_DEVICE);
824 rx = rxq->rx_tail;
825 rxq->rx_tail = (rx + 1) % MWL8K_RX_DESCS;
826 1005
827 rxq->rx_desc_area[rx].pkt_phys_addr = 1006 rxq->rxd_count++;
828 cpu_to_le32(pci_map_single(priv->pdev, skb->data, 1007 rx = rxq->tail++;
829 MWL8K_RX_MAXSZ, DMA_FROM_DEVICE)); 1008 if (rxq->tail == MWL8K_RX_DESCS)
1009 rxq->tail = 0;
1010 rxq->buf[rx].skb = skb;
1011 pci_unmap_addr_set(&rxq->buf[rx], dma, addr);
830 1012
831 rxq->rx_desc_area[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ); 1013 rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
832 rxq->rx_skb[rx] = skb; 1014 priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ);
833 wmb();
834 rxq->rx_desc_area[rx].rx_ctrl = 0;
835 1015
836 refilled++; 1016 refilled++;
837 } 1017 }
@@ -847,24 +1027,24 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
847 int i; 1027 int i;
848 1028
849 for (i = 0; i < MWL8K_RX_DESCS; i++) { 1029 for (i = 0; i < MWL8K_RX_DESCS; i++) {
850 if (rxq->rx_skb[i] != NULL) { 1030 if (rxq->buf[i].skb != NULL) {
851 unsigned long addr; 1031 pci_unmap_single(priv->pdev,
852 1032 pci_unmap_addr(&rxq->buf[i], dma),
853 addr = le32_to_cpu(rxq->rx_desc_area[i].pkt_phys_addr); 1033 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
854 pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ, 1034 pci_unmap_addr_set(&rxq->buf[i], dma, 0);
855 PCI_DMA_FROMDEVICE); 1035
856 kfree_skb(rxq->rx_skb[i]); 1036 kfree_skb(rxq->buf[i].skb);
857 rxq->rx_skb[i] = NULL; 1037 rxq->buf[i].skb = NULL;
858 } 1038 }
859 } 1039 }
860 1040
861 kfree(rxq->rx_skb); 1041 kfree(rxq->buf);
862 rxq->rx_skb = NULL; 1042 rxq->buf = NULL;
863 1043
864 pci_free_consistent(priv->pdev, 1044 pci_free_consistent(priv->pdev,
865 MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc), 1045 MWL8K_RX_DESCS * priv->rxd_ops->rxd_size,
866 rxq->rx_desc_area, rxq->rx_desc_dma); 1046 rxq->rxd, rxq->rxd_dma);
867 rxq->rx_desc_area = NULL; 1047 rxq->rxd = NULL;
868} 1048}
869 1049
870 1050
@@ -880,9 +1060,11 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
880 !compare_ether_addr(wh->addr3, priv->capture_bssid); 1060 !compare_ether_addr(wh->addr3, priv->capture_bssid);
881} 1061}
882 1062
883static inline void mwl8k_save_beacon(struct mwl8k_priv *priv, 1063static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
884 struct sk_buff *skb) 1064 struct sk_buff *skb)
885{ 1065{
1066 struct mwl8k_priv *priv = hw->priv;
1067
886 priv->capture_beacon = false; 1068 priv->capture_beacon = false;
887 memset(priv->capture_bssid, 0, ETH_ALEN); 1069 memset(priv->capture_bssid, 0, ETH_ALEN);
888 1070
@@ -893,8 +1075,7 @@ static inline void mwl8k_save_beacon(struct mwl8k_priv *priv,
893 */ 1075 */
894 priv->beacon_skb = skb_copy(skb, GFP_ATOMIC); 1076 priv->beacon_skb = skb_copy(skb, GFP_ATOMIC);
895 if (priv->beacon_skb != NULL) 1077 if (priv->beacon_skb != NULL)
896 queue_work(priv->config_wq, 1078 ieee80211_queue_work(hw, &priv->finalize_join_worker);
897 &priv->finalize_join_worker);
898} 1079}
899 1080
900static int rxq_process(struct ieee80211_hw *hw, int index, int limit) 1081static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
@@ -904,53 +1085,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
904 int processed; 1085 int processed;
905 1086
906 processed = 0; 1087 processed = 0;
907 while (rxq->rx_desc_count && limit--) { 1088 while (rxq->rxd_count && limit--) {
908 struct mwl8k_rx_desc *rx_desc;
909 struct sk_buff *skb; 1089 struct sk_buff *skb;
1090 void *rxd;
1091 int pkt_len;
910 struct ieee80211_rx_status status; 1092 struct ieee80211_rx_status status;
911 unsigned long addr;
912 struct ieee80211_hdr *wh;
913 1093
914 rx_desc = rxq->rx_desc_area + rxq->rx_head; 1094 skb = rxq->buf[rxq->head].skb;
915 if (!(rx_desc->rx_ctrl & MWL8K_RX_CTRL_OWNED_BY_HOST)) 1095 if (skb == NULL)
916 break; 1096 break;
917 rmb();
918 1097
919 skb = rxq->rx_skb[rxq->rx_head]; 1098 rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
920 if (skb == NULL) 1099
1100 pkt_len = priv->rxd_ops->rxd_process(rxd, &status);
1101 if (pkt_len < 0)
921 break; 1102 break;
922 rxq->rx_skb[rxq->rx_head] = NULL;
923 1103
924 rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS; 1104 rxq->buf[rxq->head].skb = NULL;
925 rxq->rx_desc_count--;
926 1105
927 addr = le32_to_cpu(rx_desc->pkt_phys_addr); 1106 pci_unmap_single(priv->pdev,
928 pci_unmap_single(priv->pdev, addr, 1107 pci_unmap_addr(&rxq->buf[rxq->head], dma),
929 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); 1108 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
1109 pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
930 1110
931 skb_put(skb, le16_to_cpu(rx_desc->pkt_len)); 1111 rxq->head++;
932 mwl8k_remove_dma_header(skb); 1112 if (rxq->head == MWL8K_RX_DESCS)
1113 rxq->head = 0;
1114
1115 rxq->rxd_count--;
933 1116
934 wh = (struct ieee80211_hdr *)skb->data; 1117 skb_put(skb, pkt_len);
1118 mwl8k_remove_dma_header(skb);
935 1119
936 /* 1120 /*
937 * Check for pending join operation. save a copy of 1121 * Check for a pending join operation. Save a
938 * the beacon and schedule a tasklet to send finalize 1122 * copy of the beacon and schedule a tasklet to
939 * join command to the firmware. 1123 * send a FINALIZE_JOIN command to the firmware.
940 */ 1124 */
941 if (mwl8k_capture_bssid(priv, wh)) 1125 if (mwl8k_capture_bssid(priv, (void *)skb->data))
942 mwl8k_save_beacon(priv, skb); 1126 mwl8k_save_beacon(hw, skb);
943 1127
944 memset(&status, 0, sizeof(status));
945 status.mactime = 0;
946 status.signal = -rx_desc->rssi;
947 status.noise = -rx_desc->noise_level;
948 status.qual = rx_desc->link_quality;
949 status.antenna = 1;
950 status.rate_idx = 1;
951 status.flag = 0;
952 status.band = IEEE80211_BAND_2GHZ;
953 status.freq = ieee80211_channel_to_frequency(rx_desc->channel);
954 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 1128 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
955 ieee80211_rx_irqsafe(hw, skb); 1129 ieee80211_rx_irqsafe(hw, skb);
956 1130
@@ -965,24 +1139,10 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
965 * Packet transmission. 1139 * Packet transmission.
966 */ 1140 */
967 1141
968/* Transmit queue assignment. */
969enum {
970 MWL8K_WME_AC_BK = 0, /* background access */
971 MWL8K_WME_AC_BE = 1, /* best effort access */
972 MWL8K_WME_AC_VI = 2, /* video access */
973 MWL8K_WME_AC_VO = 3, /* voice access */
974};
975
976/* Transmit packet ACK policy */ 1142/* Transmit packet ACK policy */
977#define MWL8K_TXD_ACK_POLICY_NORMAL 0 1143#define MWL8K_TXD_ACK_POLICY_NORMAL 0
978#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3 1144#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
979 1145
980#define GET_TXQ(_ac) (\
981 ((_ac) == WME_AC_VO) ? MWL8K_WME_AC_VO : \
982 ((_ac) == WME_AC_VI) ? MWL8K_WME_AC_VI : \
983 ((_ac) == WME_AC_BK) ? MWL8K_WME_AC_BK : \
984 MWL8K_WME_AC_BE)
985
986#define MWL8K_TXD_STATUS_OK 0x00000001 1146#define MWL8K_TXD_STATUS_OK 0x00000001
987#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 1147#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
988#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 1148#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
@@ -997,7 +1157,7 @@ struct mwl8k_tx_desc {
997 __le32 pkt_phys_addr; 1157 __le32 pkt_phys_addr;
998 __le16 pkt_len; 1158 __le16 pkt_len;
999 __u8 dest_MAC_addr[ETH_ALEN]; 1159 __u8 dest_MAC_addr[ETH_ALEN];
1000 __le32 next_tx_desc_phys_addr; 1160 __le32 next_txd_phys_addr;
1001 __le32 reserved; 1161 __le32 reserved;
1002 __le16 rate_info; 1162 __le16 rate_info;
1003 __u8 peer_id; 1163 __u8 peer_id;
@@ -1013,44 +1173,40 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
1013 int size; 1173 int size;
1014 int i; 1174 int i;
1015 1175
1016 memset(&txq->tx_stats, 0, sizeof(struct ieee80211_tx_queue_stats)); 1176 memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
1017 txq->tx_stats.limit = MWL8K_TX_DESCS; 1177 txq->stats.limit = MWL8K_TX_DESCS;
1018 txq->tx_head = 0; 1178 txq->head = 0;
1019 txq->tx_tail = 0; 1179 txq->tail = 0;
1020 1180
1021 size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc); 1181 size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);
1022 1182
1023 txq->tx_desc_area = 1183 txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
1024 pci_alloc_consistent(priv->pdev, size, &txq->tx_desc_dma); 1184 if (txq->txd == NULL) {
1025 if (txq->tx_desc_area == NULL) {
1026 printk(KERN_ERR "%s: failed to alloc TX descriptors\n", 1185 printk(KERN_ERR "%s: failed to alloc TX descriptors\n",
1027 priv->name); 1186 wiphy_name(hw->wiphy));
1028 return -ENOMEM; 1187 return -ENOMEM;
1029 } 1188 }
1030 memset(txq->tx_desc_area, 0, size); 1189 memset(txq->txd, 0, size);
1031 1190
1032 txq->tx_skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->tx_skb), 1191 txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
1033 GFP_KERNEL); 1192 if (txq->skb == NULL) {
1034 if (txq->tx_skb == NULL) {
1035 printk(KERN_ERR "%s: failed to alloc TX skbuff list\n", 1193 printk(KERN_ERR "%s: failed to alloc TX skbuff list\n",
1036 priv->name); 1194 wiphy_name(hw->wiphy));
1037 pci_free_consistent(priv->pdev, size, 1195 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
1038 txq->tx_desc_area, txq->tx_desc_dma);
1039 return -ENOMEM; 1196 return -ENOMEM;
1040 } 1197 }
1041 memset(txq->tx_skb, 0, MWL8K_TX_DESCS * sizeof(*txq->tx_skb)); 1198 memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
1042 1199
1043 for (i = 0; i < MWL8K_TX_DESCS; i++) { 1200 for (i = 0; i < MWL8K_TX_DESCS; i++) {
1044 struct mwl8k_tx_desc *tx_desc; 1201 struct mwl8k_tx_desc *tx_desc;
1045 int nexti; 1202 int nexti;
1046 1203
1047 tx_desc = txq->tx_desc_area + i; 1204 tx_desc = txq->txd + i;
1048 nexti = (i + 1) % MWL8K_TX_DESCS; 1205 nexti = (i + 1) % MWL8K_TX_DESCS;
1049 1206
1050 tx_desc->status = 0; 1207 tx_desc->status = 0;
1051 tx_desc->next_tx_desc_phys_addr = 1208 tx_desc->next_txd_phys_addr =
1052 cpu_to_le32(txq->tx_desc_dma + 1209 cpu_to_le32(txq->txd_dma + nexti * sizeof(*tx_desc));
1053 nexti * sizeof(*tx_desc));
1054 } 1210 }
1055 1211
1056 return 0; 1212 return 0;
@@ -1065,11 +1221,6 @@ static inline void mwl8k_tx_start(struct mwl8k_priv *priv)
1065 ioread32(priv->regs + MWL8K_HIU_INT_CODE); 1221 ioread32(priv->regs + MWL8K_HIU_INT_CODE);
1066} 1222}
1067 1223
1068static inline int mwl8k_txq_busy(struct mwl8k_priv *priv)
1069{
1070 return priv->pending_tx_pkts;
1071}
1072
1073struct mwl8k_txq_info { 1224struct mwl8k_txq_info {
1074 u32 fw_owned; 1225 u32 fw_owned;
1075 u32 drv_owned; 1226 u32 drv_owned;
@@ -1089,14 +1240,13 @@ static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
1089 1240
1090 memset(txinfo, 0, MWL8K_TX_QUEUES * sizeof(struct mwl8k_txq_info)); 1241 memset(txinfo, 0, MWL8K_TX_QUEUES * sizeof(struct mwl8k_txq_info));
1091 1242
1092 spin_lock_bh(&priv->tx_lock);
1093 for (count = 0; count < MWL8K_TX_QUEUES; count++) { 1243 for (count = 0; count < MWL8K_TX_QUEUES; count++) {
1094 txq = priv->txq + count; 1244 txq = priv->txq + count;
1095 txinfo[count].len = txq->tx_stats.len; 1245 txinfo[count].len = txq->stats.len;
1096 txinfo[count].head = txq->tx_head; 1246 txinfo[count].head = txq->head;
1097 txinfo[count].tail = txq->tx_tail; 1247 txinfo[count].tail = txq->tail;
1098 for (desc = 0; desc < MWL8K_TX_DESCS; desc++) { 1248 for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
1099 tx_desc = txq->tx_desc_area + desc; 1249 tx_desc = txq->txd + desc;
1100 status = le32_to_cpu(tx_desc->status); 1250 status = le32_to_cpu(tx_desc->status);
1101 1251
1102 if (status & MWL8K_TXD_STATUS_FW_OWNED) 1252 if (status & MWL8K_TXD_STATUS_FW_OWNED)
@@ -1108,30 +1258,26 @@ static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
1108 txinfo[count].unused++; 1258 txinfo[count].unused++;
1109 } 1259 }
1110 } 1260 }
1111 spin_unlock_bh(&priv->tx_lock);
1112 1261
1113 return ndescs; 1262 return ndescs;
1114} 1263}
1115 1264
1116/* 1265/*
1117 * Must be called with hw->fw_mutex held and tx queues stopped. 1266 * Must be called with priv->fw_mutex held and tx queues stopped.
1118 */ 1267 */
1119static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) 1268static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1120{ 1269{
1121 struct mwl8k_priv *priv = hw->priv; 1270 struct mwl8k_priv *priv = hw->priv;
1122 DECLARE_COMPLETION_ONSTACK(cmd_wait); 1271 DECLARE_COMPLETION_ONSTACK(tx_wait);
1123 u32 count; 1272 u32 count;
1124 unsigned long timeout; 1273 unsigned long timeout;
1125 1274
1126 might_sleep(); 1275 might_sleep();
1127 1276
1128 spin_lock_bh(&priv->tx_lock); 1277 spin_lock_bh(&priv->tx_lock);
1129 count = mwl8k_txq_busy(priv); 1278 count = priv->pending_tx_pkts;
1130 if (count) { 1279 if (count)
1131 priv->tx_wait = &cmd_wait; 1280 priv->tx_wait = &tx_wait;
1132 if (priv->radio_on)
1133 mwl8k_tx_start(priv);
1134 }
1135 spin_unlock_bh(&priv->tx_lock); 1281 spin_unlock_bh(&priv->tx_lock);
1136 1282
1137 if (count) { 1283 if (count) {
@@ -1139,23 +1285,23 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1139 int index; 1285 int index;
1140 int newcount; 1286 int newcount;
1141 1287
1142 timeout = wait_for_completion_timeout(&cmd_wait, 1288 timeout = wait_for_completion_timeout(&tx_wait,
1143 msecs_to_jiffies(5000)); 1289 msecs_to_jiffies(5000));
1144 if (timeout) 1290 if (timeout)
1145 return 0; 1291 return 0;
1146 1292
1147 spin_lock_bh(&priv->tx_lock); 1293 spin_lock_bh(&priv->tx_lock);
1148 priv->tx_wait = NULL; 1294 priv->tx_wait = NULL;
1149 newcount = mwl8k_txq_busy(priv); 1295 newcount = priv->pending_tx_pkts;
1296 mwl8k_scan_tx_ring(priv, txinfo);
1150 spin_unlock_bh(&priv->tx_lock); 1297 spin_unlock_bh(&priv->tx_lock);
1151 1298
1152 printk(KERN_ERR "%s(%u) TIMEDOUT:5000ms Pend:%u-->%u\n", 1299 printk(KERN_ERR "%s(%u) TIMEDOUT:5000ms Pend:%u-->%u\n",
1153 __func__, __LINE__, count, newcount); 1300 __func__, __LINE__, count, newcount);
1154 1301
1155 mwl8k_scan_tx_ring(priv, txinfo);
1156 for (index = 0; index < MWL8K_TX_QUEUES; index++) 1302 for (index = 0; index < MWL8K_TX_QUEUES; index++)
1157 printk(KERN_ERR 1303 printk(KERN_ERR "TXQ:%u L:%u H:%u T:%u FW:%u "
1158 "TXQ:%u L:%u H:%u T:%u FW:%u DRV:%u U:%u\n", 1304 "DRV:%u U:%u\n",
1159 index, 1305 index,
1160 txinfo[index].len, 1306 txinfo[index].len,
1161 txinfo[index].head, 1307 txinfo[index].head,
@@ -1181,7 +1327,7 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1181 struct mwl8k_tx_queue *txq = priv->txq + index; 1327 struct mwl8k_tx_queue *txq = priv->txq + index;
1182 int wake = 0; 1328 int wake = 0;
1183 1329
1184 while (txq->tx_stats.len > 0) { 1330 while (txq->stats.len > 0) {
1185 int tx; 1331 int tx;
1186 struct mwl8k_tx_desc *tx_desc; 1332 struct mwl8k_tx_desc *tx_desc;
1187 unsigned long addr; 1333 unsigned long addr;
@@ -1190,8 +1336,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1190 struct ieee80211_tx_info *info; 1336 struct ieee80211_tx_info *info;
1191 u32 status; 1337 u32 status;
1192 1338
1193 tx = txq->tx_head; 1339 tx = txq->head;
1194 tx_desc = txq->tx_desc_area + tx; 1340 tx_desc = txq->txd + tx;
1195 1341
1196 status = le32_to_cpu(tx_desc->status); 1342 status = le32_to_cpu(tx_desc->status);
1197 1343
@@ -1202,15 +1348,15 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1202 ~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED); 1348 ~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED);
1203 } 1349 }
1204 1350
1205 txq->tx_head = (tx + 1) % MWL8K_TX_DESCS; 1351 txq->head = (tx + 1) % MWL8K_TX_DESCS;
1206 BUG_ON(txq->tx_stats.len == 0); 1352 BUG_ON(txq->stats.len == 0);
1207 txq->tx_stats.len--; 1353 txq->stats.len--;
1208 priv->pending_tx_pkts--; 1354 priv->pending_tx_pkts--;
1209 1355
1210 addr = le32_to_cpu(tx_desc->pkt_phys_addr); 1356 addr = le32_to_cpu(tx_desc->pkt_phys_addr);
1211 size = le16_to_cpu(tx_desc->pkt_len); 1357 size = le16_to_cpu(tx_desc->pkt_len);
1212 skb = txq->tx_skb[tx]; 1358 skb = txq->skb[tx];
1213 txq->tx_skb[tx] = NULL; 1359 txq->skb[tx] = NULL;
1214 1360
1215 BUG_ON(skb == NULL); 1361 BUG_ON(skb == NULL);
1216 pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE); 1362 pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);
@@ -1243,13 +1389,13 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1243 1389
1244 mwl8k_txq_reclaim(hw, index, 1); 1390 mwl8k_txq_reclaim(hw, index, 1);
1245 1391
1246 kfree(txq->tx_skb); 1392 kfree(txq->skb);
1247 txq->tx_skb = NULL; 1393 txq->skb = NULL;
1248 1394
1249 pci_free_consistent(priv->pdev, 1395 pci_free_consistent(priv->pdev,
1250 MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc), 1396 MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc),
1251 txq->tx_desc_area, txq->tx_desc_dma); 1397 txq->txd, txq->txd_dma);
1252 txq->tx_desc_area = NULL; 1398 txq->txd = NULL;
1253} 1399}
1254 1400
1255static int 1401static int
@@ -1317,7 +1463,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1317 1463
1318 if (pci_dma_mapping_error(priv->pdev, dma)) { 1464 if (pci_dma_mapping_error(priv->pdev, dma)) {
1319 printk(KERN_DEBUG "%s: failed to dma map skb, " 1465 printk(KERN_DEBUG "%s: failed to dma map skb, "
1320 "dropping TX frame.\n", priv->name); 1466 "dropping TX frame.\n", wiphy_name(hw->wiphy));
1321 dev_kfree_skb(skb); 1467 dev_kfree_skb(skb);
1322 return NETDEV_TX_OK; 1468 return NETDEV_TX_OK;
1323 } 1469 }
@@ -1326,10 +1472,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1326 1472
1327 txq = priv->txq + index; 1473 txq = priv->txq + index;
1328 1474
1329 BUG_ON(txq->tx_skb[txq->tx_tail] != NULL); 1475 BUG_ON(txq->skb[txq->tail] != NULL);
1330 txq->tx_skb[txq->tx_tail] = skb; 1476 txq->skb[txq->tail] = skb;
1331 1477
1332 tx = txq->tx_desc_area + txq->tx_tail; 1478 tx = txq->txd + txq->tail;
1333 tx->data_rate = txdatarate; 1479 tx->data_rate = txdatarate;
1334 tx->tx_priority = index; 1480 tx->tx_priority = index;
1335 tx->qos_control = cpu_to_le16(qos); 1481 tx->qos_control = cpu_to_le16(qos);
@@ -1340,15 +1486,15 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1340 wmb(); 1486 wmb();
1341 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); 1487 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
1342 1488
1343 txq->tx_stats.count++; 1489 txq->stats.count++;
1344 txq->tx_stats.len++; 1490 txq->stats.len++;
1345 priv->pending_tx_pkts++; 1491 priv->pending_tx_pkts++;
1346 1492
1347 txq->tx_tail++; 1493 txq->tail++;
1348 if (txq->tx_tail == MWL8K_TX_DESCS) 1494 if (txq->tail == MWL8K_TX_DESCS)
1349 txq->tx_tail = 0; 1495 txq->tail = 0;
1350 1496
1351 if (txq->tx_head == txq->tx_tail) 1497 if (txq->head == txq->tail)
1352 ieee80211_stop_queue(hw, index); 1498 ieee80211_stop_queue(hw, index);
1353 1499
1354 mwl8k_tx_start(priv); 1500 mwl8k_tx_start(priv);
@@ -1431,7 +1577,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1431 unsigned long timeout = 0; 1577 unsigned long timeout = 0;
1432 u8 buf[32]; 1578 u8 buf[32];
1433 1579
1434 cmd->result = 0xFFFF; 1580 cmd->result = 0xffff;
1435 dma_size = le16_to_cpu(cmd->length); 1581 dma_size = le16_to_cpu(cmd->length);
1436 dma_addr = pci_map_single(priv->pdev, cmd, dma_size, 1582 dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
1437 PCI_DMA_BIDIRECTIONAL); 1583 PCI_DMA_BIDIRECTIONAL);
@@ -1464,7 +1610,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1464 1610
1465 if (!timeout) { 1611 if (!timeout) {
1466 printk(KERN_ERR "%s: Command %s timeout after %u ms\n", 1612 printk(KERN_ERR "%s: Command %s timeout after %u ms\n",
1467 priv->name, 1613 wiphy_name(hw->wiphy),
1468 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1614 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1469 MWL8K_CMD_TIMEOUT_MS); 1615 MWL8K_CMD_TIMEOUT_MS);
1470 rc = -ETIMEDOUT; 1616 rc = -ETIMEDOUT;
@@ -1472,7 +1618,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1472 rc = cmd->result ? -EINVAL : 0; 1618 rc = cmd->result ? -EINVAL : 0;
1473 if (rc) 1619 if (rc)
1474 printk(KERN_ERR "%s: Command %s error 0x%x\n", 1620 printk(KERN_ERR "%s: Command %s error 0x%x\n",
1475 priv->name, 1621 wiphy_name(hw->wiphy),
1476 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1622 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1477 le16_to_cpu(cmd->result)); 1623 le16_to_cpu(cmd->result));
1478 } 1624 }
@@ -1481,9 +1627,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1481} 1627}
1482 1628
1483/* 1629/*
1484 * GET_HW_SPEC. 1630 * CMD_GET_HW_SPEC (STA version).
1485 */ 1631 */
1486struct mwl8k_cmd_get_hw_spec { 1632struct mwl8k_cmd_get_hw_spec_sta {
1487 struct mwl8k_cmd_pkt header; 1633 struct mwl8k_cmd_pkt header;
1488 __u8 hw_rev; 1634 __u8 hw_rev;
1489 __u8 host_interface; 1635 __u8 host_interface;
@@ -1499,13 +1645,13 @@ struct mwl8k_cmd_get_hw_spec {
1499 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES]; 1645 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
1500 __le32 caps2; 1646 __le32 caps2;
1501 __le32 num_tx_desc_per_queue; 1647 __le32 num_tx_desc_per_queue;
1502 __le32 total_rx_desc; 1648 __le32 total_rxd;
1503} __attribute__((packed)); 1649} __attribute__((packed));
1504 1650
1505static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw) 1651static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1506{ 1652{
1507 struct mwl8k_priv *priv = hw->priv; 1653 struct mwl8k_priv *priv = hw->priv;
1508 struct mwl8k_cmd_get_hw_spec *cmd; 1654 struct mwl8k_cmd_get_hw_spec_sta *cmd;
1509 int rc; 1655 int rc;
1510 int i; 1656 int i;
1511 1657
@@ -1518,12 +1664,12 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
1518 1664
1519 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); 1665 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
1520 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); 1666 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1521 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma); 1667 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
1522 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 1668 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1523 for (i = 0; i < MWL8K_TX_QUEUES; i++) 1669 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1524 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma); 1670 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1525 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 1671 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1526 cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS); 1672 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
1527 1673
1528 rc = mwl8k_post_cmd(hw, &cmd->header); 1674 rc = mwl8k_post_cmd(hw, &cmd->header);
1529 1675
@@ -1539,6 +1685,129 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
1539} 1685}
1540 1686
1541/* 1687/*
1688 * CMD_GET_HW_SPEC (AP version).
1689 */
1690struct mwl8k_cmd_get_hw_spec_ap {
1691 struct mwl8k_cmd_pkt header;
1692 __u8 hw_rev;
1693 __u8 host_interface;
1694 __le16 num_wcb;
1695 __le16 num_mcaddrs;
1696 __u8 perm_addr[ETH_ALEN];
1697 __le16 region_code;
1698 __le16 num_antenna;
1699 __le32 fw_rev;
1700 __le32 wcbbase0;
1701 __le32 rxwrptr;
1702 __le32 rxrdptr;
1703 __le32 ps_cookie;
1704 __le32 wcbbase1;
1705 __le32 wcbbase2;
1706 __le32 wcbbase3;
1707} __attribute__((packed));
1708
1709static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1710{
1711 struct mwl8k_priv *priv = hw->priv;
1712 struct mwl8k_cmd_get_hw_spec_ap *cmd;
1713 int rc;
1714
1715 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1716 if (cmd == NULL)
1717 return -ENOMEM;
1718
1719 cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
1720 cmd->header.length = cpu_to_le16(sizeof(*cmd));
1721
1722 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
1723 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1724
1725 rc = mwl8k_post_cmd(hw, &cmd->header);
1726
1727 if (!rc) {
1728 int off;
1729
1730 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
1731 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1732 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1733 priv->hw_rev = cmd->hw_rev;
1734
1735 off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
1736 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
1737
1738 off = le32_to_cpu(cmd->rxwrptr) & 0xffff;
1739 iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off);
1740
1741 off = le32_to_cpu(cmd->rxrdptr) & 0xffff;
1742 iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off);
1743
1744 off = le32_to_cpu(cmd->wcbbase1) & 0xffff;
1745 iowrite32(cpu_to_le32(priv->txq[1].txd_dma), priv->sram + off);
1746
1747 off = le32_to_cpu(cmd->wcbbase2) & 0xffff;
1748 iowrite32(cpu_to_le32(priv->txq[2].txd_dma), priv->sram + off);
1749
1750 off = le32_to_cpu(cmd->wcbbase3) & 0xffff;
1751 iowrite32(cpu_to_le32(priv->txq[3].txd_dma), priv->sram + off);
1752 }
1753
1754 kfree(cmd);
1755 return rc;
1756}
1757
1758/*
1759 * CMD_SET_HW_SPEC.
1760 */
1761struct mwl8k_cmd_set_hw_spec {
1762 struct mwl8k_cmd_pkt header;
1763 __u8 hw_rev;
1764 __u8 host_interface;
1765 __le16 num_mcaddrs;
1766 __u8 perm_addr[ETH_ALEN];
1767 __le16 region_code;
1768 __le32 fw_rev;
1769 __le32 ps_cookie;
1770 __le32 caps;
1771 __le32 rx_queue_ptr;
1772 __le32 num_tx_queues;
1773 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
1774 __le32 flags;
1775 __le32 num_tx_desc_per_queue;
1776 __le32 total_rxd;
1777} __attribute__((packed));
1778
1779#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
1780
1781static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1782{
1783 struct mwl8k_priv *priv = hw->priv;
1784 struct mwl8k_cmd_set_hw_spec *cmd;
1785 int rc;
1786 int i;
1787
1788 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1789 if (cmd == NULL)
1790 return -ENOMEM;
1791
1792 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_HW_SPEC);
1793 cmd->header.length = cpu_to_le16(sizeof(*cmd));
1794
1795 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1796 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
1797 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1798 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1799 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1800 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
1801 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1802 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
1803
1804 rc = mwl8k_post_cmd(hw, &cmd->header);
1805 kfree(cmd);
1806
1807 return rc;
1808}
1809
1810/*
1542 * CMD_MAC_MULTICAST_ADR. 1811 * CMD_MAC_MULTICAST_ADR.
1543 */ 1812 */
1544struct mwl8k_cmd_mac_multicast_adr { 1813struct mwl8k_cmd_mac_multicast_adr {
@@ -1548,19 +1817,23 @@ struct mwl8k_cmd_mac_multicast_adr {
1548 __u8 addr[0][ETH_ALEN]; 1817 __u8 addr[0][ETH_ALEN];
1549}; 1818};
1550 1819
1551#define MWL8K_ENABLE_RX_MULTICAST 0x000F 1820#define MWL8K_ENABLE_RX_DIRECTED 0x0001
1821#define MWL8K_ENABLE_RX_MULTICAST 0x0002
1822#define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004
1823#define MWL8K_ENABLE_RX_BROADCAST 0x0008
1552 1824
1553static struct mwl8k_cmd_pkt * 1825static struct mwl8k_cmd_pkt *
1554__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, 1826__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1555 int mc_count, struct dev_addr_list *mclist) 1827 int mc_count, struct dev_addr_list *mclist)
1556{ 1828{
1557 struct mwl8k_priv *priv = hw->priv; 1829 struct mwl8k_priv *priv = hw->priv;
1558 struct mwl8k_cmd_mac_multicast_adr *cmd; 1830 struct mwl8k_cmd_mac_multicast_adr *cmd;
1559 int size; 1831 int size;
1560 int i;
1561 1832
1562 if (mc_count > priv->num_mcaddrs) 1833 if (allmulti || mc_count > priv->num_mcaddrs) {
1563 mc_count = priv->num_mcaddrs; 1834 allmulti = 1;
1835 mc_count = 0;
1836 }
1564 1837
1565 size = sizeof(*cmd) + mc_count * ETH_ALEN; 1838 size = sizeof(*cmd) + mc_count * ETH_ALEN;
1566 1839
@@ -1570,16 +1843,24 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
1570 1843
1571 cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR); 1844 cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR);
1572 cmd->header.length = cpu_to_le16(size); 1845 cmd->header.length = cpu_to_le16(size);
1573 cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); 1846 cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_DIRECTED |
1574 cmd->numaddr = cpu_to_le16(mc_count); 1847 MWL8K_ENABLE_RX_BROADCAST);
1575 1848
1576 for (i = 0; i < mc_count && mclist; i++) { 1849 if (allmulti) {
1577 if (mclist->da_addrlen != ETH_ALEN) { 1850 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
1578 kfree(cmd); 1851 } else if (mc_count) {
1579 return NULL; 1852 int i;
1853
1854 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
1855 cmd->numaddr = cpu_to_le16(mc_count);
1856 for (i = 0; i < mc_count && mclist; i++) {
1857 if (mclist->da_addrlen != ETH_ALEN) {
1858 kfree(cmd);
1859 return NULL;
1860 }
1861 memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
1862 mclist = mclist->next;
1580 } 1863 }
1581 memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
1582 mclist = mclist->next;
1583 } 1864 }
1584 1865
1585 return &cmd->header; 1866 return &cmd->header;
@@ -1590,7 +1871,6 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
1590 */ 1871 */
1591struct mwl8k_cmd_802_11_get_stat { 1872struct mwl8k_cmd_802_11_get_stat {
1592 struct mwl8k_cmd_pkt header; 1873 struct mwl8k_cmd_pkt header;
1593 __le16 action;
1594 __le32 stats[64]; 1874 __le32 stats[64];
1595} __attribute__((packed)); 1875} __attribute__((packed));
1596 1876
@@ -1611,7 +1891,6 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
1611 1891
1612 cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT); 1892 cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
1613 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 1893 cmd->header.length = cpu_to_le16(sizeof(*cmd));
1614 cmd->action = cpu_to_le16(MWL8K_CMD_GET);
1615 1894
1616 rc = mwl8k_post_cmd(hw, &cmd->header); 1895 rc = mwl8k_post_cmd(hw, &cmd->header);
1617 if (!rc) { 1896 if (!rc) {
@@ -1727,6 +2006,39 @@ static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
1727} 2006}
1728 2007
1729/* 2008/*
2009 * CMD_RF_ANTENNA.
2010 */
2011struct mwl8k_cmd_rf_antenna {
2012 struct mwl8k_cmd_pkt header;
2013 __le16 antenna;
2014 __le16 mode;
2015} __attribute__((packed));
2016
2017#define MWL8K_RF_ANTENNA_RX 1
2018#define MWL8K_RF_ANTENNA_TX 2
2019
2020static int
2021mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
2022{
2023 struct mwl8k_cmd_rf_antenna *cmd;
2024 int rc;
2025
2026 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2027 if (cmd == NULL)
2028 return -ENOMEM;
2029
2030 cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_ANTENNA);
2031 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2032 cmd->antenna = cpu_to_le16(antenna);
2033 cmd->mode = cpu_to_le16(mask);
2034
2035 rc = mwl8k_post_cmd(hw, &cmd->header);
2036 kfree(cmd);
2037
2038 return rc;
2039}
2040
2041/*
1730 * CMD_SET_PRE_SCAN. 2042 * CMD_SET_PRE_SCAN.
1731 */ 2043 */
1732struct mwl8k_cmd_set_pre_scan { 2044struct mwl8k_cmd_set_pre_scan {
@@ -1904,6 +2216,46 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
1904} 2216}
1905 2217
1906/* 2218/*
2219 * CMD_SET_MAC_ADDR.
2220 */
2221struct mwl8k_cmd_set_mac_addr {
2222 struct mwl8k_cmd_pkt header;
2223 union {
2224 struct {
2225 __le16 mac_type;
2226 __u8 mac_addr[ETH_ALEN];
2227 } mbss;
2228 __u8 mac_addr[ETH_ALEN];
2229 };
2230} __attribute__((packed));
2231
2232static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
2233{
2234 struct mwl8k_priv *priv = hw->priv;
2235 struct mwl8k_cmd_set_mac_addr *cmd;
2236 int rc;
2237
2238 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2239 if (cmd == NULL)
2240 return -ENOMEM;
2241
2242 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
2243 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2244 if (priv->ap_fw) {
2245 cmd->mbss.mac_type = 0;
2246 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2247 } else {
2248 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2249 }
2250
2251 rc = mwl8k_post_cmd(hw, &cmd->header);
2252 kfree(cmd);
2253
2254 return rc;
2255}
2256
2257
2258/*
1907 * CMD_SET_RATEADAPT_MODE. 2259 * CMD_SET_RATEADAPT_MODE.
1908 */ 2260 */
1909struct mwl8k_cmd_set_rate_adapt_mode { 2261struct mwl8k_cmd_set_rate_adapt_mode {
@@ -2005,17 +2357,34 @@ struct mwl8k_cmd_set_edca_params {
2005 /* TX opportunity in units of 32 us */ 2357 /* TX opportunity in units of 32 us */
2006 __le16 txop; 2358 __le16 txop;
2007 2359
2008 /* Log exponent of max contention period: 0...15*/ 2360 union {
2009 __u8 log_cw_max; 2361 struct {
2362 /* Log exponent of max contention period: 0...15 */
2363 __le32 log_cw_max;
2364
2365 /* Log exponent of min contention period: 0...15 */
2366 __le32 log_cw_min;
2367
2368 /* Adaptive interframe spacing in units of 32us */
2369 __u8 aifs;
2370
2371 /* TX queue to configure */
2372 __u8 txq;
2373 } ap;
2374 struct {
2375 /* Log exponent of max contention period: 0...15 */
2376 __u8 log_cw_max;
2010 2377
2011 /* Log exponent of min contention period: 0...15 */ 2378 /* Log exponent of min contention period: 0...15 */
2012 __u8 log_cw_min; 2379 __u8 log_cw_min;
2013 2380
2014 /* Adaptive interframe spacing in units of 32us */ 2381 /* Adaptive interframe spacing in units of 32us */
2015 __u8 aifs; 2382 __u8 aifs;
2016 2383
2017 /* TX queue to configure */ 2384 /* TX queue to configure */
2018 __u8 txq; 2385 __u8 txq;
2386 } sta;
2387 };
2019} __attribute__((packed)); 2388} __attribute__((packed));
2020 2389
2021#define MWL8K_SET_EDCA_CW 0x01 2390#define MWL8K_SET_EDCA_CW 0x01
@@ -2031,6 +2400,7 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2031 __u16 cw_min, __u16 cw_max, 2400 __u16 cw_min, __u16 cw_max,
2032 __u8 aifs, __u16 txop) 2401 __u8 aifs, __u16 txop)
2033{ 2402{
2403 struct mwl8k_priv *priv = hw->priv;
2034 struct mwl8k_cmd_set_edca_params *cmd; 2404 struct mwl8k_cmd_set_edca_params *cmd;
2035 int rc; 2405 int rc;
2036 2406
@@ -2038,14 +2408,27 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2038 if (cmd == NULL) 2408 if (cmd == NULL)
2039 return -ENOMEM; 2409 return -ENOMEM;
2040 2410
2411 /*
2412 * Queues 0 (BE) and 1 (BK) are swapped in hardware for
2413 * this call.
2414 */
2415 qnum ^= !(qnum >> 1);
2416
2041 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS); 2417 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
2042 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2418 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2043 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL); 2419 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
2044 cmd->txop = cpu_to_le16(txop); 2420 cmd->txop = cpu_to_le16(txop);
2045 cmd->log_cw_max = (u8)ilog2(cw_max + 1); 2421 if (priv->ap_fw) {
2046 cmd->log_cw_min = (u8)ilog2(cw_min + 1); 2422 cmd->ap.log_cw_max = cpu_to_le32(ilog2(cw_max + 1));
2047 cmd->aifs = aifs; 2423 cmd->ap.log_cw_min = cpu_to_le32(ilog2(cw_min + 1));
2048 cmd->txq = qnum; 2424 cmd->ap.aifs = aifs;
2425 cmd->ap.txq = qnum;
2426 } else {
2427 cmd->sta.log_cw_max = (u8)ilog2(cw_max + 1);
2428 cmd->sta.log_cw_min = (u8)ilog2(cw_min + 1);
2429 cmd->sta.aifs = aifs;
2430 cmd->sta.txq = qnum;
2431 }
2049 2432
2050 rc = mwl8k_post_cmd(hw, &cmd->header); 2433 rc = mwl8k_post_cmd(hw, &cmd->header);
2051 kfree(cmd); 2434 kfree(cmd);
@@ -2093,8 +2476,8 @@ static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
2093 /* XXX TBD Might just have to abort and return an error */ 2476 /* XXX TBD Might just have to abort and return an error */
2094 if (payload_len > MWL8K_FJ_BEACON_MAXLEN) 2477 if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2095 printk(KERN_ERR "%s(): WARNING: Incomplete beacon " 2478 printk(KERN_ERR "%s(): WARNING: Incomplete beacon "
2096 "sent to firmware. Sz=%u MAX=%u\n", __func__, 2479 "sent to firmware. Sz=%u MAX=%u\n", __func__,
2097 payload_len, MWL8K_FJ_BEACON_MAXLEN); 2480 payload_len, MWL8K_FJ_BEACON_MAXLEN);
2098 2481
2099 if (payload_len > MWL8K_FJ_BEACON_MAXLEN) 2482 if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2100 payload_len = MWL8K_FJ_BEACON_MAXLEN; 2483 payload_len = MWL8K_FJ_BEACON_MAXLEN;
@@ -2341,9 +2724,10 @@ static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
2341 cmd->rate_type = cpu_to_le32(rate_type); 2724 cmd->rate_type = cpu_to_le32(rate_type);
2342 2725
2343 if (rate_table != NULL) { 2726 if (rate_table != NULL) {
2344 /* Copy over each field manually so 2727 /*
2345 * that bitflipping can be done 2728 * Copy over each field manually so that endian
2346 */ 2729 * conversion can be done.
2730 */
2347 cmd->rate_table.allow_rate_drop = 2731 cmd->rate_table.allow_rate_drop =
2348 cpu_to_le32(rate_table->allow_rate_drop); 2732 cpu_to_le32(rate_table->allow_rate_drop);
2349 cmd->rate_table.num_rates = 2733 cmd->rate_table.num_rates =
@@ -2399,7 +2783,7 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2399 2783
2400 if (status & MWL8K_A2H_INT_QUEUE_EMPTY) { 2784 if (status & MWL8K_A2H_INT_QUEUE_EMPTY) {
2401 if (!mutex_is_locked(&priv->fw_mutex) && 2785 if (!mutex_is_locked(&priv->fw_mutex) &&
2402 priv->radio_on && mwl8k_txq_busy(priv)) 2786 priv->radio_on && priv->pending_tx_pkts)
2403 mwl8k_tx_start(priv); 2787 mwl8k_tx_start(priv);
2404 } 2788 }
2405 2789
@@ -2418,7 +2802,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2418 2802
2419 if (priv->current_channel == NULL) { 2803 if (priv->current_channel == NULL) {
2420 printk(KERN_DEBUG "%s: dropped TX frame since radio " 2804 printk(KERN_DEBUG "%s: dropped TX frame since radio "
2421 "disabled\n", priv->name); 2805 "disabled\n", wiphy_name(hw->wiphy));
2422 dev_kfree_skb(skb); 2806 dev_kfree_skb(skb);
2423 return NETDEV_TX_OK; 2807 return NETDEV_TX_OK;
2424 } 2808 }
@@ -2433,11 +2817,11 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2433 struct mwl8k_priv *priv = hw->priv; 2817 struct mwl8k_priv *priv = hw->priv;
2434 int rc; 2818 int rc;
2435 2819
2436 rc = request_irq(priv->pdev->irq, &mwl8k_interrupt, 2820 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
2437 IRQF_SHARED, MWL8K_NAME, hw); 2821 IRQF_SHARED, MWL8K_NAME, hw);
2438 if (rc) { 2822 if (rc) {
2439 printk(KERN_ERR "%s: failed to register IRQ handler\n", 2823 printk(KERN_ERR "%s: failed to register IRQ handler\n",
2440 priv->name); 2824 wiphy_name(hw->wiphy));
2441 return -EIO; 2825 return -EIO;
2442 } 2826 }
2443 2827
@@ -2451,12 +2835,17 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2451 if (!rc) { 2835 if (!rc) {
2452 rc = mwl8k_cmd_802_11_radio_enable(hw); 2836 rc = mwl8k_cmd_802_11_radio_enable(hw);
2453 2837
2454 if (!rc) 2838 if (!priv->ap_fw) {
2455 rc = mwl8k_cmd_set_pre_scan(hw); 2839 if (!rc)
2840 rc = mwl8k_enable_sniffer(hw, 0);
2456 2841
2457 if (!rc) 2842 if (!rc)
2458 rc = mwl8k_cmd_set_post_scan(hw, 2843 rc = mwl8k_cmd_set_pre_scan(hw);
2459 "\x00\x00\x00\x00\x00\x00"); 2844
2845 if (!rc)
2846 rc = mwl8k_cmd_set_post_scan(hw,
2847 "\x00\x00\x00\x00\x00\x00");
2848 }
2460 2849
2461 if (!rc) 2850 if (!rc)
2462 rc = mwl8k_cmd_setrateadaptmode(hw, 0); 2851 rc = mwl8k_cmd_setrateadaptmode(hw, 0);
@@ -2464,9 +2853,6 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2464 if (!rc) 2853 if (!rc)
2465 rc = mwl8k_set_wmm(hw, 0); 2854 rc = mwl8k_set_wmm(hw, 0);
2466 2855
2467 if (!rc)
2468 rc = mwl8k_enable_sniffer(hw, 0);
2469
2470 mwl8k_fw_unlock(hw); 2856 mwl8k_fw_unlock(hw);
2471 } 2857 }
2472 2858
@@ -2500,9 +2886,6 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2500 /* Stop tx reclaim tasklet */ 2886 /* Stop tx reclaim tasklet */
2501 tasklet_disable(&priv->tx_reclaim_task); 2887 tasklet_disable(&priv->tx_reclaim_task);
2502 2888
2503 /* Stop config thread */
2504 flush_workqueue(priv->config_wq);
2505
2506 /* Return all skbs to mac80211 */ 2889 /* Return all skbs to mac80211 */
2507 for (i = 0; i < MWL8K_TX_QUEUES; i++) 2890 for (i = 0; i < MWL8K_TX_QUEUES; i++)
2508 mwl8k_txq_reclaim(hw, i, 1); 2891 mwl8k_txq_reclaim(hw, i, 1);
@@ -2526,11 +2909,24 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
2526 if (conf->type != NL80211_IFTYPE_STATION) 2909 if (conf->type != NL80211_IFTYPE_STATION)
2527 return -EINVAL; 2910 return -EINVAL;
2528 2911
2912 /*
2913 * Reject interface creation if sniffer mode is active, as
2914 * STA operation is mutually exclusive with hardware sniffer
2915 * mode.
2916 */
2917 if (priv->sniffer_enabled) {
2918 printk(KERN_INFO "%s: unable to create STA "
2919 "interface due to sniffer mode being enabled\n",
2920 wiphy_name(hw->wiphy));
2921 return -EINVAL;
2922 }
2923
2529 /* Clean out driver private area */ 2924 /* Clean out driver private area */
2530 mwl8k_vif = MWL8K_VIF(conf->vif); 2925 mwl8k_vif = MWL8K_VIF(conf->vif);
2531 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif)); 2926 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
2532 2927
2533 /* Save the mac address */ 2928 /* Set and save the mac address */
2929 mwl8k_set_mac_addr(hw, conf->mac_addr);
2534 memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN); 2930 memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
2535 2931
2536 /* Back pointer to parent config block */ 2932 /* Back pointer to parent config block */
@@ -2558,6 +2954,8 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
2558 if (priv->vif == NULL) 2954 if (priv->vif == NULL)
2559 return; 2955 return;
2560 2956
2957 mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
2958
2561 priv->vif = NULL; 2959 priv->vif = NULL;
2562} 2960}
2563 2961
@@ -2593,8 +2991,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2593 if (rc) 2991 if (rc)
2594 goto out; 2992 goto out;
2595 2993
2596 if (mwl8k_cmd_mimo_config(hw, 0x7, 0x7)) 2994 if (priv->ap_fw) {
2597 rc = -EINVAL; 2995 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
2996 if (!rc)
2997 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
2998 } else {
2999 rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
3000 }
2598 3001
2599out: 3002out:
2600 mwl8k_fw_unlock(hw); 3003 mwl8k_fw_unlock(hw);
@@ -2681,32 +3084,108 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
2681{ 3084{
2682 struct mwl8k_cmd_pkt *cmd; 3085 struct mwl8k_cmd_pkt *cmd;
2683 3086
2684 cmd = __mwl8k_cmd_mac_multicast_adr(hw, mc_count, mclist); 3087 /*
3088 * Synthesize and return a command packet that programs the
3089 * hardware multicast address filter. At this point we don't
3090 * know whether FIF_ALLMULTI is being requested, but if it is,
3091 * we'll end up throwing this packet away and creating a new
3092 * one in mwl8k_configure_filter().
3093 */
3094 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist);
2685 3095
2686 return (unsigned long)cmd; 3096 return (unsigned long)cmd;
2687} 3097}
2688 3098
3099static int
3100mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3101 unsigned int changed_flags,
3102 unsigned int *total_flags)
3103{
3104 struct mwl8k_priv *priv = hw->priv;
3105
3106 /*
3107 * Hardware sniffer mode is mutually exclusive with STA
3108 * operation, so refuse to enable sniffer mode if a STA
3109 * interface is active.
3110 */
3111 if (priv->vif != NULL) {
3112 if (net_ratelimit())
3113 printk(KERN_INFO "%s: not enabling sniffer "
3114 "mode because STA interface is active\n",
3115 wiphy_name(hw->wiphy));
3116 return 0;
3117 }
3118
3119 if (!priv->sniffer_enabled) {
3120 if (mwl8k_enable_sniffer(hw, 1))
3121 return 0;
3122 priv->sniffer_enabled = true;
3123 }
3124
3125 *total_flags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
3126 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
3127 FIF_OTHER_BSS;
3128
3129 return 1;
3130}
3131
2689static void mwl8k_configure_filter(struct ieee80211_hw *hw, 3132static void mwl8k_configure_filter(struct ieee80211_hw *hw,
2690 unsigned int changed_flags, 3133 unsigned int changed_flags,
2691 unsigned int *total_flags, 3134 unsigned int *total_flags,
2692 u64 multicast) 3135 u64 multicast)
2693{ 3136{
2694 struct mwl8k_priv *priv = hw->priv; 3137 struct mwl8k_priv *priv = hw->priv;
2695 struct mwl8k_cmd_pkt *multicast_adr_cmd; 3138 struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast;
3139
3140 /*
3141 * AP firmware doesn't allow fine-grained control over
3142 * the receive filter.
3143 */
3144 if (priv->ap_fw) {
3145 *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
3146 kfree(cmd);
3147 return;
3148 }
3149
3150 /*
3151 * Enable hardware sniffer mode if FIF_CONTROL or
3152 * FIF_OTHER_BSS is requested.
3153 */
3154 if (*total_flags & (FIF_CONTROL | FIF_OTHER_BSS) &&
3155 mwl8k_configure_filter_sniffer(hw, changed_flags, total_flags)) {
3156 kfree(cmd);
3157 return;
3158 }
2696 3159
2697 /* Clear unsupported feature flags */ 3160 /* Clear unsupported feature flags */
2698 *total_flags &= FIF_BCN_PRBRESP_PROMISC; 3161 *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
2699 3162
2700 if (mwl8k_fw_lock(hw)) 3163 if (mwl8k_fw_lock(hw))
2701 return; 3164 return;
2702 3165
3166 if (priv->sniffer_enabled) {
3167 mwl8k_enable_sniffer(hw, 0);
3168 priv->sniffer_enabled = false;
3169 }
3170
2703 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 3171 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
2704 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 3172 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
3173 /*
3174 * Disable the BSS filter.
3175 */
2705 mwl8k_cmd_set_pre_scan(hw); 3176 mwl8k_cmd_set_pre_scan(hw);
2706 else { 3177 } else {
2707 u8 *bssid; 3178 u8 *bssid;
2708 3179
2709 bssid = "\x00\x00\x00\x00\x00\x00"; 3180 /*
3181 * Enable the BSS filter.
3182 *
3183 * If there is an active STA interface, use that
3184 * interface's BSSID, otherwise use a dummy one
3185 * (where the OUI part needs to be nonzero for
3186 * the BSSID to be accepted by POST_SCAN).
3187 */
3188 bssid = "\x01\x00\x00\x00\x00\x00";
2710 if (priv->vif != NULL) 3189 if (priv->vif != NULL)
2711 bssid = MWL8K_VIF(priv->vif)->bssid; 3190 bssid = MWL8K_VIF(priv->vif)->bssid;
2712 3191
@@ -2714,10 +3193,20 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
2714 } 3193 }
2715 } 3194 }
2716 3195
2717 multicast_adr_cmd = (void *)(unsigned long)multicast; 3196 /*
2718 if (multicast_adr_cmd != NULL) { 3197 * If FIF_ALLMULTI is being requested, throw away the command
2719 mwl8k_post_cmd(hw, multicast_adr_cmd); 3198 * packet that ->prepare_multicast() built and replace it with
2720 kfree(multicast_adr_cmd); 3199 * a command packet that enables reception of all multicast
3200 * packets.
3201 */
3202 if (*total_flags & FIF_ALLMULTI) {
3203 kfree(cmd);
3204 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL);
3205 }
3206
3207 if (cmd != NULL) {
3208 mwl8k_post_cmd(hw, cmd);
3209 kfree(cmd);
2721 } 3210 }
2722 3211
2723 mwl8k_fw_unlock(hw); 3212 mwl8k_fw_unlock(hw);
@@ -2762,7 +3251,7 @@ static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
2762 spin_lock_bh(&priv->tx_lock); 3251 spin_lock_bh(&priv->tx_lock);
2763 for (index = 0; index < MWL8K_TX_QUEUES; index++) { 3252 for (index = 0; index < MWL8K_TX_QUEUES; index++) {
2764 txq = priv->txq + index; 3253 txq = priv->txq + index;
2765 memcpy(&stats[index], &txq->tx_stats, 3254 memcpy(&stats[index], &txq->stats,
2766 sizeof(struct ieee80211_tx_queue_stats)); 3255 sizeof(struct ieee80211_tx_queue_stats));
2767 } 3256 }
2768 spin_unlock_bh(&priv->tx_lock); 3257 spin_unlock_bh(&priv->tx_lock);
@@ -2802,7 +3291,7 @@ static void mwl8k_tx_reclaim_handler(unsigned long data)
2802 for (i = 0; i < MWL8K_TX_QUEUES; i++) 3291 for (i = 0; i < MWL8K_TX_QUEUES; i++)
2803 mwl8k_txq_reclaim(hw, i, 0); 3292 mwl8k_txq_reclaim(hw, i, 0);
2804 3293
2805 if (priv->tx_wait != NULL && mwl8k_txq_busy(priv) == 0) { 3294 if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
2806 complete(priv->tx_wait); 3295 complete(priv->tx_wait);
2807 priv->tx_wait = NULL; 3296 priv->tx_wait = NULL;
2808 } 3297 }
@@ -2822,6 +3311,36 @@ static void mwl8k_finalize_join_worker(struct work_struct *work)
2822 priv->beacon_skb = NULL; 3311 priv->beacon_skb = NULL;
2823} 3312}
2824 3313
3314enum {
3315 MWL8687 = 0,
3316 MWL8366,
3317};
3318
3319static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
3320 {
3321 .part_name = "88w8687",
3322 .helper_image = "mwl8k/helper_8687.fw",
3323 .fw_image = "mwl8k/fmimage_8687.fw",
3324 .rxd_ops = &rxd_8687_ops,
3325 .modes = BIT(NL80211_IFTYPE_STATION),
3326 },
3327 {
3328 .part_name = "88w8366",
3329 .helper_image = "mwl8k/helper_8366.fw",
3330 .fw_image = "mwl8k/fmimage_8366.fw",
3331 .rxd_ops = &rxd_8366_ops,
3332 .modes = 0,
3333 },
3334};
3335
3336static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3337 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
3338 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
3339 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
3340 { },
3341};
3342MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
3343
2825static int __devinit mwl8k_probe(struct pci_dev *pdev, 3344static int __devinit mwl8k_probe(struct pci_dev *pdev,
2826 const struct pci_device_id *id) 3345 const struct pci_device_id *id)
2827{ 3346{
@@ -2862,17 +3381,34 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2862 priv = hw->priv; 3381 priv = hw->priv;
2863 priv->hw = hw; 3382 priv->hw = hw;
2864 priv->pdev = pdev; 3383 priv->pdev = pdev;
3384 priv->device_info = &mwl8k_info_tbl[id->driver_data];
3385 priv->rxd_ops = priv->device_info->rxd_ops;
3386 priv->sniffer_enabled = false;
2865 priv->wmm_enabled = false; 3387 priv->wmm_enabled = false;
2866 priv->pending_tx_pkts = 0; 3388 priv->pending_tx_pkts = 0;
2867 strncpy(priv->name, MWL8K_NAME, sizeof(priv->name));
2868 3389
2869 SET_IEEE80211_DEV(hw, &pdev->dev); 3390 SET_IEEE80211_DEV(hw, &pdev->dev);
2870 pci_set_drvdata(pdev, hw); 3391 pci_set_drvdata(pdev, hw);
2871 3392
3393 priv->sram = pci_iomap(pdev, 0, 0x10000);
3394 if (priv->sram == NULL) {
3395 printk(KERN_ERR "%s: Cannot map device SRAM\n",
3396 wiphy_name(hw->wiphy));
3397 goto err_iounmap;
3398 }
3399
3400 /*
3401 * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
3402 * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
3403 */
2872 priv->regs = pci_iomap(pdev, 1, 0x10000); 3404 priv->regs = pci_iomap(pdev, 1, 0x10000);
2873 if (priv->regs == NULL) { 3405 if (priv->regs == NULL) {
2874 printk(KERN_ERR "%s: Cannot map device memory\n", priv->name); 3406 priv->regs = pci_iomap(pdev, 2, 0x10000);
2875 goto err_iounmap; 3407 if (priv->regs == NULL) {
3408 printk(KERN_ERR "%s: Cannot map device registers\n",
3409 wiphy_name(hw->wiphy));
3410 goto err_iounmap;
3411 }
2876 } 3412 }
2877 3413
2878 memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels)); 3414 memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
@@ -2897,7 +3433,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2897 3433
2898 hw->queues = MWL8K_TX_QUEUES; 3434 hw->queues = MWL8K_TX_QUEUES;
2899 3435
2900 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 3436 hw->wiphy->interface_modes = priv->device_info->modes;
2901 3437
2902 /* Set rssi and noise values to dBm */ 3438 /* Set rssi and noise values to dBm */
2903 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; 3439 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
@@ -2916,11 +3452,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2916 mwl8k_tx_reclaim_handler, (unsigned long)hw); 3452 mwl8k_tx_reclaim_handler, (unsigned long)hw);
2917 tasklet_disable(&priv->tx_reclaim_task); 3453 tasklet_disable(&priv->tx_reclaim_task);
2918 3454
2919 /* Config workthread */
2920 priv->config_wq = create_singlethread_workqueue("mwl8k_config");
2921 if (priv->config_wq == NULL)
2922 goto err_iounmap;
2923
2924 /* Power management cookie */ 3455 /* Power management cookie */
2925 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); 3456 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
2926 if (priv->cookie == NULL) 3457 if (priv->cookie == NULL)
@@ -2934,11 +3465,12 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2934 mutex_init(&priv->fw_mutex); 3465 mutex_init(&priv->fw_mutex);
2935 priv->fw_mutex_owner = NULL; 3466 priv->fw_mutex_owner = NULL;
2936 priv->fw_mutex_depth = 0; 3467 priv->fw_mutex_depth = 0;
2937 priv->tx_wait = NULL;
2938 priv->hostcmd_wait = NULL; 3468 priv->hostcmd_wait = NULL;
2939 3469
2940 spin_lock_init(&priv->tx_lock); 3470 spin_lock_init(&priv->tx_lock);
2941 3471
3472 priv->tx_wait = NULL;
3473
2942 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 3474 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
2943 rc = mwl8k_txq_init(hw, i); 3475 rc = mwl8k_txq_init(hw, i);
2944 if (rc) 3476 if (rc)
@@ -2950,11 +3482,11 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2950 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL); 3482 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
2951 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK); 3483 iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
2952 3484
2953 rc = request_irq(priv->pdev->irq, &mwl8k_interrupt, 3485 rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
2954 IRQF_SHARED, MWL8K_NAME, hw); 3486 IRQF_SHARED, MWL8K_NAME, hw);
2955 if (rc) { 3487 if (rc) {
2956 printk(KERN_ERR "%s: failed to register IRQ handler\n", 3488 printk(KERN_ERR "%s: failed to register IRQ handler\n",
2957 priv->name); 3489 wiphy_name(hw->wiphy));
2958 goto err_free_queues; 3490 goto err_free_queues;
2959 } 3491 }
2960 3492
@@ -2962,16 +3494,18 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2962 mwl8k_hw_reset(priv); 3494 mwl8k_hw_reset(priv);
2963 3495
2964 /* Ask userland hotplug daemon for the device firmware */ 3496 /* Ask userland hotplug daemon for the device firmware */
2965 rc = mwl8k_request_firmware(priv, (u32)id->driver_data); 3497 rc = mwl8k_request_firmware(priv);
2966 if (rc) { 3498 if (rc) {
2967 printk(KERN_ERR "%s: Firmware files not found\n", priv->name); 3499 printk(KERN_ERR "%s: Firmware files not found\n",
3500 wiphy_name(hw->wiphy));
2968 goto err_free_irq; 3501 goto err_free_irq;
2969 } 3502 }
2970 3503
2971 /* Load firmware into hardware */ 3504 /* Load firmware into hardware */
2972 rc = mwl8k_load_firmware(priv); 3505 rc = mwl8k_load_firmware(hw);
2973 if (rc) { 3506 if (rc) {
2974 printk(KERN_ERR "%s: Cannot start firmware\n", priv->name); 3507 printk(KERN_ERR "%s: Cannot start firmware\n",
3508 wiphy_name(hw->wiphy));
2975 goto err_stop_firmware; 3509 goto err_stop_firmware;
2976 } 3510 }
2977 3511
@@ -2986,16 +3520,31 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2986 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3520 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2987 3521
2988 /* Get config data, mac addrs etc */ 3522 /* Get config data, mac addrs etc */
2989 rc = mwl8k_cmd_get_hw_spec(hw); 3523 if (priv->ap_fw) {
3524 rc = mwl8k_cmd_get_hw_spec_ap(hw);
3525 if (!rc)
3526 rc = mwl8k_cmd_set_hw_spec(hw);
3527 } else {
3528 rc = mwl8k_cmd_get_hw_spec_sta(hw);
3529 }
2990 if (rc) { 3530 if (rc) {
2991 printk(KERN_ERR "%s: Cannot initialise firmware\n", priv->name); 3531 printk(KERN_ERR "%s: Cannot initialise firmware\n",
3532 wiphy_name(hw->wiphy));
2992 goto err_stop_firmware; 3533 goto err_stop_firmware;
2993 } 3534 }
2994 3535
2995 /* Turn radio off */ 3536 /* Turn radio off */
2996 rc = mwl8k_cmd_802_11_radio_disable(hw); 3537 rc = mwl8k_cmd_802_11_radio_disable(hw);
2997 if (rc) { 3538 if (rc) {
2998 printk(KERN_ERR "%s: Cannot disable\n", priv->name); 3539 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
3540 goto err_stop_firmware;
3541 }
3542
3543 /* Clear MAC address */
3544 rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
3545 if (rc) {
3546 printk(KERN_ERR "%s: Cannot clear MAC address\n",
3547 wiphy_name(hw->wiphy));
2999 goto err_stop_firmware; 3548 goto err_stop_firmware;
3000 } 3549 }
3001 3550
@@ -3005,13 +3554,15 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3005 3554
3006 rc = ieee80211_register_hw(hw); 3555 rc = ieee80211_register_hw(hw);
3007 if (rc) { 3556 if (rc) {
3008 printk(KERN_ERR "%s: Cannot register device\n", priv->name); 3557 printk(KERN_ERR "%s: Cannot register device\n",
3558 wiphy_name(hw->wiphy));
3009 goto err_stop_firmware; 3559 goto err_stop_firmware;
3010 } 3560 }
3011 3561
3012 printk(KERN_INFO "%s: 88w%u v%d, %pM, firmware version %u.%u.%u.%u\n", 3562 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
3013 wiphy_name(hw->wiphy), priv->part_num, priv->hw_rev, 3563 wiphy_name(hw->wiphy), priv->device_info->part_name,
3014 hw->wiphy->perm_addr, 3564 priv->hw_rev, hw->wiphy->perm_addr,
3565 priv->ap_fw ? "AP" : "STA",
3015 (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff, 3566 (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff,
3016 (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff); 3567 (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff);
3017 3568
@@ -3038,8 +3589,8 @@ err_iounmap:
3038 if (priv->regs != NULL) 3589 if (priv->regs != NULL)
3039 pci_iounmap(pdev, priv->regs); 3590 pci_iounmap(pdev, priv->regs);
3040 3591
3041 if (priv->config_wq != NULL) 3592 if (priv->sram != NULL)
3042 destroy_workqueue(priv->config_wq); 3593 pci_iounmap(pdev, priv->sram);
3043 3594
3044 pci_set_drvdata(pdev, NULL); 3595 pci_set_drvdata(pdev, NULL);
3045 ieee80211_free_hw(hw); 3596 ieee80211_free_hw(hw);
@@ -3073,9 +3624,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3073 /* Remove tx reclaim tasklet */ 3624 /* Remove tx reclaim tasklet */
3074 tasklet_kill(&priv->tx_reclaim_task); 3625 tasklet_kill(&priv->tx_reclaim_task);
3075 3626
3076 /* Stop config thread */
3077 destroy_workqueue(priv->config_wq);
3078
3079 /* Stop hardware */ 3627 /* Stop hardware */
3080 mwl8k_hw_reset(priv); 3628 mwl8k_hw_reset(priv);
3081 3629
@@ -3088,10 +3636,10 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3088 3636
3089 mwl8k_rxq_deinit(hw, 0); 3637 mwl8k_rxq_deinit(hw, 0);
3090 3638
3091 pci_free_consistent(priv->pdev, 4, 3639 pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma);
3092 priv->cookie, priv->cookie_dma);
3093 3640
3094 pci_iounmap(pdev, priv->regs); 3641 pci_iounmap(pdev, priv->regs);
3642 pci_iounmap(pdev, priv->sram);
3095 pci_set_drvdata(pdev, NULL); 3643 pci_set_drvdata(pdev, NULL);
3096 ieee80211_free_hw(hw); 3644 ieee80211_free_hw(hw);
3097 pci_release_regions(pdev); 3645 pci_release_regions(pdev);
@@ -3100,7 +3648,7 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3100 3648
3101static struct pci_driver mwl8k_driver = { 3649static struct pci_driver mwl8k_driver = {
3102 .name = MWL8K_NAME, 3650 .name = MWL8K_NAME,
3103 .id_table = mwl8k_table, 3651 .id_table = mwl8k_pci_id_table,
3104 .probe = mwl8k_probe, 3652 .probe = mwl8k_probe,
3105 .remove = __devexit_p(mwl8k_remove), 3653 .remove = __devexit_p(mwl8k_remove),
3106 .shutdown = __devexit_p(mwl8k_shutdown), 3654 .shutdown = __devexit_p(mwl8k_shutdown),
@@ -3118,3 +3666,8 @@ static void __exit mwl8k_exit(void)
3118 3666
3119module_init(mwl8k_init); 3667module_init(mwl8k_init);
3120module_exit(mwl8k_exit); 3668module_exit(mwl8k_exit);
3669
3670MODULE_DESCRIPTION(MWL8K_DESC);
3671MODULE_VERSION(MWL8K_VERSION);
3672MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
3673MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index 83b635fd7784..e2a2c18920aa 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -1,8 +1,10 @@
1config HERMES 1config HERMES
2 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" 2 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
3 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211 3 depends on (PPC_PMAC || PCI || PCMCIA)
4 depends on CFG80211 4 depends on CFG80211 && CFG80211_WEXT
5 select WIRELESS_EXT 5 select WIRELESS_EXT
6 select WEXT_SPY
7 select WEXT_PRIV
6 select FW_LOADER 8 select FW_LOADER
7 select CRYPTO 9 select CRYPTO
8 select CRYPTO_MICHAEL_MIC 10 select CRYPTO_MICHAEL_MIC
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 1257250a1e22..cfa72962052b 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -28,6 +28,12 @@ static const struct fw_info orinoco_fw[] = {
28 { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, 28 { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
29 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 } 29 { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 }
30}; 30};
31MODULE_FIRMWARE("agere_sta_fw.bin");
32MODULE_FIRMWARE("agere_ap_fw.bin");
33MODULE_FIRMWARE("prism_sta_fw.bin");
34MODULE_FIRMWARE("prism_ap_fw.bin");
35MODULE_FIRMWARE("symbol_sp24t_prim_fw");
36MODULE_FIRMWARE("symbol_sp24t_sec_fw");
31 37
32/* Structure used to access fields in FW 38/* Structure used to access fields in FW
33 * Make sure LE decoding macros are used 39 * Make sure LE decoding macros are used
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 359652d35e63..404830f47ab2 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -60,8 +60,15 @@ static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
60/* Set priv->firmware type, determine firmware properties 60/* Set priv->firmware type, determine firmware properties
61 * This function can be called before we have registerred with netdev, 61 * This function can be called before we have registerred with netdev,
62 * so all errors go out with dev_* rather than printk 62 * so all errors go out with dev_* rather than printk
63 *
64 * If non-NULL stores a firmware description in fw_name.
65 * If non-NULL stores a HW version in hw_ver
66 *
67 * These are output via generic cfg80211 ethtool support.
63 */ 68 */
64int determine_fw_capabilities(struct orinoco_private *priv) 69int determine_fw_capabilities(struct orinoco_private *priv,
70 char *fw_name, size_t fw_name_len,
71 u32 *hw_ver)
65{ 72{
66 struct device *dev = priv->dev; 73 struct device *dev = priv->dev;
67 hermes_t *hw = &priv->hw; 74 hermes_t *hw = &priv->hw;
@@ -85,6 +92,12 @@ int determine_fw_capabilities(struct orinoco_private *priv)
85 dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n", 92 dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n",
86 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor); 93 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor);
87 94
95 if (hw_ver)
96 *hw_ver = (((nic_id.id & 0xff) << 24) |
97 ((nic_id.variant & 0xff) << 16) |
98 ((nic_id.major & 0xff) << 8) |
99 (nic_id.minor & 0xff));
100
88 priv->firmware_type = determine_firmware_type(&nic_id); 101 priv->firmware_type = determine_firmware_type(&nic_id);
89 102
90 /* Get the firmware version */ 103 /* Get the firmware version */
@@ -135,8 +148,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
135 case FIRMWARE_TYPE_AGERE: 148 case FIRMWARE_TYPE_AGERE:
136 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout, 149 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
137 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */ 150 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
138 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 151 if (fw_name)
139 "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor); 152 snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d",
153 sta_id.major, sta_id.minor);
140 154
141 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor; 155 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
142 156
@@ -185,8 +199,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
185 tmp[SYMBOL_MAX_VER_LEN] = '\0'; 199 tmp[SYMBOL_MAX_VER_LEN] = '\0';
186 } 200 }
187 201
188 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 202 if (fw_name)
189 "Symbol %s", tmp); 203 snprintf(fw_name, fw_name_len, "Symbol %s", tmp);
190 204
191 priv->has_ibss = (firmver >= 0x20000); 205 priv->has_ibss = (firmver >= 0x20000);
192 priv->has_wep = (firmver >= 0x15012); 206 priv->has_wep = (firmver >= 0x15012);
@@ -224,9 +238,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
224 * different and less well tested */ 238 * different and less well tested */
225 /* D-Link MAC : 00:40:05:* */ 239 /* D-Link MAC : 00:40:05:* */
226 /* Addtron MAC : 00:90:D1:* */ 240 /* Addtron MAC : 00:90:D1:* */
227 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 241 if (fw_name)
228 "Intersil %d.%d.%d", sta_id.major, sta_id.minor, 242 snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d",
229 sta_id.variant); 243 sta_id.major, sta_id.minor, sta_id.variant);
230 244
231 firmver = ((unsigned long)sta_id.major << 16) | 245 firmver = ((unsigned long)sta_id.major << 16) |
232 ((unsigned long)sta_id.minor << 8) | sta_id.variant; 246 ((unsigned long)sta_id.minor << 8) | sta_id.variant;
@@ -245,7 +259,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
245 } 259 }
246 break; 260 break;
247 } 261 }
248 dev_info(dev, "Firmware determined as %s\n", priv->fw_name); 262 if (fw_name)
263 dev_info(dev, "Firmware determined as %s\n", fw_name);
249 264
250 return 0; 265 return 0;
251} 266}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8df6e8752be6..e2f7fdc4d45a 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -24,7 +24,8 @@
24struct orinoco_private; 24struct orinoco_private;
25struct dev_addr_list; 25struct dev_addr_list;
26 26
27int determine_fw_capabilities(struct orinoco_private *priv); 27int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
28 size_t fw_name_len, u32 *hw_ver);
28int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr); 29int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr);
29int orinoco_hw_allocate_fid(struct orinoco_private *priv); 30int orinoco_hw_allocate_fid(struct orinoco_private *priv);
30int orinoco_get_bitratemode(int bitrate, int automatic); 31int orinoco_get_bitratemode(int bitrate, int automatic);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 7a32bcb0c037..753a1804eee7 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -83,7 +83,6 @@
83#include <linux/device.h> 83#include <linux/device.h>
84#include <linux/netdevice.h> 84#include <linux/netdevice.h>
85#include <linux/etherdevice.h> 85#include <linux/etherdevice.h>
86#include <linux/ethtool.h>
87#include <linux/suspend.h> 86#include <linux/suspend.h>
88#include <linux/if_arp.h> 87#include <linux/if_arp.h>
89#include <linux/wireless.h> 88#include <linux/wireless.h>
@@ -162,8 +161,6 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
162 | HERMES_EV_WTERR | HERMES_EV_INFO \ 161 | HERMES_EV_WTERR | HERMES_EV_INFO \
163 | HERMES_EV_INFDROP) 162 | HERMES_EV_INFDROP)
164 163
165static const struct ethtool_ops orinoco_ethtool_ops;
166
167/********************************************************************/ 164/********************************************************************/
168/* Data types */ 165/* Data types */
169/********************************************************************/ 166/********************************************************************/
@@ -1994,7 +1991,9 @@ int orinoco_init(struct orinoco_private *priv)
1994 goto out; 1991 goto out;
1995 } 1992 }
1996 1993
1997 err = determine_fw_capabilities(priv); 1994 err = determine_fw_capabilities(priv, wiphy->fw_version,
1995 sizeof(wiphy->fw_version),
1996 &wiphy->hw_version);
1998 if (err != 0) { 1997 if (err != 0) {
1999 dev_err(dev, "Incompatible firmware, aborting\n"); 1998 dev_err(dev, "Incompatible firmware, aborting\n");
2000 goto out; 1999 goto out;
@@ -2010,7 +2009,9 @@ int orinoco_init(struct orinoco_private *priv)
2010 priv->do_fw_download = 0; 2009 priv->do_fw_download = 0;
2011 2010
2012 /* Check firmware version again */ 2011 /* Check firmware version again */
2013 err = determine_fw_capabilities(priv); 2012 err = determine_fw_capabilities(priv, wiphy->fw_version,
2013 sizeof(wiphy->fw_version),
2014 &wiphy->hw_version);
2014 if (err != 0) { 2015 if (err != 0) {
2015 dev_err(dev, "Incompatible firmware, aborting\n"); 2016 dev_err(dev, "Incompatible firmware, aborting\n");
2016 goto out; 2017 goto out;
@@ -2212,7 +2213,6 @@ int orinoco_if_add(struct orinoco_private *priv,
2212 dev->ieee80211_ptr = wdev; 2213 dev->ieee80211_ptr = wdev;
2213 dev->netdev_ops = &orinoco_netdev_ops; 2214 dev->netdev_ops = &orinoco_netdev_ops;
2214 dev->watchdog_timeo = HZ; /* 1 second timeout */ 2215 dev->watchdog_timeo = HZ; /* 1 second timeout */
2215 dev->ethtool_ops = &orinoco_ethtool_ops;
2216 dev->wireless_handlers = &orinoco_handler_def; 2216 dev->wireless_handlers = &orinoco_handler_def;
2217#ifdef WIRELESS_SPY 2217#ifdef WIRELESS_SPY
2218 dev->wireless_data = &priv->wireless_data; 2218 dev->wireless_data = &priv->wireless_data;
@@ -2225,6 +2225,7 @@ int orinoco_if_add(struct orinoco_private *priv,
2225 netif_carrier_off(dev); 2225 netif_carrier_off(dev);
2226 2226
2227 memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); 2227 memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
2228 memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);
2228 2229
2229 dev->base_addr = base_addr; 2230 dev->base_addr = base_addr;
2230 dev->irq = irq; 2231 dev->irq = irq;
@@ -2348,27 +2349,6 @@ void orinoco_down(struct orinoco_private *priv)
2348} 2349}
2349EXPORT_SYMBOL(orinoco_down); 2350EXPORT_SYMBOL(orinoco_down);
2350 2351
2351static void orinoco_get_drvinfo(struct net_device *dev,
2352 struct ethtool_drvinfo *info)
2353{
2354 struct orinoco_private *priv = ndev_priv(dev);
2355
2356 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
2357 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
2358 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
2359 if (dev->dev.parent)
2360 strncpy(info->bus_info, dev_name(dev->dev.parent),
2361 sizeof(info->bus_info) - 1);
2362 else
2363 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
2364 "PCMCIA %p", priv->hw.iobase);
2365}
2366
2367static const struct ethtool_ops orinoco_ethtool_ops = {
2368 .get_drvinfo = orinoco_get_drvinfo,
2369 .get_link = ethtool_op_get_link,
2370};
2371
2372/********************************************************************/ 2352/********************************************************************/
2373/* Module initialization */ 2353/* Module initialization */
2374/********************************************************************/ 2354/********************************************************************/
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 9ac6f1dda4b0..665ef56f8382 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -93,7 +93,6 @@ struct orinoco_private {
93 93
94 /* Capabilities of the hardware/firmware */ 94 /* Capabilities of the hardware/firmware */
95 fwtype_t firmware_type; 95 fwtype_t firmware_type;
96 char fw_name[32];
97 int ibss_port; 96 int ibss_port;
98 int nicbuf_size; 97 int nicbuf_size;
99 u16 channel_mask; 98 u16 channel_mask;
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index b45d6a4ed1e8..b0342a520bf1 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -1,6 +1,6 @@
1config P54_COMMON 1config P54_COMMON
2 tristate "Softmac Prism54 support" 2 tristate "Softmac Prism54 support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 ---help--- 5 ---help---
6 This is common code for isl38xx/stlc45xx based modules. 6 This is common code for isl38xx/stlc45xx based modules.
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 0efe67deedee..8e3818f6832e 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -126,7 +126,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
126 int ret = -ENOMEM; 126 int ret = -ENOMEM;
127 127
128 if ((!list->entries) || (!list->band_channel_num[band])) 128 if ((!list->entries) || (!list->band_channel_num[band]))
129 return 0; 129 return -EINVAL;
130 130
131 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 131 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
132 if (!tmp) 132 if (!tmp)
@@ -158,6 +158,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
158 (list->channels[i].data & CHAN_HAS_CURVE ? "" : 158 (list->channels[i].data & CHAN_HAS_CURVE ? "" :
159 " [curve data]"), 159 " [curve data]"),
160 list->channels[i].index, list->channels[i].freq); 160 list->channels[i].index, list->channels[i].freq);
161 continue;
161 } 162 }
162 163
163 tmp->channels[j].band = list->channels[i].band; 164 tmp->channels[j].band = list->channels[i].band;
@@ -165,7 +166,16 @@ static int p54_generate_band(struct ieee80211_hw *dev,
165 j++; 166 j++;
166 } 167 }
167 168
168 tmp->n_channels = list->band_channel_num[band]; 169 if (j == 0) {
170 printk(KERN_ERR "%s: Disabling totally damaged %s band.\n",
171 wiphy_name(dev->wiphy), (band == IEEE80211_BAND_2GHZ) ?
172 "2 GHz" : "5 GHz");
173
174 ret = -ENODATA;
175 goto err_out;
176 }
177
178 tmp->n_channels = j;
169 old = priv->band_table[band]; 179 old = priv->band_table[band];
170 priv->band_table[band] = tmp; 180 priv->band_table[band] = tmp;
171 if (old) { 181 if (old) {
@@ -228,13 +238,13 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
228 struct p54_common *priv = dev->priv; 238 struct p54_common *priv = dev->priv;
229 struct p54_channel_list *list; 239 struct p54_channel_list *list;
230 unsigned int i, j, max_channel_num; 240 unsigned int i, j, max_channel_num;
231 int ret = -ENOMEM; 241 int ret = 0;
232 u16 freq; 242 u16 freq;
233 243
234 if ((priv->iq_autocal_len != priv->curve_data->entries) || 244 if ((priv->iq_autocal_len != priv->curve_data->entries) ||
235 (priv->iq_autocal_len != priv->output_limit->entries)) 245 (priv->iq_autocal_len != priv->output_limit->entries))
236 printk(KERN_ERR "%s: EEPROM is damaged... you may not be able" 246 printk(KERN_ERR "%s: Unsupported or damaged EEPROM detected. "
237 "to use all channels with this device.\n", 247 "You may not be able to use all channels.\n",
238 wiphy_name(dev->wiphy)); 248 wiphy_name(dev->wiphy));
239 249
240 max_channel_num = max_t(unsigned int, priv->output_limit->entries, 250 max_channel_num = max_t(unsigned int, priv->output_limit->entries,
@@ -243,8 +253,10 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
243 priv->curve_data->entries); 253 priv->curve_data->entries);
244 254
245 list = kzalloc(sizeof(*list), GFP_KERNEL); 255 list = kzalloc(sizeof(*list), GFP_KERNEL);
246 if (!list) 256 if (!list) {
257 ret = -ENOMEM;
247 goto free; 258 goto free;
259 }
248 260
249 list->max_entries = max_channel_num; 261 list->max_entries = max_channel_num;
250 list->channels = kzalloc(sizeof(struct p54_channel_entry) * 262 list->channels = kzalloc(sizeof(struct p54_channel_entry) *
@@ -282,13 +294,8 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
282 p54_compare_channels, NULL); 294 p54_compare_channels, NULL);
283 295
284 for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) { 296 for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
285 if (list->band_channel_num[i]) { 297 if (p54_generate_band(dev, list, i) == 0)
286 ret = p54_generate_band(dev, list, i);
287 if (ret)
288 goto free;
289
290 j++; 298 j++;
291 }
292 } 299 }
293 if (j == 0) { 300 if (j == 0) {
294 /* no useable band available. */ 301 /* no useable band available. */
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 4d486bf9f725..18012dbfb45d 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -579,7 +579,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
579 * For now, disable PS by default because it affects 579 * For now, disable PS by default because it affects
580 * link stability significantly. 580 * link stability significantly.
581 */ 581 */
582 dev->wiphy->ps_default = false; 582 dev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
583 583
584 mutex_init(&priv->conf_mutex); 584 mutex_init(&priv->conf_mutex);
585 mutex_init(&priv->eeprom_mutex); 585 mutex_init(&priv->eeprom_mutex);
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index d348c265e867..a15962a19b2a 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -411,7 +411,7 @@ static int p54p_open(struct ieee80211_hw *dev)
411 int err; 411 int err;
412 412
413 init_completion(&priv->boot_comp); 413 init_completion(&priv->boot_comp);
414 err = request_irq(priv->pdev->irq, &p54p_interrupt, 414 err = request_irq(priv->pdev->irq, p54p_interrupt,
415 IRQF_SHARED, "p54pci", dev); 415 IRQF_SHARED, "p54pci", dev);
416 if (err) { 416 if (err) {
417 dev_err(&priv->pdev->dev, "failed to register IRQ handler\n"); 417 dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 2505be56ae39..a3ba3539db02 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -41,6 +41,9 @@
41#define ISL3877_IMAGE_FILE "isl3877" 41#define ISL3877_IMAGE_FILE "isl3877"
42#define ISL3886_IMAGE_FILE "isl3886" 42#define ISL3886_IMAGE_FILE "isl3886"
43#define ISL3890_IMAGE_FILE "isl3890" 43#define ISL3890_IMAGE_FILE "isl3890"
44MODULE_FIRMWARE(ISL3877_IMAGE_FILE);
45MODULE_FIRMWARE(ISL3886_IMAGE_FILE);
46MODULE_FIRMWARE(ISL3890_IMAGE_FILE);
44 47
45static int prism54_bring_down(islpci_private *); 48static int prism54_bring_down(islpci_private *);
46static int islpci_alloc_memory(islpci_private *); 49static int islpci_alloc_memory(islpci_private *);
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index 83d366258c81..e4f2bb7368f2 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -181,7 +181,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
181 isl38xx_disable_interrupts(priv->device_base); 181 isl38xx_disable_interrupts(priv->device_base);
182 182
183 /* request for the interrupt before uploading the firmware */ 183 /* request for the interrupt before uploading the firmware */
184 rvalue = request_irq(pdev->irq, &islpci_interrupt, 184 rvalue = request_irq(pdev->irq, islpci_interrupt,
185 IRQF_SHARED, ndev->name, priv); 185 IRQF_SHARED, ndev->name, priv);
186 186
187 if (rvalue) { 187 if (rvalue) {
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 1c88c2ea59aa..5ee9d2a19360 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2074,7 +2074,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2074 del_timer(&local->timer); 2074 del_timer(&local->timer);
2075 local->timer.expires = jiffies + HZ * 5; 2075 local->timer.expires = jiffies + HZ * 5;
2076 local->timer.data = (long)local; 2076 local->timer.data = (long)local;
2077 if (status == CCS_START_NETWORK) { 2077 if (cmd == CCS_START_NETWORK) {
2078 DEBUG(0, 2078 DEBUG(0,
2079 "ray_cs interrupt network \"%s\" start failed\n", 2079 "ray_cs interrupt network \"%s\" start failed\n",
2080 local->sparm.b4.a_current_ess_id); 2080 local->sparm.b4.a_current_ess_id);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 54175b6fa86c..2ecbedb26e15 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -83,11 +83,11 @@ MODULE_PARM_DESC(roamdelta,
83 "set roaming tendency: 0=aggressive, 1=moderate, " 83 "set roaming tendency: 0=aggressive, 1=moderate, "
84 "2=conservative (default: moderate)"); 84 "2=conservative (default: moderate)");
85 85
86static int modparam_workaround_interval = 500; 86static int modparam_workaround_interval;
87module_param_named(workaround_interval, modparam_workaround_interval, 87module_param_named(workaround_interval, modparam_workaround_interval,
88 int, 0444); 88 int, 0444);
89MODULE_PARM_DESC(workaround_interval, 89MODULE_PARM_DESC(workaround_interval,
90 "set stall workaround interval in msecs (default: 500)"); 90 "set stall workaround interval in msecs (0=disabled) (default: 0)");
91 91
92 92
93/* various RNDIS OID defs */ 93/* various RNDIS OID defs */
@@ -733,12 +733,13 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
733 le32_to_cpu(u.get_c->status)); 733 le32_to_cpu(u.get_c->status));
734 734
735 if (ret == 0) { 735 if (ret == 0) {
736 memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
737
736 ret = le32_to_cpu(u.get_c->len); 738 ret = le32_to_cpu(u.get_c->len);
737 if (ret > *len) 739 if (ret > *len)
738 *len = ret; 740 *len = ret;
739 memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
740 ret = rndis_error_status(u.get_c->status);
741 741
742 ret = rndis_error_status(u.get_c->status);
742 if (ret < 0) 743 if (ret < 0)
743 devdbg(dev, "rndis_query_oid(%s): device returned " 744 devdbg(dev, "rndis_query_oid(%s): device returned "
744 "error, 0x%08x (%d)", oid_to_string(oid), 745 "error, 0x%08x (%d)", oid_to_string(oid),
@@ -1072,6 +1073,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
1072 auth_mode = NDIS_80211_AUTH_SHARED; 1073 auth_mode = NDIS_80211_AUTH_SHARED;
1073 else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM) 1074 else if (auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM)
1074 auth_mode = NDIS_80211_AUTH_OPEN; 1075 auth_mode = NDIS_80211_AUTH_OPEN;
1076 else if (auth_type == NL80211_AUTHTYPE_AUTOMATIC)
1077 auth_mode = NDIS_80211_AUTH_AUTO_SWITCH;
1075 else 1078 else
1076 return -ENOTSUPP; 1079 return -ENOTSUPP;
1077 1080
@@ -2547,7 +2550,7 @@ static void rndis_device_poller(struct work_struct *work)
2547 /* Workaround transfer stalls on poor quality links. 2550 /* Workaround transfer stalls on poor quality links.
2548 * TODO: find right way to fix these stalls (as stalls do not happen 2551 * TODO: find right way to fix these stalls (as stalls do not happen
2549 * with ndiswrapper/windows driver). */ 2552 * with ndiswrapper/windows driver). */
2550 if (priv->last_qual <= 25) { 2553 if (priv->param_workaround_interval > 0 && priv->last_qual <= 25) {
2551 /* Decrease stats worker interval to catch stalls. 2554 /* Decrease stats worker interval to catch stalls.
2552 * faster. Faster than 400-500ms causes packet loss, 2555 * faster. Faster than 400-500ms causes packet loss,
2553 * Slower doesn't catch stalls fast enough. 2556 * Slower doesn't catch stalls fast enough.
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ed1f997e3521..bf60689aaabb 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
1menuconfig RT2X00 1menuconfig RT2X00
2 tristate "Ralink driver support" 2 tristate "Ralink driver support"
3 depends on MAC80211 && WLAN_80211 3 depends on MAC80211
4 ---help--- 4 ---help---
5 This will enable the support for the Ralink drivers, 5 This will enable the support for the Ralink drivers,
6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. 6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
@@ -53,6 +53,36 @@ config RT61PCI
53 53
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI
57 tristate
58 depends on PCI
59 default y
60
61config RT2800PCI_SOC
62 tristate
63 depends on RALINK_RT288X || RALINK_RT305X
64 default y
65
66config RT2800PCI
67 tristate "Ralink rt2800 (PCI/PCMCIA) support (VERY EXPERIMENTAL)"
68 depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
69 select RT2800_LIB
70 select RT2X00_LIB_PCI if RT2800PCI_PCI
71 select RT2X00_LIB_SOC if RT2800PCI_SOC
72 select RT2X00_LIB_HT
73 select RT2X00_LIB_FIRMWARE
74 select RT2X00_LIB_CRYPTO
75 select CRC_CCITT
76 select EEPROM_93CX6
77 ---help---
78 This adds support for rt2800 wireless chipset family.
79 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
80
81 This driver is non-functional at the moment and is intended for
82 developers.
83
84 When compiled as a module, this driver will be called "rt2800pci.ko".
85
56config RT2500USB 86config RT2500USB
57 tristate "Ralink rt2500 (USB) support" 87 tristate "Ralink rt2500 (USB) support"
58 depends on USB 88 depends on USB
@@ -78,8 +108,9 @@ config RT73USB
78 When compiled as a module, this driver will be called rt73usb. 108 When compiled as a module, this driver will be called rt73usb.
79 109
80config RT2800USB 110config RT2800USB
81 tristate "Ralink rt2800 (USB) support" 111 tristate "Ralink rt2800 (USB) support (EXPERIMENTAL)"
82 depends on USB && EXPERIMENTAL 112 depends on USB && EXPERIMENTAL
113 select RT2800_LIB
83 select RT2X00_LIB_USB 114 select RT2X00_LIB_USB
84 select RT2X00_LIB_HT 115 select RT2X00_LIB_HT
85 select RT2X00_LIB_FIRMWARE 116 select RT2X00_LIB_FIRMWARE
@@ -89,12 +120,23 @@ config RT2800USB
89 This adds experimental support for rt2800 wireless chipset family. 120 This adds experimental support for rt2800 wireless chipset family.
90 Supported chips: RT2770, RT2870 & RT3070. 121 Supported chips: RT2770, RT2870 & RT3070.
91 122
123 Known issues:
124 - support for RT2870 chips doesn't work with 802.11n APs yet
125 - support for RT3070 chips is non-functional at the moment
126
92 When compiled as a module, this driver will be called "rt2800usb.ko". 127 When compiled as a module, this driver will be called "rt2800usb.ko".
93 128
129config RT2800_LIB
130 tristate
131
94config RT2X00_LIB_PCI 132config RT2X00_LIB_PCI
95 tristate 133 tristate
96 select RT2X00_LIB 134 select RT2X00_LIB
97 135
136config RT2X00_LIB_SOC
137 tristate
138 select RT2X00_LIB
139
98config RT2X00_LIB_USB 140config RT2X00_LIB_USB
99 tristate 141 tristate
100 select RT2X00_LIB 142 select RT2X00_LIB
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 13043ea97667..971339858297 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -11,10 +11,13 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_HT) += rt2x00ht.o
11 11
12obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o 12obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o 13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
14obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
14obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o 15obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
16obj-$(CONFIG_RT2800_LIB) += rt2800lib.o
15obj-$(CONFIG_RT2400PCI) += rt2400pci.o 17obj-$(CONFIG_RT2400PCI) += rt2400pci.o
16obj-$(CONFIG_RT2500PCI) += rt2500pci.o 18obj-$(CONFIG_RT2500PCI) += rt2500pci.o
17obj-$(CONFIG_RT61PCI) += rt61pci.o 19obj-$(CONFIG_RT61PCI) += rt61pci.o
20obj-$(CONFIG_RT2800PCI) += rt2800pci.o
18obj-$(CONFIG_RT2500USB) += rt2500usb.o 21obj-$(CONFIG_RT2500USB) += rt2500usb.o
19obj-$(CONFIG_RT73USB) += rt73usb.o 22obj-$(CONFIG_RT73USB) += rt73usb.o
20obj-$(CONFIG_RT2800USB) += rt2800usb.o 23obj-$(CONFIG_RT2800USB) += rt2800usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 798f625e38f7..6e68bc7efd4e 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -1341,6 +1341,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1341 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1341 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1342 rt2x00pci_register_read(rt2x00dev, CSR0, &reg); 1342 rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
1343 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1343 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1344 rt2x00_print_chip(rt2x00dev);
1344 1345
1345 if (!rt2x00_rf(&rt2x00dev->chip, RF2420) && 1346 if (!rt2x00_rf(&rt2x00dev->chip, RF2420) &&
1346 !rt2x00_rf(&rt2x00dev->chip, RF2421)) { 1347 !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index ccd644104ad1..6c21ef66dfe0 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2e872ac69826..9a31e5e7b8df 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -1505,6 +1505,7 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1505 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1505 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1506 rt2x00pci_register_read(rt2x00dev, CSR0, &reg); 1506 rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
1507 rt2x00_set_chip_rf(rt2x00dev, value, reg); 1507 rt2x00_set_chip_rf(rt2x00dev, value, reg);
1508 rt2x00_print_chip(rt2x00dev);
1508 1509
1509 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) && 1510 if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
1510 !rt2x00_rf(&rt2x00dev->chip, RF2523) && 1511 !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 54d37957883c..b0075674c09b 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 22dd6d9e2981..b2de43e4f656 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -716,139 +716,6 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
716} 716}
717 717
718/* 718/*
719 * NOTE: This function is directly ported from legacy driver, but
720 * despite it being declared it was never called. Although link tuning
721 * sounds like a good idea, and usually works well for the other drivers,
722 * it does _not_ work with rt2500usb. Enabling this function will result
723 * in TX capabilities only until association kicks in. Immediately
724 * after the successful association all TX frames will be kept in the
725 * hardware queue and never transmitted.
726 */
727#if 0
728static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
729{
730 int rssi = rt2x00_get_link_rssi(&rt2x00dev->link);
731 u16 bbp_thresh;
732 u16 vgc_bound;
733 u16 sens;
734 u16 r24;
735 u16 r25;
736 u16 r61;
737 u16 r17_sens;
738 u8 r17;
739 u8 up_bound;
740 u8 low_bound;
741
742 /*
743 * Read current r17 value, as well as the sensitivity values
744 * for the r17 register.
745 */
746 rt2500usb_bbp_read(rt2x00dev, 17, &r17);
747 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &r17_sens);
748
749 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &vgc_bound);
750 up_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCUPPER);
751 low_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCLOWER);
752
753 /*
754 * If we are not associated, we should go straight to the
755 * dynamic CCA tuning.
756 */
757 if (!rt2x00dev->intf_associated)
758 goto dynamic_cca_tune;
759
760 /*
761 * Determine the BBP tuning threshold and correctly
762 * set BBP 24, 25 and 61.
763 */
764 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE, &bbp_thresh);
765 bbp_thresh = rt2x00_get_field16(bbp_thresh, EEPROM_BBPTUNE_THRESHOLD);
766
767 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &r24);
768 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25, &r25);
769 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61, &r61);
770
771 if ((rssi + bbp_thresh) > 0) {
772 r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_HIGH);
773 r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_HIGH);
774 r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_HIGH);
775 } else {
776 r24 = rt2x00_get_field16(r24, EEPROM_BBPTUNE_R24_LOW);
777 r25 = rt2x00_get_field16(r25, EEPROM_BBPTUNE_R25_LOW);
778 r61 = rt2x00_get_field16(r61, EEPROM_BBPTUNE_R61_LOW);
779 }
780
781 rt2500usb_bbp_write(rt2x00dev, 24, r24);
782 rt2500usb_bbp_write(rt2x00dev, 25, r25);
783 rt2500usb_bbp_write(rt2x00dev, 61, r61);
784
785 /*
786 * A too low RSSI will cause too much false CCA which will
787 * then corrupt the R17 tuning. To remidy this the tuning should
788 * be stopped (While making sure the R17 value will not exceed limits)
789 */
790 if (rssi >= -40) {
791 if (r17 != 0x60)
792 rt2500usb_bbp_write(rt2x00dev, 17, 0x60);
793 return;
794 }
795
796 /*
797 * Special big-R17 for short distance
798 */
799 if (rssi >= -58) {
800 sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_LOW);
801 if (r17 != sens)
802 rt2500usb_bbp_write(rt2x00dev, 17, sens);
803 return;
804 }
805
806 /*
807 * Special mid-R17 for middle distance
808 */
809 if (rssi >= -74) {
810 sens = rt2x00_get_field16(r17_sens, EEPROM_BBPTUNE_R17_HIGH);
811 if (r17 != sens)
812 rt2500usb_bbp_write(rt2x00dev, 17, sens);
813 return;
814 }
815
816 /*
817 * Leave short or middle distance condition, restore r17
818 * to the dynamic tuning range.
819 */
820 low_bound = 0x32;
821 if (rssi < -77)
822 up_bound -= (-77 - rssi);
823
824 if (up_bound < low_bound)
825 up_bound = low_bound;
826
827 if (r17 > up_bound) {
828 rt2500usb_bbp_write(rt2x00dev, 17, up_bound);
829 rt2x00dev->link.vgc_level = up_bound;
830 return;
831 }
832
833dynamic_cca_tune:
834
835 /*
836 * R17 is inside the dynamic tuning range,
837 * start tuning the link based on the false cca counter.
838 */
839 if (rt2x00dev->link.qual.false_cca > 512 && r17 < up_bound) {
840 rt2500usb_bbp_write(rt2x00dev, 17, ++r17);
841 rt2x00dev->link.vgc_level = r17;
842 } else if (rt2x00dev->link.qual.false_cca < 100 && r17 > low_bound) {
843 rt2500usb_bbp_write(rt2x00dev, 17, --r17);
844 rt2x00dev->link.vgc_level = r17;
845 }
846}
847#else
848#define rt2500usb_link_tuner NULL
849#endif
850
851/*
852 * Initialization functions. 719 * Initialization functions.
853 */ 720 */
854static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) 721static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -1542,6 +1409,7 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1542 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1409 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1543 rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg); 1410 rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1544 rt2x00_set_chip(rt2x00dev, RT2570, value, reg); 1411 rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
1412 rt2x00_print_chip(rt2x00dev);
1545 1413
1546 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) || 1414 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
1547 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { 1415 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
@@ -1910,7 +1778,6 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1910 .rfkill_poll = rt2500usb_rfkill_poll, 1778 .rfkill_poll = rt2500usb_rfkill_poll,
1911 .link_stats = rt2500usb_link_stats, 1779 .link_stats = rt2500usb_link_stats,
1912 .reset_tuner = rt2500usb_reset_tuner, 1780 .reset_tuner = rt2500usb_reset_tuner,
1913 .link_tuner = rt2500usb_link_tuner,
1914 .write_tx_desc = rt2500usb_write_tx_desc, 1781 .write_tx_desc = rt2500usb_write_tx_desc,
1915 .write_tx_data = rt2x00usb_write_tx_data, 1782 .write_tx_data = rt2x00usb_write_tx_data,
1916 .write_beacon = rt2500usb_write_beacon, 1783 .write_beacon = rt2500usb_write_beacon,
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index b01edca42583..341a70454635 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
new file mode 100644
index 000000000000..c5fe867665e6
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -0,0 +1,1852 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
6 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
7 Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
8 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
9 Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
10 <http://rt2x00.serialmonkey.com>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the
24 Free Software Foundation, Inc.,
25 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27
28/*
29 Module: rt2800
30 Abstract: Data structures and registers for the rt2800 modules.
31 Supported chipsets: RT2800E, RT2800ED & RT2800U.
32 */
33
34#ifndef RT2800_H
35#define RT2800_H
36
37/*
38 * RF chip defines.
39 *
40 * RF2820 2.4G 2T3R
41 * RF2850 2.4G/5G 2T3R
42 * RF2720 2.4G 1T2R
43 * RF2750 2.4G/5G 1T2R
44 * RF3020 2.4G 1T1R
45 * RF2020 2.4G B/G
46 * RF3021 2.4G 1T2R
47 * RF3022 2.4G 2T2R
48 * RF3052 2.4G 2T2R
49 */
50#define RF2820 0x0001
51#define RF2850 0x0002
52#define RF2720 0x0003
53#define RF2750 0x0004
54#define RF3020 0x0005
55#define RF2020 0x0006
56#define RF3021 0x0007
57#define RF3022 0x0008
58#define RF3052 0x0009
59
60/*
61 * Chipset version.
62 */
63#define RT2860C_VERSION 0x28600100
64#define RT2860D_VERSION 0x28600101
65#define RT2880E_VERSION 0x28720200
66#define RT2883_VERSION 0x28830300
67#define RT3070_VERSION 0x30700200
68
69/*
70 * Signal information.
71 * Default offset is required for RSSI <-> dBm conversion.
72 */
73#define DEFAULT_RSSI_OFFSET 120 /* FIXME */
74
75/*
76 * Register layout information.
77 */
78#define CSR_REG_BASE 0x1000
79#define CSR_REG_SIZE 0x0800
80#define EEPROM_BASE 0x0000
81#define EEPROM_SIZE 0x0110
82#define BBP_BASE 0x0000
83#define BBP_SIZE 0x0080
84#define RF_BASE 0x0004
85#define RF_SIZE 0x0010
86
87/*
88 * Number of TX queues.
89 */
90#define NUM_TX_QUEUES 4
91
92/*
93 * USB registers.
94 */
95
96/*
97 * INT_SOURCE_CSR: Interrupt source register.
98 * Write one to clear corresponding bit.
99 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
100 */
101#define INT_SOURCE_CSR 0x0200
102#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
103#define INT_SOURCE_CSR_TXDELAYINT FIELD32(0x00000002)
104#define INT_SOURCE_CSR_RX_DONE FIELD32(0x00000004)
105#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00000008)
106#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00000010)
107#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00000020)
108#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00000040)
109#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
110#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
111#define INT_SOURCE_CSR_MCU_COMMAND FIELD32(0x00000200)
112#define INT_SOURCE_CSR_RXTX_COHERENT FIELD32(0x00000400)
113#define INT_SOURCE_CSR_TBTT FIELD32(0x00000800)
114#define INT_SOURCE_CSR_PRE_TBTT FIELD32(0x00001000)
115#define INT_SOURCE_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
116#define INT_SOURCE_CSR_AUTO_WAKEUP FIELD32(0x00004000)
117#define INT_SOURCE_CSR_GPTIMER FIELD32(0x00008000)
118#define INT_SOURCE_CSR_RX_COHERENT FIELD32(0x00010000)
119#define INT_SOURCE_CSR_TX_COHERENT FIELD32(0x00020000)
120
121/*
122 * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF.
123 */
124#define INT_MASK_CSR 0x0204
125#define INT_MASK_CSR_RXDELAYINT FIELD32(0x00000001)
126#define INT_MASK_CSR_TXDELAYINT FIELD32(0x00000002)
127#define INT_MASK_CSR_RX_DONE FIELD32(0x00000004)
128#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00000008)
129#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00000010)
130#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00000020)
131#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00000040)
132#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
133#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
134#define INT_MASK_CSR_MCU_COMMAND FIELD32(0x00000200)
135#define INT_MASK_CSR_RXTX_COHERENT FIELD32(0x00000400)
136#define INT_MASK_CSR_TBTT FIELD32(0x00000800)
137#define INT_MASK_CSR_PRE_TBTT FIELD32(0x00001000)
138#define INT_MASK_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
139#define INT_MASK_CSR_AUTO_WAKEUP FIELD32(0x00004000)
140#define INT_MASK_CSR_GPTIMER FIELD32(0x00008000)
141#define INT_MASK_CSR_RX_COHERENT FIELD32(0x00010000)
142#define INT_MASK_CSR_TX_COHERENT FIELD32(0x00020000)
143
144/*
145 * WPDMA_GLO_CFG
146 */
147#define WPDMA_GLO_CFG 0x0208
148#define WPDMA_GLO_CFG_ENABLE_TX_DMA FIELD32(0x00000001)
149#define WPDMA_GLO_CFG_TX_DMA_BUSY FIELD32(0x00000002)
150#define WPDMA_GLO_CFG_ENABLE_RX_DMA FIELD32(0x00000004)
151#define WPDMA_GLO_CFG_RX_DMA_BUSY FIELD32(0x00000008)
152#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE FIELD32(0x00000030)
153#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE FIELD32(0x00000040)
154#define WPDMA_GLO_CFG_BIG_ENDIAN FIELD32(0x00000080)
155#define WPDMA_GLO_CFG_RX_HDR_SCATTER FIELD32(0x0000ff00)
156#define WPDMA_GLO_CFG_HDR_SEG_LEN FIELD32(0xffff0000)
157
158/*
159 * WPDMA_RST_IDX
160 */
161#define WPDMA_RST_IDX 0x020c
162#define WPDMA_RST_IDX_DTX_IDX0 FIELD32(0x00000001)
163#define WPDMA_RST_IDX_DTX_IDX1 FIELD32(0x00000002)
164#define WPDMA_RST_IDX_DTX_IDX2 FIELD32(0x00000004)
165#define WPDMA_RST_IDX_DTX_IDX3 FIELD32(0x00000008)
166#define WPDMA_RST_IDX_DTX_IDX4 FIELD32(0x00000010)
167#define WPDMA_RST_IDX_DTX_IDX5 FIELD32(0x00000020)
168#define WPDMA_RST_IDX_DRX_IDX0 FIELD32(0x00010000)
169
170/*
171 * DELAY_INT_CFG
172 */
173#define DELAY_INT_CFG 0x0210
174#define DELAY_INT_CFG_RXMAX_PTIME FIELD32(0x000000ff)
175#define DELAY_INT_CFG_RXMAX_PINT FIELD32(0x00007f00)
176#define DELAY_INT_CFG_RXDLY_INT_EN FIELD32(0x00008000)
177#define DELAY_INT_CFG_TXMAX_PTIME FIELD32(0x00ff0000)
178#define DELAY_INT_CFG_TXMAX_PINT FIELD32(0x7f000000)
179#define DELAY_INT_CFG_TXDLY_INT_EN FIELD32(0x80000000)
180
181/*
182 * WMM_AIFSN_CFG: Aifsn for each EDCA AC
183 * AIFSN0: AC_BE
184 * AIFSN1: AC_BK
185 * AIFSN2: AC_VI
186 * AIFSN3: AC_VO
187 */
188#define WMM_AIFSN_CFG 0x0214
189#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
190#define WMM_AIFSN_CFG_AIFSN1 FIELD32(0x000000f0)
191#define WMM_AIFSN_CFG_AIFSN2 FIELD32(0x00000f00)
192#define WMM_AIFSN_CFG_AIFSN3 FIELD32(0x0000f000)
193
194/*
195 * WMM_CWMIN_CSR: CWmin for each EDCA AC
196 * CWMIN0: AC_BE
197 * CWMIN1: AC_BK
198 * CWMIN2: AC_VI
199 * CWMIN3: AC_VO
200 */
201#define WMM_CWMIN_CFG 0x0218
202#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
203#define WMM_CWMIN_CFG_CWMIN1 FIELD32(0x000000f0)
204#define WMM_CWMIN_CFG_CWMIN2 FIELD32(0x00000f00)
205#define WMM_CWMIN_CFG_CWMIN3 FIELD32(0x0000f000)
206
207/*
208 * WMM_CWMAX_CSR: CWmax for each EDCA AC
209 * CWMAX0: AC_BE
210 * CWMAX1: AC_BK
211 * CWMAX2: AC_VI
212 * CWMAX3: AC_VO
213 */
214#define WMM_CWMAX_CFG 0x021c
215#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
216#define WMM_CWMAX_CFG_CWMAX1 FIELD32(0x000000f0)
217#define WMM_CWMAX_CFG_CWMAX2 FIELD32(0x00000f00)
218#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
219
220/*
221 * AC_TXOP0: AC_BK/AC_BE TXOP register
222 * AC0TXOP: AC_BK in unit of 32us
223 * AC1TXOP: AC_BE in unit of 32us
224 */
225#define WMM_TXOP0_CFG 0x0220
226#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
227#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
228
229/*
230 * AC_TXOP1: AC_VO/AC_VI TXOP register
231 * AC2TXOP: AC_VI in unit of 32us
232 * AC3TXOP: AC_VO in unit of 32us
233 */
234#define WMM_TXOP1_CFG 0x0224
235#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
236#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
237
238/*
239 * GPIO_CTRL_CFG:
240 */
241#define GPIO_CTRL_CFG 0x0228
242#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
243#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002)
244#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004)
245#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008)
246#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010)
247#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
248#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
249#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
250#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
251
252/*
253 * MCU_CMD_CFG
254 */
255#define MCU_CMD_CFG 0x022c
256
257/*
258 * AC_BK register offsets
259 */
260#define TX_BASE_PTR0 0x0230
261#define TX_MAX_CNT0 0x0234
262#define TX_CTX_IDX0 0x0238
263#define TX_DTX_IDX0 0x023c
264
265/*
266 * AC_BE register offsets
267 */
268#define TX_BASE_PTR1 0x0240
269#define TX_MAX_CNT1 0x0244
270#define TX_CTX_IDX1 0x0248
271#define TX_DTX_IDX1 0x024c
272
273/*
274 * AC_VI register offsets
275 */
276#define TX_BASE_PTR2 0x0250
277#define TX_MAX_CNT2 0x0254
278#define TX_CTX_IDX2 0x0258
279#define TX_DTX_IDX2 0x025c
280
281/*
282 * AC_VO register offsets
283 */
284#define TX_BASE_PTR3 0x0260
285#define TX_MAX_CNT3 0x0264
286#define TX_CTX_IDX3 0x0268
287#define TX_DTX_IDX3 0x026c
288
289/*
290 * HCCA register offsets
291 */
292#define TX_BASE_PTR4 0x0270
293#define TX_MAX_CNT4 0x0274
294#define TX_CTX_IDX4 0x0278
295#define TX_DTX_IDX4 0x027c
296
297/*
298 * MGMT register offsets
299 */
300#define TX_BASE_PTR5 0x0280
301#define TX_MAX_CNT5 0x0284
302#define TX_CTX_IDX5 0x0288
303#define TX_DTX_IDX5 0x028c
304
305/*
306 * RX register offsets
307 */
308#define RX_BASE_PTR 0x0290
309#define RX_MAX_CNT 0x0294
310#define RX_CRX_IDX 0x0298
311#define RX_DRX_IDX 0x029c
312
313/*
314 * PBF_SYS_CTRL
315 * HOST_RAM_WRITE: enable Host program ram write selection
316 */
317#define PBF_SYS_CTRL 0x0400
318#define PBF_SYS_CTRL_READY FIELD32(0x00000080)
319#define PBF_SYS_CTRL_HOST_RAM_WRITE FIELD32(0x00010000)
320
321/*
322 * HOST-MCU shared memory
323 */
324#define HOST_CMD_CSR 0x0404
325#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x000000ff)
326
327/*
328 * PBF registers
329 * Most are for debug. Driver doesn't touch PBF register.
330 */
331#define PBF_CFG 0x0408
332#define PBF_MAX_PCNT 0x040c
333#define PBF_CTRL 0x0410
334#define PBF_INT_STA 0x0414
335#define PBF_INT_ENA 0x0418
336
337/*
338 * BCN_OFFSET0:
339 */
340#define BCN_OFFSET0 0x042c
341#define BCN_OFFSET0_BCN0 FIELD32(0x000000ff)
342#define BCN_OFFSET0_BCN1 FIELD32(0x0000ff00)
343#define BCN_OFFSET0_BCN2 FIELD32(0x00ff0000)
344#define BCN_OFFSET0_BCN3 FIELD32(0xff000000)
345
346/*
347 * BCN_OFFSET1:
348 */
349#define BCN_OFFSET1 0x0430
350#define BCN_OFFSET1_BCN4 FIELD32(0x000000ff)
351#define BCN_OFFSET1_BCN5 FIELD32(0x0000ff00)
352#define BCN_OFFSET1_BCN6 FIELD32(0x00ff0000)
353#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
354
355/*
356 * PBF registers
357 * Most are for debug. Driver doesn't touch PBF register.
358 */
359#define TXRXQ_PCNT 0x0438
360#define PBF_DBG 0x043c
361
362/*
363 * RF registers
364 */
365#define RF_CSR_CFG 0x0500
366#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
367#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
368#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
369#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
370
371/*
372 * EFUSE_CSR: RT30x0 EEPROM
373 */
374#define EFUSE_CTRL 0x0580
375#define EFUSE_CTRL_ADDRESS_IN FIELD32(0x03fe0000)
376#define EFUSE_CTRL_MODE FIELD32(0x000000c0)
377#define EFUSE_CTRL_KICK FIELD32(0x40000000)
378#define EFUSE_CTRL_PRESENT FIELD32(0x80000000)
379
380/*
381 * EFUSE_DATA0
382 */
383#define EFUSE_DATA0 0x0590
384
385/*
386 * EFUSE_DATA1
387 */
388#define EFUSE_DATA1 0x0594
389
390/*
391 * EFUSE_DATA2
392 */
393#define EFUSE_DATA2 0x0598
394
395/*
396 * EFUSE_DATA3
397 */
398#define EFUSE_DATA3 0x059c
399
400/*
401 * MAC Control/Status Registers(CSR).
402 * Some values are set in TU, whereas 1 TU == 1024 us.
403 */
404
405/*
406 * MAC_CSR0: ASIC revision number.
407 * ASIC_REV: 0
408 * ASIC_VER: 2860 or 2870
409 */
410#define MAC_CSR0 0x1000
411#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
412#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
413
414/*
415 * MAC_SYS_CTRL:
416 */
417#define MAC_SYS_CTRL 0x1004
418#define MAC_SYS_CTRL_RESET_CSR FIELD32(0x00000001)
419#define MAC_SYS_CTRL_RESET_BBP FIELD32(0x00000002)
420#define MAC_SYS_CTRL_ENABLE_TX FIELD32(0x00000004)
421#define MAC_SYS_CTRL_ENABLE_RX FIELD32(0x00000008)
422#define MAC_SYS_CTRL_CONTINUOUS_TX FIELD32(0x00000010)
423#define MAC_SYS_CTRL_LOOPBACK FIELD32(0x00000020)
424#define MAC_SYS_CTRL_WLAN_HALT FIELD32(0x00000040)
425#define MAC_SYS_CTRL_RX_TIMESTAMP FIELD32(0x00000080)
426
427/*
428 * MAC_ADDR_DW0: STA MAC register 0
429 */
430#define MAC_ADDR_DW0 0x1008
431#define MAC_ADDR_DW0_BYTE0 FIELD32(0x000000ff)
432#define MAC_ADDR_DW0_BYTE1 FIELD32(0x0000ff00)
433#define MAC_ADDR_DW0_BYTE2 FIELD32(0x00ff0000)
434#define MAC_ADDR_DW0_BYTE3 FIELD32(0xff000000)
435
436/*
437 * MAC_ADDR_DW1: STA MAC register 1
438 * UNICAST_TO_ME_MASK:
439 * Used to mask off bits from byte 5 of the MAC address
440 * to determine the UNICAST_TO_ME bit for RX frames.
441 * The full mask is complemented by BSS_ID_MASK:
442 * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
443 */
444#define MAC_ADDR_DW1 0x100c
445#define MAC_ADDR_DW1_BYTE4 FIELD32(0x000000ff)
446#define MAC_ADDR_DW1_BYTE5 FIELD32(0x0000ff00)
447#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK FIELD32(0x00ff0000)
448
449/*
450 * MAC_BSSID_DW0: BSSID register 0
451 */
452#define MAC_BSSID_DW0 0x1010
453#define MAC_BSSID_DW0_BYTE0 FIELD32(0x000000ff)
454#define MAC_BSSID_DW0_BYTE1 FIELD32(0x0000ff00)
455#define MAC_BSSID_DW0_BYTE2 FIELD32(0x00ff0000)
456#define MAC_BSSID_DW0_BYTE3 FIELD32(0xff000000)
457
458/*
459 * MAC_BSSID_DW1: BSSID register 1
460 * BSS_ID_MASK:
461 * 0: 1-BSSID mode (BSS index = 0)
462 * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
463 * 2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
464 * 3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
465 * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
466 * BSSID. This will make sure that those bits will be ignored
467 * when determining the MY_BSS of RX frames.
468 */
469#define MAC_BSSID_DW1 0x1014
470#define MAC_BSSID_DW1_BYTE4 FIELD32(0x000000ff)
471#define MAC_BSSID_DW1_BYTE5 FIELD32(0x0000ff00)
472#define MAC_BSSID_DW1_BSS_ID_MASK FIELD32(0x00030000)
473#define MAC_BSSID_DW1_BSS_BCN_NUM FIELD32(0x001c0000)
474
475/*
476 * MAX_LEN_CFG: Maximum frame length register.
477 * MAX_MPDU: rt2860b max 16k bytes
478 * MAX_PSDU: Maximum PSDU length
479 * (power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
480 */
481#define MAX_LEN_CFG 0x1018
482#define MAX_LEN_CFG_MAX_MPDU FIELD32(0x00000fff)
483#define MAX_LEN_CFG_MAX_PSDU FIELD32(0x00003000)
484#define MAX_LEN_CFG_MIN_PSDU FIELD32(0x0000c000)
485#define MAX_LEN_CFG_MIN_MPDU FIELD32(0x000f0000)
486
487/*
488 * BBP_CSR_CFG: BBP serial control register
489 * VALUE: Register value to program into BBP
490 * REG_NUM: Selected BBP register
491 * READ_CONTROL: 0 write BBP, 1 read BBP
492 * BUSY: ASIC is busy executing BBP commands
493 * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
 * BBP_RW_MODE: 0 serial, 1 parallel
495 */
496#define BBP_CSR_CFG 0x101c
497#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
498#define BBP_CSR_CFG_REGNUM FIELD32(0x0000ff00)
499#define BBP_CSR_CFG_READ_CONTROL FIELD32(0x00010000)
500#define BBP_CSR_CFG_BUSY FIELD32(0x00020000)
501#define BBP_CSR_CFG_BBP_PAR_DUR FIELD32(0x00040000)
502#define BBP_CSR_CFG_BBP_RW_MODE FIELD32(0x00080000)
503
504/*
505 * RF_CSR_CFG0: RF control register
506 * REGID_AND_VALUE: Register value to program into RF
507 * BITWIDTH: Selected RF register
508 * STANDBYMODE: 0 high when standby, 1 low when standby
509 * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
510 * BUSY: ASIC is busy executing RF commands
511 */
512#define RF_CSR_CFG0 0x1020
513#define RF_CSR_CFG0_REGID_AND_VALUE FIELD32(0x00ffffff)
514#define RF_CSR_CFG0_BITWIDTH FIELD32(0x1f000000)
515#define RF_CSR_CFG0_REG_VALUE_BW FIELD32(0x1fffffff)
516#define RF_CSR_CFG0_STANDBYMODE FIELD32(0x20000000)
517#define RF_CSR_CFG0_SEL FIELD32(0x40000000)
518#define RF_CSR_CFG0_BUSY FIELD32(0x80000000)
519
520/*
521 * RF_CSR_CFG1: RF control register
522 * REGID_AND_VALUE: Register value to program into RF
523 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
524 * 0: 3 system clock cycle (37.5usec)
525 * 1: 5 system clock cycle (62.5usec)
526 */
527#define RF_CSR_CFG1 0x1024
528#define RF_CSR_CFG1_REGID_AND_VALUE FIELD32(0x00ffffff)
529#define RF_CSR_CFG1_RFGAP FIELD32(0x1f000000)
530
531/*
532 * RF_CSR_CFG2: RF control register
533 * VALUE: Register value to program into RF
534 */
535#define RF_CSR_CFG2 0x1028
536#define RF_CSR_CFG2_VALUE FIELD32(0x00ffffff)
537
538/*
539 * LED_CFG: LED control
540 * color LED's:
541 * 0: off
542 * 1: blinking upon TX2
543 * 2: periodic slow blinking
544 * 3: always on
545 * LED polarity:
546 * 0: active low
547 * 1: active high
548 */
549#define LED_CFG 0x102c
550#define LED_CFG_ON_PERIOD FIELD32(0x000000ff)
551#define LED_CFG_OFF_PERIOD FIELD32(0x0000ff00)
552#define LED_CFG_SLOW_BLINK_PERIOD FIELD32(0x003f0000)
553#define LED_CFG_R_LED_MODE FIELD32(0x03000000)
554#define LED_CFG_G_LED_MODE FIELD32(0x0c000000)
555#define LED_CFG_Y_LED_MODE FIELD32(0x30000000)
556#define LED_CFG_LED_POLAR FIELD32(0x40000000)
557
558/*
559 * XIFS_TIME_CFG: MAC timing
560 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
561 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
562 * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
563 * when MAC doesn't reference BBP signal BBRXEND
564 * EIFS: unit 1us
565 * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
566 *
567 */
568#define XIFS_TIME_CFG 0x1100
569#define XIFS_TIME_CFG_CCKM_SIFS_TIME FIELD32(0x000000ff)
570#define XIFS_TIME_CFG_OFDM_SIFS_TIME FIELD32(0x0000ff00)
571#define XIFS_TIME_CFG_OFDM_XIFS_TIME FIELD32(0x000f0000)
572#define XIFS_TIME_CFG_EIFS FIELD32(0x1ff00000)
573#define XIFS_TIME_CFG_BB_RXEND_ENABLE FIELD32(0x20000000)
574
575/*
576 * BKOFF_SLOT_CFG:
577 */
578#define BKOFF_SLOT_CFG 0x1104
579#define BKOFF_SLOT_CFG_SLOT_TIME FIELD32(0x000000ff)
580#define BKOFF_SLOT_CFG_CC_DELAY_TIME FIELD32(0x0000ff00)
581
582/*
583 * NAV_TIME_CFG:
584 */
585#define NAV_TIME_CFG 0x1108
586#define NAV_TIME_CFG_SIFS FIELD32(0x000000ff)
587#define NAV_TIME_CFG_SLOT_TIME FIELD32(0x0000ff00)
588#define NAV_TIME_CFG_EIFS FIELD32(0x01ff0000)
589#define NAV_TIME_ZERO_SIFS FIELD32(0x02000000)
590
591/*
592 * CH_TIME_CFG: count as channel busy
593 */
594#define CH_TIME_CFG 0x110c
595
596/*
597 * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
598 */
599#define PBF_LIFE_TIMER 0x1110
600
601/*
602 * BCN_TIME_CFG:
603 * BEACON_INTERVAL: in unit of 1/16 TU
604 * TSF_TICKING: Enable TSF auto counting
605 * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
606 * BEACON_GEN: Enable beacon generator
607 */
608#define BCN_TIME_CFG 0x1114
609#define BCN_TIME_CFG_BEACON_INTERVAL FIELD32(0x0000ffff)
610#define BCN_TIME_CFG_TSF_TICKING FIELD32(0x00010000)
611#define BCN_TIME_CFG_TSF_SYNC FIELD32(0x00060000)
612#define BCN_TIME_CFG_TBTT_ENABLE FIELD32(0x00080000)
613#define BCN_TIME_CFG_BEACON_GEN FIELD32(0x00100000)
614#define BCN_TIME_CFG_TX_TIME_COMPENSATE FIELD32(0xf0000000)
615
616/*
617 * TBTT_SYNC_CFG:
618 */
619#define TBTT_SYNC_CFG 0x1118
620
621/*
622 * TSF_TIMER_DW0: Local lsb TSF timer, read-only
623 */
624#define TSF_TIMER_DW0 0x111c
625#define TSF_TIMER_DW0_LOW_WORD FIELD32(0xffffffff)
626
627/*
628 * TSF_TIMER_DW1: Local msb TSF timer, read-only
629 */
630#define TSF_TIMER_DW1 0x1120
631#define TSF_TIMER_DW1_HIGH_WORD FIELD32(0xffffffff)
632
633/*
 * TBTT_TIMER: Timer remaining until the next TBTT, read-only
635 */
636#define TBTT_TIMER 0x1124
637
638/*
639 * INT_TIMER_CFG:
640 */
641#define INT_TIMER_CFG 0x1128
642
643/*
644 * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
645 */
646#define INT_TIMER_EN 0x112c
647
648/*
649 * CH_IDLE_STA: channel idle time
650 */
651#define CH_IDLE_STA 0x1130
652
653/*
654 * CH_BUSY_STA: channel busy time
655 */
656#define CH_BUSY_STA 0x1134
657
658/*
659 * MAC_STATUS_CFG:
660 * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
661 * if 1 or higher one of the 2 registers is busy.
662 */
663#define MAC_STATUS_CFG 0x1200
664#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
665
666/*
667 * PWR_PIN_CFG:
668 */
669#define PWR_PIN_CFG 0x1204
670
671/*
672 * AUTOWAKEUP_CFG: Manual power control / status register
 * TBCN_BEFORE_WAKE: ForceWake has higher priority than PutToSleep when both are set
674 * AUTOWAKE: 0:sleep, 1:awake
675 */
676#define AUTOWAKEUP_CFG 0x1208
677#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME FIELD32(0x000000ff)
678#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00)
679#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000)
680
681/*
682 * EDCA_AC0_CFG:
683 */
684#define EDCA_AC0_CFG 0x1300
685#define EDCA_AC0_CFG_TX_OP FIELD32(0x000000ff)
686#define EDCA_AC0_CFG_AIFSN FIELD32(0x00000f00)
687#define EDCA_AC0_CFG_CWMIN FIELD32(0x0000f000)
688#define EDCA_AC0_CFG_CWMAX FIELD32(0x000f0000)
689
690/*
691 * EDCA_AC1_CFG:
692 */
693#define EDCA_AC1_CFG 0x1304
694#define EDCA_AC1_CFG_TX_OP FIELD32(0x000000ff)
695#define EDCA_AC1_CFG_AIFSN FIELD32(0x00000f00)
696#define EDCA_AC1_CFG_CWMIN FIELD32(0x0000f000)
697#define EDCA_AC1_CFG_CWMAX FIELD32(0x000f0000)
698
699/*
700 * EDCA_AC2_CFG:
701 */
702#define EDCA_AC2_CFG 0x1308
703#define EDCA_AC2_CFG_TX_OP FIELD32(0x000000ff)
704#define EDCA_AC2_CFG_AIFSN FIELD32(0x00000f00)
705#define EDCA_AC2_CFG_CWMIN FIELD32(0x0000f000)
706#define EDCA_AC2_CFG_CWMAX FIELD32(0x000f0000)
707
708/*
709 * EDCA_AC3_CFG:
710 */
711#define EDCA_AC3_CFG 0x130c
712#define EDCA_AC3_CFG_TX_OP FIELD32(0x000000ff)
713#define EDCA_AC3_CFG_AIFSN FIELD32(0x00000f00)
714#define EDCA_AC3_CFG_CWMIN FIELD32(0x0000f000)
715#define EDCA_AC3_CFG_CWMAX FIELD32(0x000f0000)
716
717/*
718 * EDCA_TID_AC_MAP:
719 */
720#define EDCA_TID_AC_MAP 0x1310
721
722/*
723 * TX_PWR_CFG_0:
724 */
725#define TX_PWR_CFG_0 0x1314
726#define TX_PWR_CFG_0_1MBS FIELD32(0x0000000f)
727#define TX_PWR_CFG_0_2MBS FIELD32(0x000000f0)
728#define TX_PWR_CFG_0_55MBS FIELD32(0x00000f00)
729#define TX_PWR_CFG_0_11MBS FIELD32(0x0000f000)
730#define TX_PWR_CFG_0_6MBS FIELD32(0x000f0000)
731#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
732#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
733#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
734
735/*
736 * TX_PWR_CFG_1:
737 */
738#define TX_PWR_CFG_1 0x1318
739#define TX_PWR_CFG_1_24MBS FIELD32(0x0000000f)
740#define TX_PWR_CFG_1_36MBS FIELD32(0x000000f0)
741#define TX_PWR_CFG_1_48MBS FIELD32(0x00000f00)
742#define TX_PWR_CFG_1_54MBS FIELD32(0x0000f000)
743#define TX_PWR_CFG_1_MCS0 FIELD32(0x000f0000)
744#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
745#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
746#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
747
748/*
749 * TX_PWR_CFG_2:
750 */
751#define TX_PWR_CFG_2 0x131c
752#define TX_PWR_CFG_2_MCS4 FIELD32(0x0000000f)
753#define TX_PWR_CFG_2_MCS5 FIELD32(0x000000f0)
754#define TX_PWR_CFG_2_MCS6 FIELD32(0x00000f00)
755#define TX_PWR_CFG_2_MCS7 FIELD32(0x0000f000)
756#define TX_PWR_CFG_2_MCS8 FIELD32(0x000f0000)
757#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
758#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
759#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
760
761/*
762 * TX_PWR_CFG_3:
763 */
764#define TX_PWR_CFG_3 0x1320
765#define TX_PWR_CFG_3_MCS12 FIELD32(0x0000000f)
766#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0)
767#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00)
768#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000)
769#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000)
770#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
771#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
772#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
773
774/*
775 * TX_PWR_CFG_4:
776 */
777#define TX_PWR_CFG_4 0x1324
778#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f)
779#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
780#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
781#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
782
783/*
784 * TX_PIN_CFG:
785 */
786#define TX_PIN_CFG 0x1328
787#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001)
788#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002)
789#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004)
790#define TX_PIN_CFG_PA_PE_G1_EN FIELD32(0x00000008)
791#define TX_PIN_CFG_PA_PE_A0_POL FIELD32(0x00000010)
792#define TX_PIN_CFG_PA_PE_G0_POL FIELD32(0x00000020)
793#define TX_PIN_CFG_PA_PE_A1_POL FIELD32(0x00000040)
794#define TX_PIN_CFG_PA_PE_G1_POL FIELD32(0x00000080)
795#define TX_PIN_CFG_LNA_PE_A0_EN FIELD32(0x00000100)
796#define TX_PIN_CFG_LNA_PE_G0_EN FIELD32(0x00000200)
797#define TX_PIN_CFG_LNA_PE_A1_EN FIELD32(0x00000400)
798#define TX_PIN_CFG_LNA_PE_G1_EN FIELD32(0x00000800)
799#define TX_PIN_CFG_LNA_PE_A0_POL FIELD32(0x00001000)
800#define TX_PIN_CFG_LNA_PE_G0_POL FIELD32(0x00002000)
801#define TX_PIN_CFG_LNA_PE_A1_POL FIELD32(0x00004000)
802#define TX_PIN_CFG_LNA_PE_G1_POL FIELD32(0x00008000)
803#define TX_PIN_CFG_RFTR_EN FIELD32(0x00010000)
804#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
805#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
806#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
807
808/*
809 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
810 */
811#define TX_BAND_CFG 0x132c
812#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
813#define TX_BAND_CFG_A FIELD32(0x00000002)
814#define TX_BAND_CFG_BG FIELD32(0x00000004)
815
816/*
817 * TX_SW_CFG0:
818 */
819#define TX_SW_CFG0 0x1330
820
821/*
822 * TX_SW_CFG1:
823 */
824#define TX_SW_CFG1 0x1334
825
826/*
827 * TX_SW_CFG2:
828 */
829#define TX_SW_CFG2 0x1338
830
831/*
832 * TXOP_THRES_CFG:
833 */
834#define TXOP_THRES_CFG 0x133c
835
836/*
837 * TXOP_CTRL_CFG:
838 */
839#define TXOP_CTRL_CFG 0x1340
840
841/*
842 * TX_RTS_CFG:
843 * RTS_THRES: unit:byte
844 * RTS_FBK_EN: enable rts rate fallback
845 */
846#define TX_RTS_CFG 0x1344
847#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT FIELD32(0x000000ff)
848#define TX_RTS_CFG_RTS_THRES FIELD32(0x00ffff00)
849#define TX_RTS_CFG_RTS_FBK_EN FIELD32(0x01000000)
850
851/*
852 * TX_TIMEOUT_CFG:
853 * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
854 * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
855 * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
856 * it is recommended that:
857 * (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
858 */
859#define TX_TIMEOUT_CFG 0x1348
860#define TX_TIMEOUT_CFG_MPDU_LIFETIME FIELD32(0x000000f0)
861#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT FIELD32(0x0000ff00)
862#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT FIELD32(0x00ff0000)
863
864/*
865 * TX_RTY_CFG:
866 * SHORT_RTY_LIMIT: short retry limit
867 * LONG_RTY_LIMIT: long retry limit
 * LONG_RTY_THRE: Long retry threshold
869 * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
870 * 0:expired by retry limit, 1: expired by mpdu life timer
871 * AGG_RTY_MODE: Aggregate MPDU retry mode
872 * 0:expired by retry limit, 1: expired by mpdu life timer
873 * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
874 */
875#define TX_RTY_CFG 0x134c
876#define TX_RTY_CFG_SHORT_RTY_LIMIT FIELD32(0x000000ff)
877#define TX_RTY_CFG_LONG_RTY_LIMIT FIELD32(0x0000ff00)
878#define TX_RTY_CFG_LONG_RTY_THRE FIELD32(0x0fff0000)
879#define TX_RTY_CFG_NON_AGG_RTY_MODE FIELD32(0x10000000)
880#define TX_RTY_CFG_AGG_RTY_MODE FIELD32(0x20000000)
881#define TX_RTY_CFG_TX_AUTO_FB_ENABLE FIELD32(0x40000000)
882
883/*
884 * TX_LINK_CFG:
885 * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
886 * MFB_ENABLE: TX apply remote MFB 1:enable
887 * REMOTE_UMFS_ENABLE: remote unsolicit MFB enable
888 * 0: not apply remote remote unsolicit (MFS=7)
889 * TX_MRQ_EN: MCS request TX enable
890 * TX_RDG_EN: RDG TX enable
891 * TX_CF_ACK_EN: Piggyback CF-ACK enable
892 * REMOTE_MFB: remote MCS feedback
893 * REMOTE_MFS: remote MCS feedback sequence number
894 */
895#define TX_LINK_CFG 0x1350
896#define TX_LINK_CFG_REMOTE_MFB_LIFETIME FIELD32(0x000000ff)
897#define TX_LINK_CFG_MFB_ENABLE FIELD32(0x00000100)
898#define TX_LINK_CFG_REMOTE_UMFS_ENABLE FIELD32(0x00000200)
899#define TX_LINK_CFG_TX_MRQ_EN FIELD32(0x00000400)
900#define TX_LINK_CFG_TX_RDG_EN FIELD32(0x00000800)
901#define TX_LINK_CFG_TX_CF_ACK_EN FIELD32(0x00001000)
902#define TX_LINK_CFG_REMOTE_MFB FIELD32(0x00ff0000)
903#define TX_LINK_CFG_REMOTE_MFS FIELD32(0xff000000)
904
905/*
906 * HT_FBK_CFG0:
907 */
908#define HT_FBK_CFG0 0x1354
909#define HT_FBK_CFG0_HTMCS0FBK FIELD32(0x0000000f)
910#define HT_FBK_CFG0_HTMCS1FBK FIELD32(0x000000f0)
911#define HT_FBK_CFG0_HTMCS2FBK FIELD32(0x00000f00)
912#define HT_FBK_CFG0_HTMCS3FBK FIELD32(0x0000f000)
913#define HT_FBK_CFG0_HTMCS4FBK FIELD32(0x000f0000)
914#define HT_FBK_CFG0_HTMCS5FBK FIELD32(0x00f00000)
915#define HT_FBK_CFG0_HTMCS6FBK FIELD32(0x0f000000)
916#define HT_FBK_CFG0_HTMCS7FBK FIELD32(0xf0000000)
917
918/*
919 * HT_FBK_CFG1:
920 */
921#define HT_FBK_CFG1 0x1358
922#define HT_FBK_CFG1_HTMCS8FBK FIELD32(0x0000000f)
923#define HT_FBK_CFG1_HTMCS9FBK FIELD32(0x000000f0)
924#define HT_FBK_CFG1_HTMCS10FBK FIELD32(0x00000f00)
925#define HT_FBK_CFG1_HTMCS11FBK FIELD32(0x0000f000)
926#define HT_FBK_CFG1_HTMCS12FBK FIELD32(0x000f0000)
927#define HT_FBK_CFG1_HTMCS13FBK FIELD32(0x00f00000)
928#define HT_FBK_CFG1_HTMCS14FBK FIELD32(0x0f000000)
929#define HT_FBK_CFG1_HTMCS15FBK FIELD32(0xf0000000)
930
931/*
932 * LG_FBK_CFG0:
933 */
934#define LG_FBK_CFG0 0x135c
935#define LG_FBK_CFG0_OFDMMCS0FBK FIELD32(0x0000000f)
936#define LG_FBK_CFG0_OFDMMCS1FBK FIELD32(0x000000f0)
937#define LG_FBK_CFG0_OFDMMCS2FBK FIELD32(0x00000f00)
938#define LG_FBK_CFG0_OFDMMCS3FBK FIELD32(0x0000f000)
939#define LG_FBK_CFG0_OFDMMCS4FBK FIELD32(0x000f0000)
940#define LG_FBK_CFG0_OFDMMCS5FBK FIELD32(0x00f00000)
941#define LG_FBK_CFG0_OFDMMCS6FBK FIELD32(0x0f000000)
942#define LG_FBK_CFG0_OFDMMCS7FBK FIELD32(0xf0000000)
943
944/*
945 * LG_FBK_CFG1:
946 */
947#define LG_FBK_CFG1 0x1360
948#define LG_FBK_CFG0_CCKMCS0FBK FIELD32(0x0000000f)
949#define LG_FBK_CFG0_CCKMCS1FBK FIELD32(0x000000f0)
950#define LG_FBK_CFG0_CCKMCS2FBK FIELD32(0x00000f00)
951#define LG_FBK_CFG0_CCKMCS3FBK FIELD32(0x0000f000)
952
953/*
954 * CCK_PROT_CFG: CCK Protection
955 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
956 * PROTECT_CTRL: Protection control frame type for CCK TX
957 * 0:none, 1:RTS/CTS, 2:CTS-to-self
958 * PROTECT_NAV: TXOP protection type for CCK TX
959 * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
960 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
961 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
962 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
963 * TX_OP_ALLOW_MM40: CCK TXOP allowance, 0:disallow
964 * TX_OP_ALLOW_GF20: CCK TXOP allowance, 0:disallow
965 * TX_OP_ALLOW_GF40: CCK TXOP allowance, 0:disallow
966 * RTS_TH_EN: RTS threshold enable on CCK TX
967 */
968#define CCK_PROT_CFG 0x1364
969#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
970#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
971#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
972#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
973#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
974#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
975#define CCK_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
976#define CCK_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
977#define CCK_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
978#define CCK_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
979
980/*
981 * OFDM_PROT_CFG: OFDM Protection
982 */
983#define OFDM_PROT_CFG 0x1368
984#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
985#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
986#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
987#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
988#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
989#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
990#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
991#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
992#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
993#define OFDM_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
994
995/*
996 * MM20_PROT_CFG: MM20 Protection
997 */
998#define MM20_PROT_CFG 0x136c
999#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1000#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1001#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1002#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1003#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1004#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1005#define MM20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1006#define MM20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1007#define MM20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1008#define MM20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1009
1010/*
1011 * MM40_PROT_CFG: MM40 Protection
1012 */
1013#define MM40_PROT_CFG 0x1370
1014#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1015#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1016#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1017#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1018#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1019#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1020#define MM40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1021#define MM40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1022#define MM40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1023#define MM40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1024
1025/*
1026 * GF20_PROT_CFG: GF20 Protection
1027 */
1028#define GF20_PROT_CFG 0x1374
1029#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1030#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1031#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1032#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1033#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1034#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1035#define GF20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1036#define GF20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1037#define GF20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1038#define GF20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1039
1040/*
1041 * GF40_PROT_CFG: GF40 Protection
1042 */
1043#define GF40_PROT_CFG 0x1378
1044#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1045#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1046#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1047#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1048#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1049#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1050#define GF40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1051#define GF40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1052#define GF40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1053#define GF40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1054
1055/*
1056 * EXP_CTS_TIME:
1057 */
1058#define EXP_CTS_TIME 0x137c
1059
1060/*
1061 * EXP_ACK_TIME:
1062 */
1063#define EXP_ACK_TIME 0x1380
1064
1065/*
1066 * RX_FILTER_CFG: RX configuration register.
1067 */
1068#define RX_FILTER_CFG 0x1400
1069#define RX_FILTER_CFG_DROP_CRC_ERROR FIELD32(0x00000001)
1070#define RX_FILTER_CFG_DROP_PHY_ERROR FIELD32(0x00000002)
1071#define RX_FILTER_CFG_DROP_NOT_TO_ME FIELD32(0x00000004)
1072#define RX_FILTER_CFG_DROP_NOT_MY_BSSD FIELD32(0x00000008)
1073#define RX_FILTER_CFG_DROP_VER_ERROR FIELD32(0x00000010)
1074#define RX_FILTER_CFG_DROP_MULTICAST FIELD32(0x00000020)
1075#define RX_FILTER_CFG_DROP_BROADCAST FIELD32(0x00000040)
1076#define RX_FILTER_CFG_DROP_DUPLICATE FIELD32(0x00000080)
1077#define RX_FILTER_CFG_DROP_CF_END_ACK FIELD32(0x00000100)
1078#define RX_FILTER_CFG_DROP_CF_END FIELD32(0x00000200)
1079#define RX_FILTER_CFG_DROP_ACK FIELD32(0x00000400)
1080#define RX_FILTER_CFG_DROP_CTS FIELD32(0x00000800)
1081#define RX_FILTER_CFG_DROP_RTS FIELD32(0x00001000)
1082#define RX_FILTER_CFG_DROP_PSPOLL FIELD32(0x00002000)
1083#define RX_FILTER_CFG_DROP_BA FIELD32(0x00004000)
1084#define RX_FILTER_CFG_DROP_BAR FIELD32(0x00008000)
1085#define RX_FILTER_CFG_DROP_CNTL FIELD32(0x00010000)
1086
1087/*
1088 * AUTO_RSP_CFG:
1089 * AUTORESPONDER: 0: disable, 1: enable
1090 * BAC_ACK_POLICY: 0:long, 1:short preamble
1091 * CTS_40_MMODE: Response CTS 40MHz duplicate mode
1092 * CTS_40_MREF: Response CTS 40MHz duplicate mode
1093 * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
1094 * DUAL_CTS_EN: Power bit value in control frame
1095 * ACK_CTS_PSM_BIT:Power bit value in control frame
1096 */
1097#define AUTO_RSP_CFG 0x1404
1098#define AUTO_RSP_CFG_AUTORESPONDER FIELD32(0x00000001)
1099#define AUTO_RSP_CFG_BAC_ACK_POLICY FIELD32(0x00000002)
1100#define AUTO_RSP_CFG_CTS_40_MMODE FIELD32(0x00000004)
1101#define AUTO_RSP_CFG_CTS_40_MREF FIELD32(0x00000008)
1102#define AUTO_RSP_CFG_AR_PREAMBLE FIELD32(0x00000010)
1103#define AUTO_RSP_CFG_DUAL_CTS_EN FIELD32(0x00000040)
1104#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT FIELD32(0x00000080)
1105
1106/*
1107 * LEGACY_BASIC_RATE:
1108 */
1109#define LEGACY_BASIC_RATE 0x1408
1110
1111/*
1112 * HT_BASIC_RATE:
1113 */
1114#define HT_BASIC_RATE 0x140c
1115
1116/*
1117 * HT_CTRL_CFG:
1118 */
1119#define HT_CTRL_CFG 0x1410
1120
1121/*
1122 * SIFS_COST_CFG:
1123 */
1124#define SIFS_COST_CFG 0x1414
1125
1126/*
1127 * RX_PARSER_CFG:
1128 * Set NAV for all received frames
1129 */
1130#define RX_PARSER_CFG 0x1418
1131
1132/*
1133 * TX_SEC_CNT0:
1134 */
1135#define TX_SEC_CNT0 0x1500
1136
1137/*
1138 * RX_SEC_CNT0:
1139 */
1140#define RX_SEC_CNT0 0x1504
1141
1142/*
1143 * CCMP_FC_MUTE:
1144 */
1145#define CCMP_FC_MUTE 0x1508
1146
1147/*
1148 * TXOP_HLDR_ADDR0:
1149 */
1150#define TXOP_HLDR_ADDR0 0x1600
1151
1152/*
1153 * TXOP_HLDR_ADDR1:
1154 */
1155#define TXOP_HLDR_ADDR1 0x1604
1156
1157/*
1158 * TXOP_HLDR_ET:
1159 */
1160#define TXOP_HLDR_ET 0x1608
1161
1162/*
1163 * QOS_CFPOLL_RA_DW0:
1164 */
1165#define QOS_CFPOLL_RA_DW0 0x160c
1166
1167/*
1168 * QOS_CFPOLL_RA_DW1:
1169 */
1170#define QOS_CFPOLL_RA_DW1 0x1610
1171
1172/*
1173 * QOS_CFPOLL_QC:
1174 */
1175#define QOS_CFPOLL_QC 0x1614
1176
1177/*
1178 * RX_STA_CNT0: RX PLCP error count & RX CRC error count
1179 */
1180#define RX_STA_CNT0 0x1700
1181#define RX_STA_CNT0_CRC_ERR FIELD32(0x0000ffff)
1182#define RX_STA_CNT0_PHY_ERR FIELD32(0xffff0000)
1183
1184/*
1185 * RX_STA_CNT1: RX False CCA count & RX LONG frame count
1186 */
1187#define RX_STA_CNT1 0x1704
1188#define RX_STA_CNT1_FALSE_CCA FIELD32(0x0000ffff)
1189#define RX_STA_CNT1_PLCP_ERR FIELD32(0xffff0000)
1190
1191/*
1192 * RX_STA_CNT2:
1193 */
1194#define RX_STA_CNT2 0x1708
1195#define RX_STA_CNT2_RX_DUPLI_COUNT FIELD32(0x0000ffff)
1196#define RX_STA_CNT2_RX_FIFO_OVERFLOW FIELD32(0xffff0000)
1197
1198/*
1199 * TX_STA_CNT0: TX Beacon count
1200 */
1201#define TX_STA_CNT0 0x170c
1202#define TX_STA_CNT0_TX_FAIL_COUNT FIELD32(0x0000ffff)
1203#define TX_STA_CNT0_TX_BEACON_COUNT FIELD32(0xffff0000)
1204
1205/*
1206 * TX_STA_CNT1: TX tx count
1207 */
1208#define TX_STA_CNT1 0x1710
1209#define TX_STA_CNT1_TX_SUCCESS FIELD32(0x0000ffff)
1210#define TX_STA_CNT1_TX_RETRANSMIT FIELD32(0xffff0000)
1211
1212/*
1213 * TX_STA_CNT2: TX tx count
1214 */
1215#define TX_STA_CNT2 0x1714
1216#define TX_STA_CNT2_TX_ZERO_LEN_COUNT FIELD32(0x0000ffff)
1217#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
1218
1219/*
1220 * TX_STA_FIFO: TX Result for specific PID status fifo register
1221 */
#define TX_STA_FIFO 0x1718
#define TX_STA_FIFO_VALID FIELD32(0x00000001)
#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e)
#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020)
#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040)
#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080)
#define TX_STA_FIFO_WCID FIELD32(0x0000ff00)
/*
 * NOTE(review): SUCCESS_RATE (bits 16-31) intentionally overlaps the
 * narrower MCS (bits 16-22) and PHYMODE (bits 30-31) masks below —
 * they appear to be alternative decodings of the same rate word, not
 * distinct fields. Confirm against the RT28xx datasheet before "fixing".
 */
#define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000)
#define TX_STA_FIFO_MCS FIELD32(0x007f0000)
#define TX_STA_FIFO_PHYMODE FIELD32(0xc0000000)
1232
1233/*
1234 * TX_AGG_CNT: Debug counter
1235 */
1236#define TX_AGG_CNT 0x171c
1237#define TX_AGG_CNT_NON_AGG_TX_COUNT FIELD32(0x0000ffff)
1238#define TX_AGG_CNT_AGG_TX_COUNT FIELD32(0xffff0000)
1239
1240/*
1241 * TX_AGG_CNT0:
1242 */
1243#define TX_AGG_CNT0 0x1720
1244#define TX_AGG_CNT0_AGG_SIZE_1_COUNT FIELD32(0x0000ffff)
1245#define TX_AGG_CNT0_AGG_SIZE_2_COUNT FIELD32(0xffff0000)
1246
1247/*
1248 * TX_AGG_CNT1:
1249 */
1250#define TX_AGG_CNT1 0x1724
1251#define TX_AGG_CNT1_AGG_SIZE_3_COUNT FIELD32(0x0000ffff)
1252#define TX_AGG_CNT1_AGG_SIZE_4_COUNT FIELD32(0xffff0000)
1253
1254/*
1255 * TX_AGG_CNT2:
1256 */
1257#define TX_AGG_CNT2 0x1728
1258#define TX_AGG_CNT2_AGG_SIZE_5_COUNT FIELD32(0x0000ffff)
1259#define TX_AGG_CNT2_AGG_SIZE_6_COUNT FIELD32(0xffff0000)
1260
1261/*
1262 * TX_AGG_CNT3:
1263 */
1264#define TX_AGG_CNT3 0x172c
1265#define TX_AGG_CNT3_AGG_SIZE_7_COUNT FIELD32(0x0000ffff)
1266#define TX_AGG_CNT3_AGG_SIZE_8_COUNT FIELD32(0xffff0000)
1267
1268/*
1269 * TX_AGG_CNT4:
1270 */
1271#define TX_AGG_CNT4 0x1730
1272#define TX_AGG_CNT4_AGG_SIZE_9_COUNT FIELD32(0x0000ffff)
1273#define TX_AGG_CNT4_AGG_SIZE_10_COUNT FIELD32(0xffff0000)
1274
1275/*
1276 * TX_AGG_CNT5:
1277 */
1278#define TX_AGG_CNT5 0x1734
1279#define TX_AGG_CNT5_AGG_SIZE_11_COUNT FIELD32(0x0000ffff)
1280#define TX_AGG_CNT5_AGG_SIZE_12_COUNT FIELD32(0xffff0000)
1281
1282/*
1283 * TX_AGG_CNT6:
1284 */
1285#define TX_AGG_CNT6 0x1738
1286#define TX_AGG_CNT6_AGG_SIZE_13_COUNT FIELD32(0x0000ffff)
1287#define TX_AGG_CNT6_AGG_SIZE_14_COUNT FIELD32(0xffff0000)
1288
1289/*
1290 * TX_AGG_CNT7:
1291 */
1292#define TX_AGG_CNT7 0x173c
1293#define TX_AGG_CNT7_AGG_SIZE_15_COUNT FIELD32(0x0000ffff)
1294#define TX_AGG_CNT7_AGG_SIZE_16_COUNT FIELD32(0xffff0000)
1295
1296/*
1297 * MPDU_DENSITY_CNT:
1298 * TX_ZERO_DEL: TX zero length delimiter count
1299 * RX_ZERO_DEL: RX zero length delimiter count
1300 */
1301#define MPDU_DENSITY_CNT 0x1740
1302#define MPDU_DENSITY_CNT_TX_ZERO_DEL FIELD32(0x0000ffff)
1303#define MPDU_DENSITY_CNT_RX_ZERO_DEL FIELD32(0xffff0000)
1304
1305/*
1306 * Security key table memory.
1307 * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry
1308 * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry
1309 * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry
1310 * MAC_WCID_ATTRIBUTE_BASE: 4-byte * 256-entry
1311 * SHARED_KEY_TABLE_BASE: 32-byte * 16-entry
1312 * SHARED_KEY_MODE_BASE: 4-byte * 16-entry
1313 */
1314#define MAC_WCID_BASE 0x1800
1315#define PAIRWISE_KEY_TABLE_BASE 0x4000
1316#define MAC_IVEIV_TABLE_BASE 0x6000
1317#define MAC_WCID_ATTRIBUTE_BASE 0x6800
1318#define SHARED_KEY_TABLE_BASE 0x6c00
1319#define SHARED_KEY_MODE_BASE 0x7000
1320
/*
 * Per-entry address calculation for the security key tables above.
 * Each macro converts a table index into the absolute register offset
 * of that entry: base + index * entry size.
 */
#define MAC_WCID_ENTRY(__idx) \
	( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
#define PAIRWISE_KEY_ENTRY(__idx) \
	( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
/*
 * Bug fix: this previously used '&' (bitwise AND) instead of '*',
 * which collapsed every index onto base+0 or base+8 and aliased all
 * IV/EIV table entries. Multiply by the entry size like the siblings.
 */
#define MAC_IVEIV_ENTRY(__idx) \
	( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
	( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \
	( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define SHARED_KEY_MODE_ENTRY(__idx) \
	( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
1333
/*
 * One entry of the hardware WCID table at MAC_WCID_BASE: the station
 * MAC address padded with two reserved bytes to the 8-byte entry size
 * documented for the table ("8-bytes (use only 6 bytes) * 256 entry").
 */
struct mac_wcid_entry {
	u8 mac[6];
	u8 reserved[2];
} __attribute__ ((packed));
1338
/*
 * One 32-byte entry of the pairwise/shared key tables: a 16-byte
 * cipher key followed by per-direction 8-byte MIC keys
 * (presumably TKIP Michael keys — confirm against the datasheet).
 */
struct hw_key_entry {
	u8 key[16];
	u8 tx_mic[8];
	u8 rx_mic[8];
} __attribute__ ((packed));
1344
/*
 * One 8-byte entry of the per-WCID IV/EIV table at
 * MAC_IVEIV_TABLE_BASE (IV followed by extended IV state).
 */
struct mac_iveiv_entry {
	u8 iv[8];
} __attribute__ ((packed));
1348
/*
 * MAC_WCID_ATTRIBUTE:
 * KEYTAB: 1 = pairwise key table, 0 = shared key table
 *         (set from the key flags in rt2800_config_wcid_attr).
 * CIPHER: 3-bit cipher type, BSS_IDX: 3-bit BSS index.
 */
#define MAC_WCID_ATTRIBUTE_KEYTAB FIELD32(0x00000001)
#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)

/*
 * SHARED_KEY_MODE:
 * One 3-bit cipher mode per shared key; each 32-bit word carries
 * the 4 keys of 2 BSS indices (one nibble per key).
 */
#define SHARED_KEY_MODE_BSS0_KEY0 FIELD32(0x00000007)
#define SHARED_KEY_MODE_BSS0_KEY1 FIELD32(0x00000070)
#define SHARED_KEY_MODE_BSS0_KEY2 FIELD32(0x00000700)
#define SHARED_KEY_MODE_BSS0_KEY3 FIELD32(0x00007000)
#define SHARED_KEY_MODE_BSS1_KEY0 FIELD32(0x00070000)
#define SHARED_KEY_MODE_BSS1_KEY1 FIELD32(0x00700000)
#define SHARED_KEY_MODE_BSS1_KEY2 FIELD32(0x07000000)
#define SHARED_KEY_MODE_BSS1_KEY3 FIELD32(0x70000000)
1368
/*
 * HOST-MCU communication
 */

/*
 * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
 * OWNER must be polled clear before the host may post a new command
 * (see WAIT_FOR_MCU / rt2800_mcu_request in rt2800lib.c).
 */
#define H2M_MAILBOX_CSR 0x7010
#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff)
#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00)
#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000)
#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000)

/*
 * H2M_MAILBOX_CID:
 */
#define H2M_MAILBOX_CID 0x7014
#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff)
#define H2M_MAILBOX_CID_CMD1 FIELD32(0x0000ff00)
#define H2M_MAILBOX_CID_CMD2 FIELD32(0x00ff0000)
#define H2M_MAILBOX_CID_CMD3 FIELD32(0xff000000)

/*
 * H2M_MAILBOX_STATUS:
 */
#define H2M_MAILBOX_STATUS 0x701c

/*
 * H2M_INT_SRC:
 */
#define H2M_INT_SRC 0x7024

/*
 * H2M_BBP_AGENT:
 */
#define H2M_BBP_AGENT 0x7028

/*
 * MCU_LEDCS: LED control for MCU Mailbox.
 * NOTE(review): the LED_MODE (0x1f) and POLARITY (0x01) masks overlap
 * on bit 0 - verify against the vendor driver.
 */
#define MCU_LEDCS_LED_MODE FIELD8(0x1f)
#define MCU_LEDCS_POLARITY FIELD8(0x01)

/*
 * HW_CS_CTS_BASE:
 * Carrier-sense CTS frame base address.
 * It's where mac stores carrier-sense frame for carrier-sense function.
 */
#define HW_CS_CTS_BASE 0x7700

/*
 * HW_DFS_CTS_BASE:
 * DFS CTS frame base address. It's where mac stores CTS frame for DFS.
 */
#define HW_DFS_CTS_BASE 0x7780
1424
/*
 * TXRX control registers - base address 0x3000
 */

/*
 * TXRX_CSR1:
 * rt2860b UNKNOWN reg use R/O Reg Addr 0x77d0 first..
 */
#define TXRX_CSR1 0x77d0

/*
 * HW_DEBUG_SETTING_BASE:
 * since NULL frame won't be that long (256 byte),
 * we steal 16 tail bytes to save debugging settings.
 */
#define HW_DEBUG_SETTING_BASE 0x77f0
#define HW_DEBUG_SETTING_BASE2 0x7770

/*
 * HW_BEACON_BASE
 * In order to support a maximum of 8 MBSS with a maximum beacon
 * length of 512 bytes each, three discontiguous memory segments
 * are used:
 * 1. The original region for BCN 0~3
 * 2. Extract memory from FCE table for BCN 4~5
 * 3. Extract memory from Pair-wise key table for BCN 6~7
 * It occupies the memory of wcid 238~253 for BCN 6
 * and wcid 222~237 for BCN 7
 *
 * IMPORTANT NOTE: Not sure why the legacy driver does this,
 * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
 */
#define HW_BEACON_BASE0 0x7800
#define HW_BEACON_BASE1 0x7a00
#define HW_BEACON_BASE2 0x7c00
#define HW_BEACON_BASE3 0x7e00
#define HW_BEACON_BASE4 0x7200
#define HW_BEACON_BASE5 0x7400
#define HW_BEACON_BASE6 0x5dc0
#define HW_BEACON_BASE7 0x5bc0

/*
 * Map a beacon index (0-7) onto its base address.
 * FIX: every use of __index is now parenthesized; the old
 * (__index * 0x0200) mis-expanded for expression arguments
 * (e.g. HW_BEACON_OFFSET(1 + 1) became base + 1 + 1*0x200).
 */
#define HW_BEACON_OFFSET(__index) \
	( ((__index) < 4) ? ( HW_BEACON_BASE0 + ((__index) * 0x0200) ) : \
	  (((__index) < 6) ? ( HW_BEACON_BASE4 + (((__index) - 4) * 0x0200) ) : \
	  (HW_BEACON_BASE6 - (((__index) - 6) * 0x0200))) )
1470
/*
 * BBP registers.
 * The wordsize of the BBP is 8 bits.
 */

/*
 * BBP 1: TX Antenna
 */
#define BBP1_TX_POWER FIELD8(0x07)
#define BBP1_TX_ANTENNA FIELD8(0x18)

/*
 * BBP 3: RX Antenna
 */
#define BBP3_RX_ANTENNA FIELD8(0x18)
#define BBP3_HT40_PLUS FIELD8(0x20)

/*
 * BBP 4: Bandwidth
 */
#define BBP4_TX_BF FIELD8(0x01)
#define BBP4_BANDWIDTH FIELD8(0x18)

/*
 * RFCSR registers
 * The wordsize of the RFCSR is 8 bits.
 */

/*
 * RFCSR 6:
 */
#define RFCSR6_R FIELD8(0x03)

/*
 * RFCSR 7:
 */
#define RFCSR7_RF_TUNING FIELD8(0x01)

/*
 * RFCSR 12:
 */
#define RFCSR12_TX_POWER FIELD8(0x1f)

/*
 * RFCSR 22:
 */
#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)

/*
 * RFCSR 23:
 */
#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)

/*
 * RFCSR 30:
 */
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)

/*
 * RF registers
 */

/*
 * RF 2
 */
#define RF2_ANTENNA_RX2 FIELD32(0x00000040)
#define RF2_ANTENNA_TX1 FIELD32(0x00004000)
#define RF2_ANTENNA_RX1 FIELD32(0x00020000)

/*
 * RF 3
 * NOTE(review): TXPOWER_A (0x00003c00) overlaps TXPOWER_G and the
 * A_7DBM_BOOST bit - presumably band-dependent interpretation; verify.
 */
#define RF3_TXPOWER_G FIELD32(0x00003e00)
#define RF3_TXPOWER_A_7DBM_BOOST FIELD32(0x00000200)
#define RF3_TXPOWER_A FIELD32(0x00003c00)

/*
 * RF 4
 */
#define RF4_TXPOWER_G FIELD32(0x000007c0)
#define RF4_TXPOWER_A_7DBM_BOOST FIELD32(0x00000040)
#define RF4_TXPOWER_A FIELD32(0x00000780)
#define RF4_FREQ_OFFSET FIELD32(0x001f8000)
#define RF4_HT40 FIELD32(0x00200000)
/*
 * EEPROM content.
 * The wordsize of the EEPROM is 16 bits.
 */

/*
 * EEPROM Version
 */
#define EEPROM_VERSION 0x0001
#define EEPROM_VERSION_FAE FIELD16(0x00ff)
#define EEPROM_VERSION_VERSION FIELD16(0xff00)

/*
 * HW MAC address, stored as three 16-bit words (2 bytes each).
 */
#define EEPROM_MAC_ADDR_0 0x0002
#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
#define EEPROM_MAC_ADDR_1 0x0003
#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
#define EEPROM_MAC_ADDR_2 0x0004
#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)

/*
 * EEPROM ANTENNA config
 * RXPATH: 1: 1R, 2: 2R, 3: 3R
 * TXPATH: 1: 1T, 2: 2T
 */
#define EEPROM_ANTENNA 0x001a
#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f)
#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0)
#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00)

/*
 * EEPROM NIC config
 * CARDBUS_ACCEL: 0 - enable, 1 - disable
 */
#define EEPROM_NIC 0x001b
#define EEPROM_NIC_HW_RADIO FIELD16(0x0001)
#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002)
#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004)
#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008)
#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010)
#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020)
#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040)
#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
#define EEPROM_NIC_BW40M_A FIELD16(0x0200)

/*
 * EEPROM frequency
 * NOTE(review): LED_MODE (0x7f00) and LED_POLARITY (0x1000) masks
 * overlap on bit 12 - verify against the vendor driver.
 */
#define EEPROM_FREQ 0x001d
#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)

/*
 * EEPROM LED
 * POLARITY_RDY_G: Polarity RDY_G setting.
 * POLARITY_RDY_A: Polarity RDY_A setting.
 * POLARITY_ACT: Polarity ACT setting.
 * POLARITY_GPIO_0: Polarity GPIO0 setting.
 * POLARITY_GPIO_1: Polarity GPIO1 setting.
 * POLARITY_GPIO_2: Polarity GPIO2 setting.
 * POLARITY_GPIO_3: Polarity GPIO3 setting.
 * POLARITY_GPIO_4: Polarity GPIO4 setting.
 * LED_MODE: Led mode.
 */
#define EEPROM_LED1 0x001e
#define EEPROM_LED2 0x001f
#define EEPROM_LED3 0x0020
#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008)
#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010)
#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020)
#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040)
#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080)
#define EEPROM_LED_LED_MODE FIELD16(0x1f00)

/*
 * EEPROM LNA
 */
#define EEPROM_LNA 0x0022
#define EEPROM_LNA_BG FIELD16(0x00ff)
#define EEPROM_LNA_A0 FIELD16(0xff00)

/*
 * EEPROM RSSI BG offset
 */
#define EEPROM_RSSI_BG 0x0023
#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)

/*
 * EEPROM RSSI BG2 offset
 */
#define EEPROM_RSSI_BG2 0x0024
#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)

/*
 * EEPROM RSSI A offset
 */
#define EEPROM_RSSI_A 0x0025
#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)

/*
 * EEPROM RSSI A2 offset
 */
#define EEPROM_RSSI_A2 0x0026
#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)

/*
 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
 * This is the delta in 40MHZ.
 * VALUE: Tx Power delta value (MAX=4)
 * TYPE: 1: Plus the delta value, 0: minus the delta value
 * TXPOWER: Enable:
 */
#define EEPROM_TXPOWER_DELTA 0x0028
#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)

/*
 * EEPROM TXPOWER 802.11BG
 */
#define EEPROM_TXPOWER_BG1 0x0029
#define EEPROM_TXPOWER_BG2 0x0030
#define EEPROM_TXPOWER_BG_SIZE 7
#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)

/*
 * EEPROM TXPOWER 802.11A
 */
#define EEPROM_TXPOWER_A1 0x003c
#define EEPROM_TXPOWER_A2 0x0053
#define EEPROM_TXPOWER_A_SIZE 6
#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)

/*
 * EEPROM TXpower byrate: 20MHZ power
 */
#define EEPROM_TXPOWER_BYRATE 0x006f

/*
 * EEPROM BBP.
 */
#define EEPROM_BBP_START 0x0078
#define EEPROM_BBP_SIZE 16
#define EEPROM_BBP_VALUE FIELD16(0x00ff)
#define EEPROM_BBP_REG_ID FIELD16(0xff00)
1717
/*
 * MCU mailbox commands (written through H2M_MAILBOX_CSR /
 * HOST_CMD_CSR, see rt2800_mcu_request).
 */
#define MCU_SLEEP 0x30
#define MCU_WAKEUP 0x31
#define MCU_RADIO_OFF 0x35
#define MCU_CURRENT 0x36
#define MCU_LED 0x50
#define MCU_LED_STRENGTH 0x51
#define MCU_LED_1 0x52
#define MCU_LED_2 0x53
#define MCU_LED_3 0x54
#define MCU_RADAR 0x60
#define MCU_BOOT_SIGNAL 0x72
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83

/*
 * MCU mailbox tokens
 */
/* NOTE(review): "WAKUP" spelling kept as-is - it is the identifier in use. */
#define TOKEN_WAKUP 3

/*
 * DMA descriptor defines.
 * TX/RX Wireless Information words are 4 little-endian 32-bit words.
 */
#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) )
#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
1745
/*
 * TX WI structure
 */

/*
 * Word0
 * FRAG: 1 To inform TKIP engine this is a fragment.
 * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
 * BW: Channel bandwidth 20MHz or 40 MHz
 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
 */
#define TXWI_W0_FRAG FIELD32(0x00000001)
#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
#define TXWI_W0_CF_ACK FIELD32(0x00000004)
#define TXWI_W0_TS FIELD32(0x00000008)
#define TXWI_W0_AMPDU FIELD32(0x00000010)
#define TXWI_W0_MPDU_DENSITY FIELD32(0x000000e0)
#define TXWI_W0_TX_OP FIELD32(0x00000300)
#define TXWI_W0_MCS FIELD32(0x007f0000)
#define TXWI_W0_BW FIELD32(0x00800000)
#define TXWI_W0_SHORT_GI FIELD32(0x01000000)
#define TXWI_W0_STBC FIELD32(0x06000000)
#define TXWI_W0_IFS FIELD32(0x08000000)
#define TXWI_W0_PHYMODE FIELD32(0xc0000000)

/*
 * Word1
 */
#define TXWI_W1_ACK FIELD32(0x00000001)
#define TXWI_W1_NSEQ FIELD32(0x00000002)
#define TXWI_W1_BW_WIN_SIZE FIELD32(0x000000fc)
#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00)
#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
#define TXWI_W1_PACKETID FIELD32(0xf0000000)

/*
 * Word2: security IV.
 */
#define TXWI_W2_IV FIELD32(0xffffffff)

/*
 * Word3: security EIV.
 */
#define TXWI_W3_EIV FIELD32(0xffffffff)

/*
 * RX WI structure
 */

/*
 * Word0
 */
#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
#define RXWI_W0_BSSID FIELD32(0x00001c00)
#define RXWI_W0_UDF FIELD32(0x0000e000)
#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
#define RXWI_W0_TID FIELD32(0xf0000000)

/*
 * Word1
 */
#define RXWI_W1_FRAG FIELD32(0x0000000f)
#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
#define RXWI_W1_MCS FIELD32(0x007f0000)
#define RXWI_W1_BW FIELD32(0x00800000)
#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
#define RXWI_W1_STBC FIELD32(0x06000000)
#define RXWI_W1_PHYMODE FIELD32(0xc0000000)

/*
 * Word2: per-chain RSSI values.
 */
#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)

/*
 * Word3: per-chain SNR values.
 */
#define RXWI_W3_SNR0 FIELD32(0x000000ff)
#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
1829
/*
 * Macros for converting txpower from EEPROM to mac80211 value
 * and from mac80211 value to register value.
 * FROM_DEV falls back to DEFAULT_TXPOWER for out-of-range EEPROM
 * values; TO_DEV clamps to the valid register range.
 */
#define MIN_G_TXPOWER	0
#define MIN_A_TXPOWER	(-7)
#define MAX_G_TXPOWER	31
#define MAX_A_TXPOWER	15
#define DEFAULT_TXPOWER	5

/*
 * FIX: the full expansions are now parenthesized so the macros behave
 * as single expressions next to neighbouring operators (previously
 * "2 * TXPOWER_G_FROM_DEV(x)" bound the '*' to the comparison).
 */
#define TXPOWER_G_FROM_DEV(__txpower) \
	(((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))

#define TXPOWER_G_TO_DEV(__txpower) \
	clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)

#define TXPOWER_A_FROM_DEV(__txpower) \
	(((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))

#define TXPOWER_A_TO_DEV(__txpower) \
	clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
1851
1852#endif /* RT2800_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
new file mode 100644
index 000000000000..e94f1e13fea9
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -0,0 +1,2289 @@
1/*
2 Copyright (C) 2009 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
3 Copyright (C) 2009 Gertjan van Wingerde <gwingerde@gmail.com>
4
5 Based on the original rt2800pci.c and rt2800usb.c.
6 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
7 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
8 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
9 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
10 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
11 Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
12 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
13 <http://rt2x00.serialmonkey.com>
14
15 This program is free software; you can redistribute it and/or modify
16 it under the terms of the GNU General Public License as published by
17 the Free Software Foundation; either version 2 of the License, or
18 (at your option) any later version.
19
20 This program is distributed in the hope that it will be useful,
21 but WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 GNU General Public License for more details.
24
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the
27 Free Software Foundation, Inc.,
28 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
29 */
30
31/*
32 Module: rt2800lib
33 Abstract: rt2800 generic device routines.
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38
39#include "rt2x00.h"
40#ifdef CONFIG_RT2800USB
41#include "rt2x00usb.h"
42#endif
43#include "rt2800lib.h"
44#include "rt2800.h"
45#include "rt2800usb.h"
46
47MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
48MODULE_DESCRIPTION("rt2800 library");
49MODULE_LICENSE("GPL");
50
51/*
52 * Register access.
53 * All access to the CSR registers will go through the methods
54 * rt2800_register_read and rt2800_register_write.
55 * BBP and RF register require indirect register access,
56 * and use the CSR registers BBPCSR and RFCSR to achieve this.
57 * These indirect registers work with busy bits,
58 * and we will try maximal REGISTER_BUSY_COUNT times to access
59 * the register while taking a REGISTER_BUSY_DELAY us delay
 * between each attempt. When the busy bit is still set at that time,
61 * the access attempt is considered to have failed,
62 * and we will print an error.
63 * The _lock versions must be used if you already hold the csr_mutex
64 */
/* Busy-bit poll helpers for the indirect BBP/RFCSR/RF/MCU interfaces;
 * each returns the rt2800_regbusy_read() result and leaves the last
 * register value in *__reg. */
#define WAIT_FOR_BBP(__dev, __reg) \
	rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
#define WAIT_FOR_RFCSR(__dev, __reg) \
	rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
#define WAIT_FOR_RF(__dev, __reg) \
	rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
#define WAIT_FOR_MCU(__dev, __reg) \
	rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
			    H2M_MAILBOX_CSR_OWNER, (__reg))
75static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
76 const unsigned int word, const u8 value)
77{
78 u32 reg;
79
80 mutex_lock(&rt2x00dev->csr_mutex);
81
82 /*
83 * Wait until the BBP becomes available, afterwards we
84 * can safely write the new data into the register.
85 */
86 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
87 reg = 0;
88 rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
89 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
90 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
91 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
92 if (rt2x00_intf_is_pci(rt2x00dev))
93 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
94
95 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
96 }
97
98 mutex_unlock(&rt2x00dev->csr_mutex);
99}
100
101static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
102 const unsigned int word, u8 *value)
103{
104 u32 reg;
105
106 mutex_lock(&rt2x00dev->csr_mutex);
107
108 /*
109 * Wait until the BBP becomes available, afterwards we
110 * can safely write the read request into the register.
111 * After the data has been written, we wait until hardware
112 * returns the correct value, if at any time the register
113 * doesn't become available in time, reg will be 0xffffffff
114 * which means we return 0xff to the caller.
115 */
116 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
117 reg = 0;
118 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
119 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
120 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
121 if (rt2x00_intf_is_pci(rt2x00dev))
122 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
123
124 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
125
126 WAIT_FOR_BBP(rt2x00dev, &reg);
127 }
128
129 *value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
130
131 mutex_unlock(&rt2x00dev->csr_mutex);
132}
133
134static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
135 const unsigned int word, const u8 value)
136{
137 u32 reg;
138
139 mutex_lock(&rt2x00dev->csr_mutex);
140
141 /*
142 * Wait until the RFCSR becomes available, afterwards we
143 * can safely write the new data into the register.
144 */
145 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
146 reg = 0;
147 rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
148 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
149 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
150 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
151
152 rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
153 }
154
155 mutex_unlock(&rt2x00dev->csr_mutex);
156}
157
158static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
159 const unsigned int word, u8 *value)
160{
161 u32 reg;
162
163 mutex_lock(&rt2x00dev->csr_mutex);
164
165 /*
166 * Wait until the RFCSR becomes available, afterwards we
167 * can safely write the read request into the register.
168 * After the data has been written, we wait until hardware
169 * returns the correct value, if at any time the register
170 * doesn't become available in time, reg will be 0xffffffff
171 * which means we return 0xff to the caller.
172 */
173 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
174 reg = 0;
175 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
176 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
177 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
178
179 rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
180
181 WAIT_FOR_RFCSR(rt2x00dev, &reg);
182 }
183
184 *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
185
186 mutex_unlock(&rt2x00dev->csr_mutex);
187}
188
189static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
190 const unsigned int word, const u32 value)
191{
192 u32 reg;
193
194 mutex_lock(&rt2x00dev->csr_mutex);
195
196 /*
197 * Wait until the RF becomes available, afterwards we
198 * can safely write the new data into the register.
199 */
200 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
201 reg = 0;
202 rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
203 rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
204 rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
205 rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);
206
207 rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
208 rt2x00_rf_write(rt2x00dev, word, value);
209 }
210
211 mutex_unlock(&rt2x00dev->csr_mutex);
212}
213
214void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
215 const u8 command, const u8 token,
216 const u8 arg0, const u8 arg1)
217{
218 u32 reg;
219
220 if (rt2x00_intf_is_pci(rt2x00dev)) {
221 /*
222 * RT2880 and RT3052 don't support MCU requests.
223 */
224 if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
225 rt2x00_rt(&rt2x00dev->chip, RT3052))
226 return;
227 }
228
229 mutex_lock(&rt2x00dev->csr_mutex);
230
231 /*
232 * Wait until the MCU becomes available, afterwards we
233 * can safely write the new data into the register.
234 */
235 if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
236 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
237 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
238 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
239 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
240 rt2800_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);
241
242 reg = 0;
243 rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
244 rt2800_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
245 }
246
247 mutex_unlock(&rt2x00dev->csr_mutex);
248}
249EXPORT_SYMBOL_GPL(rt2800_mcu_request);
250
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
/*
 * debugfs register-space description: exposes the CSR, EEPROM, BBP
 * and RF register spaces through the rt2x00 debugfs interface using
 * the accessors defined above.
 */
const struct rt2x00debug rt2800_rt2x00debug = {
	.owner = THIS_MODULE,
	.csr = {
		.read = rt2800_register_read,
		.write = rt2800_register_write,
		.flags = RT2X00DEBUGFS_OFFSET,
		.word_base = CSR_REG_BASE,
		.word_size = sizeof(u32),
		.word_count = CSR_REG_SIZE / sizeof(u32),
	},
	.eeprom = {
		.read = rt2x00_eeprom_read,
		.write = rt2x00_eeprom_write,
		.word_base = EEPROM_BASE,
		.word_size = sizeof(u16),
		.word_count = EEPROM_SIZE / sizeof(u16),
	},
	.bbp = {
		.read = rt2800_bbp_read,
		.write = rt2800_bbp_write,
		.word_base = BBP_BASE,
		.word_size = sizeof(u8),
		.word_count = BBP_SIZE / sizeof(u8),
	},
	.rf = {
		/* RF reads come from the software cache, writes hit hardware. */
		.read = rt2x00_rf_read,
		.write = rt2800_rf_write,
		.word_base = RF_BASE,
		.word_size = sizeof(u32),
		.word_count = RF_SIZE / sizeof(u32),
	},
};
EXPORT_SYMBOL_GPL(rt2800_rt2x00debug);
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
286
287int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
288{
289 u32 reg;
290
291 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
292 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
293}
294EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
295
296#ifdef CONFIG_RT2X00_LIB_LEDS
297static void rt2800_brightness_set(struct led_classdev *led_cdev,
298 enum led_brightness brightness)
299{
300 struct rt2x00_led *led =
301 container_of(led_cdev, struct rt2x00_led, led_dev);
302 unsigned int enabled = brightness != LED_OFF;
303 unsigned int bg_mode =
304 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
305 unsigned int polarity =
306 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
307 EEPROM_FREQ_LED_POLARITY);
308 unsigned int ledmode =
309 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
310 EEPROM_FREQ_LED_MODE);
311
312 if (led->type == LED_TYPE_RADIO) {
313 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
314 enabled ? 0x20 : 0);
315 } else if (led->type == LED_TYPE_ASSOC) {
316 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
317 enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
318 } else if (led->type == LED_TYPE_QUALITY) {
319 /*
320 * The brightness is divided into 6 levels (0 - 5),
321 * The specs tell us the following levels:
322 * 0, 1 ,3, 7, 15, 31
323 * to determine the level in a simple way we can simply
324 * work with bitshifting:
325 * (1 << level) - 1
326 */
327 rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
328 (1 << brightness / (LED_FULL / 6)) - 1,
329 polarity);
330 }
331}
332
333static int rt2800_blink_set(struct led_classdev *led_cdev,
334 unsigned long *delay_on, unsigned long *delay_off)
335{
336 struct rt2x00_led *led =
337 container_of(led_cdev, struct rt2x00_led, led_dev);
338 u32 reg;
339
340 rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
341 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
342 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
343 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
344 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
345 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
346 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
347 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
348 rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
349
350 return 0;
351}
352
353void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
354 struct rt2x00_led *led, enum led_type type)
355{
356 led->rt2x00dev = rt2x00dev;
357 led->type = type;
358 led->led_dev.brightness_set = rt2800_brightness_set;
359 led->led_dev.blink_set = rt2800_blink_set;
360 led->flags = LED_INITIALIZED;
361}
362EXPORT_SYMBOL_GPL(rt2800_init_led);
363#endif /* CONFIG_RT2X00_LIB_LEDS */
364
365/*
366 * Configuration handlers.
367 */
/*
 * Program the per-WCID security state for a key: the WCID attribute
 * word, the IV/EIV template and the WCID MAC entry. On key removal
 * (crypto->cmd != SET_KEY) the multiply-by-(cmd == SET_KEY) idiom
 * writes zeros, clearing the cipher and BSS index.
 */
static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
				    struct rt2x00lib_crypto *crypto,
				    struct ieee80211_key_conf *key)
{
	struct mac_wcid_entry wcid_entry;
	struct mac_iveiv_entry iveiv_entry;
	u32 offset;
	u32 reg;

	offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);

	/* Attribute word: key table selection, cipher and BSS index. */
	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
			   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
			   (crypto->cmd == SET_KEY) * crypto->cipher);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
			   (crypto->cmd == SET_KEY) * crypto->bssidx);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
	rt2800_register_write(rt2x00dev, offset, reg);

	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);

	/*
	 * IV template: bit 0x20 of iv[3] is set for TKIP/AES (extended
	 * IV present), the top two bits carry the key index.
	 */
	memset(&iveiv_entry, 0, sizeof(iveiv_entry));
	if ((crypto->cipher == CIPHER_TKIP) ||
	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
	    (crypto->cipher == CIPHER_AES))
		iveiv_entry.iv[3] |= 0x20;
	iveiv_entry.iv[3] |= key->keyidx << 6;
	rt2800_register_multiwrite(rt2x00dev, offset,
				   &iveiv_entry, sizeof(iveiv_entry));

	offset = MAC_WCID_ENTRY(key->hw_key_idx);

	/* WCID entry: peer MAC address on SET_KEY, all-zero on delete. */
	memset(&wcid_entry, 0, sizeof(wcid_entry));
	if (crypto->cmd == SET_KEY)
		memcpy(&wcid_entry, crypto->address, ETH_ALEN);
	rt2800_register_multiwrite(rt2x00dev, offset,
				   &wcid_entry, sizeof(wcid_entry));
}
408
/*
 * Install or remove a shared (group) key. The hardware key index is
 * 4 * bssidx + keyidx (4 shared keys per BSS). On SET_KEY the key
 * material is written to the shared key table; in both cases the
 * per-key cipher mode and the WCID attributes are updated.
 * Always returns 0.
 */
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_crypto *crypto,
			     struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;
	struct rt2x00_field32 field;
	u32 offset;
	u32 reg;

	if (crypto->cmd == SET_KEY) {
		key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;

		memcpy(key_entry.key, crypto->key,
		       sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		offset = SHARED_KEY_ENTRY(key->hw_key_idx);
		rt2800_register_multiwrite(rt2x00dev, offset,
					   &key_entry, sizeof(key_entry));
	}

	/*
	 * The cipher types are stored over multiple registers
	 * starting with SHARED_KEY_MODE_BASE; each 32-bit word
	 * contains the cipher types for 2 bssidx (one nibble per key).
	 * Using the named defines here would cause overhead,
	 * so just calculate the correct field on the fly.
	 */
	field.bit_offset = 4 * (key->hw_key_idx % 8);
	field.bit_mask = 0x7 << field.bit_offset;

	offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);

	/* On key removal the cipher mode is cleared to 0 (none). */
	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, field,
			   (crypto->cmd == SET_KEY) * crypto->cipher);
	rt2800_register_write(rt2x00dev, offset, reg);

	/*
	 * Update WCID information
	 */
	rt2800_config_wcid_attr(rt2x00dev, crypto, key);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
458
/*
 * Install or remove a pairwise key. The hardware key index is the
 * peer's AID offset past the 32 shared-key WCIDs. Returns -ENOSPC
 * when the AID would collide with the beacon regions carved out of
 * the pairwise key table, 0 otherwise.
 */
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
			       struct rt2x00lib_crypto *crypto,
			       struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;
	u32 offset;

	if (crypto->cmd == SET_KEY) {
		/*
		 * 1 pairwise key is possible per AID, this means that the AID
		 * equals our hw_key_idx. Make sure the WCID starts _after_ the
		 * last possible shared key entry.
		 */
		if (crypto->aid > (256 - 32))
			return -ENOSPC;

		key->hw_key_idx = 32 + crypto->aid;

		memcpy(key_entry.key, crypto->key,
		       sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
		rt2800_register_multiwrite(rt2x00dev, offset,
					   &key_entry, sizeof(key_entry));
	}

	/*
	 * Update WCID information
	 */
	rt2800_config_wcid_attr(rt2x00dev, crypto, key);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key);
497
/*
 * Translate mac80211 FIF_* filter flags into the RX_FILTER_CFG drop
 * bits (each bit set means "drop that frame class").
 */
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
			  const unsigned int filter_flags)
{
	u32 reg;

	/*
	 * Start configuration steps.
	 * Note that the version error will always be dropped
	 * and broadcast frames will always be accepted since
	 * there is no filter for it at this time.
	 */
	rt2800_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
			   !(filter_flags & FIF_FCSFAIL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
			   !(filter_flags & FIF_PLCPFAIL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
			   !(filter_flags & FIF_PROMISC_IN_BSS));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
			   !(filter_flags & FIF_ALLMULTI));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
	/* All control-frame classes are keyed off FIF_CONTROL. */
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
			   !(filter_flags & FIF_PSPOLL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
			   !(filter_flags & FIF_CONTROL));
	rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
}
EXPORT_SYMBOL_GPL(rt2800_config_filter);
541
/*
 * rt2800_config_intf - apply interface configuration to the hardware.
 * @rt2x00dev: device to configure
 * @intf: interface being (re)configured (used for its beacon entry index)
 * @conf: new interface settings (sync mode, MAC address, BSSID)
 * @flags: CONFIG_UPDATE_* bits selecting which parts of @conf to apply
 *
 * Note: the MAC/BSSID words in @conf are modified in place before being
 * written to the registers (mask fields are patched into word 1).
 */
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
			struct rt2x00intf_conf *conf, const unsigned int flags)
{
	unsigned int beacon_base;
	u32 reg;

	if (flags & CONFIG_UPDATE_TYPE) {
		/*
		 * Clear current synchronisation setup.
		 * For the Beacon base registers we only need to clear
		 * the first byte since that byte contains the VALID and OWNER
		 * bits which (when set to 0) will invalidate the entire beacon.
		 */
		beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
		rt2800_register_write(rt2x00dev, beacon_base, 0);

		/*
		 * Enable synchronisation.
		 */
		rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
		/* TBTT interrupts are only needed when beaconing. */
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE,
				   (conf->sync == TSF_SYNC_BEACON));
		rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
	}

	if (flags & CONFIG_UPDATE_MAC) {
		/* Patch the unicast-to-me mask into the upper address word. */
		reg = le32_to_cpu(conf->mac[1]);
		rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
		conf->mac[1] = cpu_to_le32(reg);

		rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
					   conf->mac, sizeof(conf->mac));
	}

	if (flags & CONFIG_UPDATE_BSSID) {
		/* Clear multi-BSSID fields in the upper BSSID word. */
		reg = le32_to_cpu(conf->bssid[1]);
		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
		conf->bssid[1] = cpu_to_le32(reg);

		rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
					   conf->bssid, sizeof(conf->bssid));
	}
}
EXPORT_SYMBOL_GPL(rt2800_config_intf);
589
/*
 * rt2800_config_erp - apply ERP (802.11g protection/timing) settings.
 * @rt2x00dev: device to configure
 * @erp: ERP parameters from rt2x00lib (preamble, slot time, SIFS/EIFS,
 *       basic rates, CTS protection, beacon interval)
 */
void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
	rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);

	/*
	 * NOTE(review): both fields below are driven by short_preamble;
	 * BAC_ACK_POLICY tracking the preamble setting mirrors the
	 * original code — confirm against vendor reference if touched.
	 */
	rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
			   !!erp->short_preamble);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
			   !!erp->short_preamble);
	rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);

	/* 2 = CTS-to-self protection, 0 = no protection. */
	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
			   erp->cts_protection ? 2 : 0);
	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE,
			      erp->basic_rates);
	rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);

	rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
	rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);

	rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
	rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);

	/* Beacon interval register unit is 1/16 TU. */
	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
			   erp->beacon_int * 16);
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
633
/*
 * rt2800_config_ant - program TX/RX antenna selection into BBP registers.
 * @rt2x00dev: device to configure
 * @ant: antenna setup; .tx/.rx hold the number of chains (1, 2 or 3)
 *
 * BBP register 1 selects the TX antenna, BBP register 3 the RX antenna.
 */
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
{
	u8 r1;
	u8 r3;

	rt2800_bbp_read(rt2x00dev, 1, &r1);
	rt2800_bbp_read(rt2x00dev, 3, &r3);

	/*
	 * Configure the TX antenna.
	 */
	switch ((int)ant->tx) {
	case 1:
		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
		/*
		 * NOTE(review): for single-chain TX on PCI devices the RX
		 * antenna is forced to 0 as well — looks like a PCI chipset
		 * quirk; confirm against vendor reference before changing.
		 */
		if (rt2x00_intf_is_pci(rt2x00dev))
			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
		break;
	case 2:
		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
		break;
	case 3:
		/* Do nothing */
		break;
	}

	/*
	 * Configure the RX antenna.
	 */
	switch ((int)ant->rx) {
	case 1:
		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
		break;
	case 2:
		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
		break;
	case 3:
		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
		break;
	}

	rt2800_bbp_write(rt2x00dev, 3, r3);
	rt2800_bbp_write(rt2x00dev, 1, r1);
}
EXPORT_SYMBOL_GPL(rt2800_config_ant);
678
/*
 * rt2800_config_lna_gain - cache the LNA gain for the current channel.
 * @rt2x00dev: device to configure
 * @libconf: configuration holding the target channel number
 *
 * The EEPROM stores a separate LNA gain per band segment:
 * 2.4GHz (ch 1-14), low 5GHz (ch 36-64), mid 5GHz (ch 100-128) and
 * high 5GHz. The selected value is stored in rt2x00dev->lna_gain for
 * later use by channel configuration and VGC tuning.
 */
static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
				   struct rt2x00lib_conf *libconf)
{
	u16 eeprom;
	short lna_gain;

	if (libconf->rf.channel <= 14) {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
	} else if (libconf->rf.channel <= 64) {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
		lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
	} else if (libconf->rf.channel <= 128) {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
	} else {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
		lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
	}

	rt2x00dev->lna_gain = lna_gain;
}
701
/*
 * rt2800_config_channel_rt2x - switch channel on RT2800-style (RF word
 * register based) radios.
 * @rt2x00dev: device to configure
 * @conf: current mac80211 configuration (HT40 flag)
 * @rf: RF word values for the target channel (modified in place)
 * @info: TX power values for this channel (modified in place in A band)
 */
static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
				       struct ieee80211_conf *conf,
				       struct rf_channel *rf,
				       struct channel_info *info)
{
	rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);

	/* Disable unused TX/RX chains for single-antenna setups. */
	if (rt2x00dev->default_ant.tx == 1)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);

	if (rt2x00dev->default_ant.rx == 1) {
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
	} else if (rt2x00dev->default_ant.rx == 2)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);

	if (rf->channel > 14) {
		/*
		 * When TX power is below 0, we should increase it by 7 to
		 * make it a positive value (Minumum value is -7).
		 * However this means that values between 0 and 7 have
		 * double meaning, and we should set a 7DBm boost flag.
		 */
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
				   (info->tx_power1 >= 0));

		if (info->tx_power1 < 0)
			info->tx_power1 += 7;

		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
				   TXPOWER_A_TO_DEV(info->tx_power1));

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
				   (info->tx_power2 >= 0));

		if (info->tx_power2 < 0)
			info->tx_power2 += 7;

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
				   TXPOWER_A_TO_DEV(info->tx_power2));
	} else {
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
				   TXPOWER_G_TO_DEV(info->tx_power1));
		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
				   TXPOWER_G_TO_DEV(info->tx_power2));
	}

	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));

	/*
	 * Program the RF words three times, toggling bit 2 of RF3
	 * (clear -> set -> clear) with settle delays in between.
	 * NOTE(review): this strobing sequence and the 200us delays are
	 * required by the RF chip's tuning procedure; do not reorder.
	 */
	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);
}
770
771static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
772 struct ieee80211_conf *conf,
773 struct rf_channel *rf,
774 struct channel_info *info)
775{
776 u8 rfcsr;
777
778 rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
779 rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
780
781 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
782 rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
783 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
784
785 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
786 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
787 TXPOWER_G_TO_DEV(info->tx_power1));
788 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
789
790 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
791 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
792 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
793
794 rt2800_rfcsr_write(rt2x00dev, 24,
795 rt2x00dev->calibration[conf_is_ht40(conf)]);
796
797 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
798 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
799 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
800}
801
/*
 * rt2800_config_channel - perform a full channel switch.
 * @rt2x00dev: device to configure
 * @conf: current mac80211 configuration (HT40 flags)
 * @rf: RF register values for the target channel
 * @info: TX power information for the target channel
 *
 * Dispatches the RF programming to the chip-specific helper, then
 * updates band-dependent BBP registers, band selection, PA/LNA pin
 * configuration and channel bandwidth.
 */
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
				  struct ieee80211_conf *conf,
				  struct rf_channel *rf,
				  struct channel_info *info)
{
	u32 reg;
	unsigned int tx_pin;
	u8 bbp;

	/* RT3070/RT3090 with the newer RF chips use the RFCSR interface. */
	if ((rt2x00_rt(&rt2x00dev->chip, RT3070) ||
	     rt2x00_rt(&rt2x00dev->chip, RT3090)) &&
	    (rt2x00_rf(&rt2x00dev->chip, RF2020) ||
	     rt2x00_rf(&rt2x00dev->chip, RF3020) ||
	     rt2x00_rf(&rt2x00dev->chip, RF3021) ||
	     rt2x00_rf(&rt2x00dev->chip, RF3022)))
		rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
	else
		rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);

	/*
	 * Change BBP settings
	 */
	rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 86, 0);

	/* Band- and LNA-dependent BBP gain values. */
	if (rf->channel <= 14) {
		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
			rt2800_bbp_write(rt2x00dev, 82, 0x62);
			rt2800_bbp_write(rt2x00dev, 75, 0x46);
		} else {
			rt2800_bbp_write(rt2x00dev, 82, 0x84);
			rt2800_bbp_write(rt2x00dev, 75, 0x50);
		}
	} else {
		rt2800_bbp_write(rt2x00dev, 82, 0xf2);

		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
			rt2800_bbp_write(rt2x00dev, 75, 0x46);
		else
			rt2800_bbp_write(rt2x00dev, 75, 0x50);
	}

	/* Select the TX band (2.4GHz vs 5GHz) and HT40 sideband. */
	rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
	rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);

	tx_pin = 0;

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.tx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
	}

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.rx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
	}

	/* Primary chain amplifiers are always on; PA follows the band. */
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);

	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);

	/* Bandwidth: 0 = 20MHz, 2 = 40MHz. */
	rt2800_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
	rt2800_bbp_write(rt2x00dev, 4, bbp);

	rt2800_bbp_read(rt2x00dev, 3, &bbp);
	rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2800_bbp_write(rt2x00dev, 3, bbp);

	/* RT2860C needs bandwidth-specific AGC tweaks. */
	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
		if (conf_is_ht40(conf)) {
			rt2800_bbp_write(rt2x00dev, 69, 0x1a);
			rt2800_bbp_write(rt2x00dev, 70, 0x0a);
			rt2800_bbp_write(rt2x00dev, 73, 0x16);
		} else {
			rt2800_bbp_write(rt2x00dev, 69, 0x16);
			rt2800_bbp_write(rt2x00dev, 70, 0x08);
			rt2800_bbp_write(rt2x00dev, 73, 0x11);
		}
	}

	/* Let the RF/BBP settle on the new channel. */
	msleep(1);
}
897
898static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
899 const int txpower)
900{
901 u32 reg;
902 u32 value = TXPOWER_G_TO_DEV(txpower);
903 u8 r1;
904
905 rt2800_bbp_read(rt2x00dev, 1, &r1);
906 rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
907 rt2800_bbp_write(rt2x00dev, 1, r1);
908
909 rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
910 rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
911 rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
912 rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
913 rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
914 rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
915 rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
916 rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
917 rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
918 rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
919
920 rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
921 rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
922 rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
923 rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
924 rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
925 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
926 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
927 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
928 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
929 rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
930
931 rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
932 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
933 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
934 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
935 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
936 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
937 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
938 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
939 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
940 rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
941
942 rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
943 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
944 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
945 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
946 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
947 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
948 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
949 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
950 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
951 rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
952
953 rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
954 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
955 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
956 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
957 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
958 rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
959}
960
/*
 * rt2800_config_retry_limit - program TX retry limits from mac80211.
 * @rt2x00dev: device to configure
 * @libconf: configuration holding the mac80211 retry counts
 */
static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
				      struct rt2x00lib_conf *libconf)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
			   libconf->conf->short_frame_max_tx_count);
	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
			   libconf->conf->long_frame_max_tx_count);
	/* Frames longer than this threshold use the long retry limit. */
	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
	rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
	rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
	rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
	rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
}
977
/*
 * rt2800_config_ps - enter or leave hardware power-save mode.
 * @rt2x00dev: device to configure
 * @libconf: configuration; IEEE80211_CONF_PS selects sleep vs awake
 *
 * Ordering matters: when going to sleep the autowakeup timer is armed
 * before the device state change; when waking, the device is brought
 * up first and the autowakeup timer is cleared afterwards.
 */
static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_conf *libconf)
{
	enum dev_state state =
	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
		STATE_SLEEP : STATE_AWAKE;
	u32 reg;

	if (state == STATE_SLEEP) {
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);

		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
		/* Wake up one beacon early relative to the listen interval. */
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
				   libconf->conf->listen_interval - 1);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	} else {
		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);

		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
	}
}
1007
/*
 * rt2800_config - apply a mac80211 configuration change to the device.
 * @rt2x00dev: device to configure
 * @libconf: rt2x00lib configuration wrapper
 * @flags: IEEE80211_CONF_CHANGE_* bits selecting what changed
 *
 * The LNA gain must be recomputed first because the channel switch
 * below writes BBP registers derived from it.
 */
void rt2800_config(struct rt2x00_dev *rt2x00dev,
		   struct rt2x00lib_conf *libconf,
		   const unsigned int flags)
{
	/* Always recalculate LNA gain before changing configuration */
	rt2800_config_lna_gain(rt2x00dev, libconf);

	if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
		rt2800_config_channel(rt2x00dev, libconf->conf,
				      &libconf->rf, &libconf->channel);
	if (flags & IEEE80211_CONF_CHANGE_POWER)
		rt2800_config_txpower(rt2x00dev, libconf->conf->power_level);
	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
		rt2800_config_retry_limit(rt2x00dev, libconf);
	if (flags & IEEE80211_CONF_CHANGE_PS)
		rt2800_config_ps(rt2x00dev, libconf);
}
EXPORT_SYMBOL_GPL(rt2800_config);
1026
1027/*
1028 * Link tuning
1029 */
/*
 * rt2800_link_stats - collect link quality statistics from the hardware.
 * @rt2x00dev: device to read from
 * @qual: link quality structure to fill (rx_failed only)
 */
void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
	u32 reg;

	/*
	 * Update FCS error count from register.
	 */
	rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
}
EXPORT_SYMBOL_GPL(rt2800_link_stats);
1041
1042static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1043{
1044 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1045 if (rt2x00_intf_is_usb(rt2x00dev) &&
1046 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
1047 return 0x1c + (2 * rt2x00dev->lna_gain);
1048 else
1049 return 0x2e + rt2x00dev->lna_gain;
1050 }
1051
1052 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1053 return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
1054 else
1055 return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
1056}
1057
1058static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
1059 struct link_qual *qual, u8 vgc_level)
1060{
1061 if (qual->vgc_level != vgc_level) {
1062 rt2800_bbp_write(rt2x00dev, 66, vgc_level);
1063 qual->vgc_level = vgc_level;
1064 qual->vgc_level_reg = vgc_level;
1065 }
1066}
1067
/*
 * rt2800_reset_tuner - restore the VGC level to its band-dependent default.
 * @rt2x00dev: device to program
 * @qual: link quality state to update
 */
void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
	rt2800_set_vgc(rt2x00dev, qual, rt2800_get_default_vgc(rt2x00dev));
}
EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
1073
1074void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
1075 const u32 count)
1076{
1077 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
1078 return;
1079
1080 /*
1081 * When RSSI is better then -80 increase VGC level with 0x10
1082 */
1083 rt2800_set_vgc(rt2x00dev, qual,
1084 rt2800_get_default_vgc(rt2x00dev) +
1085 ((qual->rssi > -80) * 0x10));
1086}
1087EXPORT_SYMBOL_GPL(rt2800_link_tuner);
1088
1089/*
1090 * Initialization functions.
1091 */
1092int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1093{
1094 u32 reg;
1095 unsigned int i;
1096
1097 if (rt2x00_intf_is_usb(rt2x00dev)) {
1098 /*
1099 * Wait until BBP and RF are ready.
1100 */
1101 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1102 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
1103 if (reg && reg != ~0)
1104 break;
1105 msleep(1);
1106 }
1107
1108 if (i == REGISTER_BUSY_COUNT) {
1109 ERROR(rt2x00dev, "Unstable hardware.\n");
1110 return -EBUSY;
1111 }
1112
1113 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1114 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
1115 reg & ~0x00002000);
1116 } else if (rt2x00_intf_is_pci(rt2x00dev))
1117 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1118
1119 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1120 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
1121 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
1122 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1123
1124 if (rt2x00_intf_is_usb(rt2x00dev)) {
1125 rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1126#ifdef CONFIG_RT2800USB
1127 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1128 USB_MODE_RESET, REGISTER_TIMEOUT);
1129#endif
1130 }
1131
1132 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
1133
1134 rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
1135 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
1136 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
1137 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
1138 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
1139 rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg);
1140
1141 rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg);
1142 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
1143 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
1144 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
1145 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
1146 rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg);
1147
1148 rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
1149 rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
1150
1151 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
1152
1153 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1154 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
1155 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
1156 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
1157 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
1158 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
1159 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
1160 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1161
1162 if (rt2x00_intf_is_usb(rt2x00dev) &&
1163 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
1164 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1165 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1166 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1167 } else {
1168 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
1169 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
1170 }
1171
1172 rt2800_register_read(rt2x00dev, TX_LINK_CFG, &reg);
1173 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
1174 rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
1175 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
1176 rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
1177 rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
1178 rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
1179 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
1180 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
1181 rt2800_register_write(rt2x00dev, TX_LINK_CFG, reg);
1182
1183 rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
1184 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
1185 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
1186 rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
1187
1188 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
1189 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
1190 if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
1191 rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
1192 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
1193 else
1194 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
1195 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
1196 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
1197 rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
1198
1199 rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
1200
1201 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
1202 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
1203 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
1204 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
1205 rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
1206 rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
1207 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
1208
1209 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
1210 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
1211 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
1212 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
1213 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1214 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1215 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1216 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1217 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1218 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1219 rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
1220
1221 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
1222 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
1223 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
1224 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
1225 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1226 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1227 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1228 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1229 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1230 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1231 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
1232
1233 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
1234 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
1235 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
1236 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
1237 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1238 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1239 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1240 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1241 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1242 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1243 rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
1244
1245 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
1246 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
1247 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
1248 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
1249 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1250 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1251 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1252 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1253 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1254 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1255 rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
1256
1257 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
1258 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
1259 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
1260 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
1261 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1262 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1263 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1264 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1265 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1266 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1267 rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
1268
1269 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
1270 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
1271 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
1272 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
1273 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1274 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1275 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1276 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1277 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1278 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1279 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
1280
1281 if (rt2x00_intf_is_usb(rt2x00dev)) {
1282 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
1283
1284 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1285 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
1286 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
1287 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
1288 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
1289 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
1290 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
1291 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
1292 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
1293 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
1294 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1295 }
1296
1297 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
1298 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
1299
1300 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
1301 rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
1302 rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
1303 IEEE80211_MAX_RTS_THRESHOLD);
1304 rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
1305 rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
1306
1307 rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
1308 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1309
1310 /*
1311 * ASIC will keep garbage value after boot, clear encryption keys.
1312 */
1313 for (i = 0; i < 4; i++)
1314 rt2800_register_write(rt2x00dev,
1315 SHARED_KEY_MODE_ENTRY(i), 0);
1316
1317 for (i = 0; i < 256; i++) {
1318 u32 wcid[2] = { 0xffffffff, 0x00ffffff };
1319 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
1320 wcid, sizeof(wcid));
1321
1322 rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
1323 rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
1324 }
1325
1326 /*
1327 * Clear all beacons
1328 * For the Beacon base registers we only need to clear
1329 * the first byte since that byte contains the VALID and OWNER
1330 * bits which (when set to 0) will invalidate the entire beacon.
1331 */
1332 rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
1333 rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
1334 rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
1335 rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
1336 rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
1337 rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
1338 rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
1339 rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
1340
1341 if (rt2x00_intf_is_usb(rt2x00dev)) {
1342 rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
1343 rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
1344 rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
1345 }
1346
1347 rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
1348 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
1349 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
1350 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
1351 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
1352 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
1353 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
1354 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
1355 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
1356 rt2800_register_write(rt2x00dev, HT_FBK_CFG0, reg);
1357
1358 rt2800_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
1359 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
1360 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
1361 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
1362 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
1363 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
1364 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
1365 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
1366 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
1367 rt2800_register_write(rt2x00dev, HT_FBK_CFG1, reg);
1368
1369 rt2800_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
1370 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
1371 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
1372 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
1373 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
1374 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
1375 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
1376 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
1377 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
1378 rt2800_register_write(rt2x00dev, LG_FBK_CFG0, reg);
1379
1380 rt2800_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
1381 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
1382 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
1383 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
1384 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
1385 rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg);
1386
1387 /*
1388 * We must clear the error counters.
1389 * These registers are cleared on read,
1390 * so we may pass a useless variable to store the value.
1391 */
1392 rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
1393 rt2800_register_read(rt2x00dev, RX_STA_CNT1, &reg);
1394 rt2800_register_read(rt2x00dev, RX_STA_CNT2, &reg);
1395 rt2800_register_read(rt2x00dev, TX_STA_CNT0, &reg);
1396 rt2800_register_read(rt2x00dev, TX_STA_CNT1, &reg);
1397 rt2800_register_read(rt2x00dev, TX_STA_CNT2, &reg);
1398
1399 return 0;
1400}
1401EXPORT_SYMBOL_GPL(rt2800_init_registers);
1402
1403static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
1404{
1405 unsigned int i;
1406 u32 reg;
1407
1408 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1409 rt2800_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
1410 if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
1411 return 0;
1412
1413 udelay(REGISTER_BUSY_DELAY);
1414 }
1415
1416 ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
1417 return -EACCES;
1418}
1419
1420static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1421{
1422 unsigned int i;
1423 u8 value;
1424
1425 /*
1426 * BBP was enabled after firmware was loaded,
1427 * but we need to reactivate it now.
1428 */
1429 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
1430 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1431 msleep(1);
1432
1433 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1434 rt2800_bbp_read(rt2x00dev, 0, &value);
1435 if ((value != 0xff) && (value != 0x00))
1436 return 0;
1437 udelay(REGISTER_BUSY_DELAY);
1438 }
1439
1440 ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
1441 return -EACCES;
1442}
1443
/*
 * rt2800_init_bbp - load the initial BBP (baseband processor) register set
 *
 * Waits until both the BBP/RF register bus and the BBP itself respond,
 * writes the default BBP register values, applies chipset/revision
 * specific overrides, and finally applies per-board BBP tweaks stored
 * in the EEPROM.
 *
 * Returns 0 on success, -EACCES when the BBP never becomes accessible.
 */
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
{
        unsigned int i;
        u16 eeprom;
        u8 reg_id;
        u8 value;

        if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
                     rt2800_wait_bbp_ready(rt2x00dev)))
                return -EACCES;

        /*
         * Default values; these magic numbers come from the vendor
         * reference driver and are not otherwise documented.
         */
        rt2800_bbp_write(rt2x00dev, 65, 0x2c);
        rt2800_bbp_write(rt2x00dev, 66, 0x38);
        rt2800_bbp_write(rt2x00dev, 69, 0x12);
        rt2800_bbp_write(rt2x00dev, 70, 0x0a);
        rt2800_bbp_write(rt2x00dev, 73, 0x10);
        rt2800_bbp_write(rt2x00dev, 81, 0x37);
        rt2800_bbp_write(rt2x00dev, 82, 0x62);
        rt2800_bbp_write(rt2x00dev, 83, 0x6a);
        rt2800_bbp_write(rt2x00dev, 84, 0x99);
        rt2800_bbp_write(rt2x00dev, 86, 0x00);
        rt2800_bbp_write(rt2x00dev, 91, 0x04);
        rt2800_bbp_write(rt2x00dev, 92, 0x00);
        rt2800_bbp_write(rt2x00dev, 103, 0x00);
        rt2800_bbp_write(rt2x00dev, 105, 0x05);

        /* Revision specific overrides of the defaults written above. */
        if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
                rt2800_bbp_write(rt2x00dev, 69, 0x16);
                rt2800_bbp_write(rt2x00dev, 73, 0x12);
        }

        if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
                rt2800_bbp_write(rt2x00dev, 84, 0x19);

        if (rt2x00_intf_is_usb(rt2x00dev) &&
            rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
                rt2800_bbp_write(rt2x00dev, 70, 0x0a);
                rt2800_bbp_write(rt2x00dev, 84, 0x99);
                rt2800_bbp_write(rt2x00dev, 105, 0x05);
        }

        if (rt2x00_intf_is_pci(rt2x00dev) &&
            rt2x00_rt(&rt2x00dev->chip, RT3052)) {
                rt2800_bbp_write(rt2x00dev, 31, 0x08);
                rt2800_bbp_write(rt2x00dev, 78, 0x0e);
                rt2800_bbp_write(rt2x00dev, 80, 0x08);
        }

        /*
         * Apply board specific BBP overrides from the EEPROM; each word
         * packs a register id plus value, 0xffff/0x0000 mean "unused".
         */
        for (i = 0; i < EEPROM_BBP_SIZE; i++) {
                rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);

                if (eeprom != 0xffff && eeprom != 0x0000) {
                        reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
                        value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
                        rt2800_bbp_write(rt2x00dev, reg_id, value);
                }
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_bbp);
1505
/*
 * rt2800_init_rx_filter - calibrate the RX bandpass filter (RFCSR 24)
 * @rt2x00dev: device to calibrate
 * @bw40: calibrate for 40MHz (true) or 20MHz (false) channel bandwidth
 * @rfcsr24: initial filter tuning value to start from
 * @filter_target: desired passband/stopband power difference
 *
 * Puts the RF into baseband loopback, injects a test tone first inside
 * the passband and then inside the stopband, and increments the RFCSR 24
 * tuning value until the measured attenuation reaches filter_target.
 * Returns the calibrated RFCSR 24 value (also left programmed in the RF).
 */
static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
                                bool bw40, u8 rfcsr24, u8 filter_target)
{
        unsigned int i;
        u8 bbp;
        u8 rfcsr;
        u8 passband;
        u8 stopband;
        u8 overtuned = 0;

        rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);

        /* Select the bandwidth being calibrated (BBP4 value 2 = BW40). */
        rt2800_bbp_read(rt2x00dev, 4, &bbp);
        rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
        rt2800_bbp_write(rt2x00dev, 4, bbp);

        /* Loop the baseband back so the tone can be measured internally. */
        rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
        rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);

        /*
         * Set power & frequency of passband test tone
         */
        rt2800_bbp_write(rt2x00dev, 24, 0);

        /* Retry until BBP 55 reports a non-zero passband power. */
        for (i = 0; i < 100; i++) {
                rt2800_bbp_write(rt2x00dev, 25, 0x90);
                msleep(1);

                rt2800_bbp_read(rt2x00dev, 55, &passband);
                if (passband)
                        break;
        }

        /*
         * Set power & frequency of stopband test tone
         */
        rt2800_bbp_write(rt2x00dev, 24, 0x06);

        /*
         * Widen the filter (increment RFCSR 24) until the stopband is
         * attenuated by at least filter_target relative to the passband.
         * overtuned counts steps that landed exactly on the target, so
         * the final value can be backed off by one step if we overshot.
         */
        for (i = 0; i < 100; i++) {
                rt2800_bbp_write(rt2x00dev, 25, 0x90);
                msleep(1);

                rt2800_bbp_read(rt2x00dev, 55, &stopband);

                if ((passband - stopband) <= filter_target) {
                        rfcsr24++;
                        overtuned += ((passband - stopband) == filter_target);
                } else
                        break;

                rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
        }

        rfcsr24 -= !!overtuned;

        rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
        return rfcsr24;
}
1565
/*
 * rt2800_init_rfcsr - program the RF chip and calibrate the RX filter
 *
 * Only applies to chips with RFCSR-style RF access (RT3070 on USB,
 * RF302x on PCI); other chips return 0 immediately.  Triggers the RF
 * self-calibration, loads the bus specific default RFCSR values, runs
 * the 20MHz/40MHz RX filter calibration and restores loopback/bandwidth
 * state afterwards.  Always returns 0.
 */
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
        u8 rfcsr;
        u8 bbp;

        /* Only RT3070 USB devices need this initialization. */
        if (rt2x00_intf_is_usb(rt2x00dev) &&
            rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
                return 0;

        /* On PCI only the RF302x family uses RFCSR registers. */
        if (rt2x00_intf_is_pci(rt2x00dev)) {
                if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
                    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
                    !rt2x00_rf(&rt2x00dev->chip, RF3022))
                        return 0;
        }

        /*
         * Init RF calibration.
         * Pulse the calibration bit: set it, wait 1ms, clear it again.
         */
        rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
        rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
        msleep(1);
        rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
        rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);

        /*
         * Bus specific default RFCSR values; magic numbers taken from
         * the vendor reference driver.
         */
        if (rt2x00_intf_is_usb(rt2x00dev)) {
                rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
                rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
                rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
                rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
                rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
                rt2800_rfcsr_write(rt2x00dev, 10, 0x71);
                rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
                rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
                rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
                rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
                rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
                rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
                rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
                rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
                rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
                rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
                rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
                rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
                rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
                rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
        } else if (rt2x00_intf_is_pci(rt2x00dev)) {
                rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
                rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
                rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
                rt2800_rfcsr_write(rt2x00dev, 3, 0x75);
                rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
                rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
                rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
                rt2800_rfcsr_write(rt2x00dev, 7, 0x50);
                rt2800_rfcsr_write(rt2x00dev, 8, 0x39);
                rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
                rt2800_rfcsr_write(rt2x00dev, 10, 0x60);
                rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
                rt2800_rfcsr_write(rt2x00dev, 12, 0x75);
                rt2800_rfcsr_write(rt2x00dev, 13, 0x75);
                rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
                rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
                rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
                rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
                rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
                rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
                rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
                rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
                rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
                rt2800_rfcsr_write(rt2x00dev, 23, 0x31);
                rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
                rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
                rt2800_rfcsr_write(rt2x00dev, 26, 0x25);
                rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
                rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
                rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
        }

        /*
         * Set RX Filter calibration for 20MHz and 40MHz
         */
        rt2x00dev->calibration[0] =
            rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
        rt2x00dev->calibration[1] =
            rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);

        /*
         * Set back to initial state
         */
        rt2800_bbp_write(rt2x00dev, 24, 0);

        /* Leave baseband loopback mode used during calibration. */
        rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
        rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
        rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);

        /*
         * set BBP back to BW20
         */
        rt2800_bbp_read(rt2x00dev, 4, &bbp);
        rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
        rt2800_bbp_write(rt2x00dev, 4, bbp);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
1673
1674int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
1675{
1676 u32 reg;
1677
1678 rt2800_register_read(rt2x00dev, EFUSE_CTRL, &reg);
1679
1680 return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
1681}
1682EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
1683
/*
 * rt2800_efuse_read - read one 16-byte block from the eFuse ROM
 * @rt2x00dev: device to read from
 * @i: EEPROM word index of the block (callers pass multiples of 8,
 *     since one eFuse block covers 8 16-bit words)
 *
 * Kicks an eFuse read cycle for the given address, waits for completion
 * and copies the four 32-bit data registers into the cached EEPROM
 * image.  Runs under csr_mutex because the indirect EFUSE registers
 * are shared state.
 */
static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
{
        u32 reg;

        mutex_lock(&rt2x00dev->csr_mutex);

        /* Program the address and kick off a read cycle (MODE 0). */
        rt2800_register_read_lock(rt2x00dev, EFUSE_CTRL, &reg);
        rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
        rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
        rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
        rt2800_register_write_lock(rt2x00dev, EFUSE_CTRL, reg);

        /* Wait until the EEPROM has been loaded */
        rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);

        /* Apparently the data is read from end to start */
        /* NOTE(review): the u32 casts assume two consecutive u16 eeprom
         * words alias one 32-bit register value; confirm endianness
         * handling happens elsewhere before changing this. */
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3,
                                  (u32 *)&rt2x00dev->eeprom[i]);
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2,
                                  (u32 *)&rt2x00dev->eeprom[i + 2]);
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1,
                                  (u32 *)&rt2x00dev->eeprom[i + 4]);
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0,
                                  (u32 *)&rt2x00dev->eeprom[i + 6]);

        mutex_unlock(&rt2x00dev->csr_mutex);
}
1711
1712void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
1713{
1714 unsigned int i;
1715
1716 for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8)
1717 rt2800_efuse_read(rt2x00dev, i);
1718}
1719EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
1720
/*
 * rt2800_validate_eeprom - sanity check and repair the cached EEPROM image
 *
 * Replaces obviously invalid EEPROM contents (erased 0xffff words,
 * missing MAC address, out-of-range RSSI offsets and LNA gains) with
 * sensible defaults so the rest of the driver can trust the values.
 * Always returns 0.
 */
int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
        u16 word;
        u8 *mac;
        u8 default_lna_gain;

        /*
         * Start validation of the data that has been read.
         * A missing/invalid MAC address is replaced by a random one.
         */
        mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
        if (!is_valid_ether_addr(mac)) {
                random_ether_addr(mac);
                EEPROM(rt2x00dev, "MAC: %pM\n", mac);
        }

        rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
        if (word == 0xffff) {
                /* Erased word: assume a 2x1 RF2820 configuration. */
                rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
                rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
                rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
                rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
                EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
        } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
                /*
                 * There is a max of 2 RX streams for RT28x0 series
                 */
                if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
                        rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
                rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
        }

        /* Erased NIC word: disable all optional features. */
        rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
        if (word == 0xffff) {
                rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
                rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
                rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
                EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
        }

        /* Erased low byte: install default frequency/LED settings. */
        rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
        if ((word & 0x00ff) == 0x00ff) {
                rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
                rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
                                   LED_MODE_TXRX_ACTIVITY);
                rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
                rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
                rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
                rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
                rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
                EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
        }

        /*
         * During the LNA validation we are going to use
         * lna0 as correct value. Note that EEPROM_LNA
         * is never validated.
         */
        rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
        default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);

        /* RSSI offsets outside +/-10 are treated as bogus and zeroed. */
        rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
        rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);

        rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
        if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
            rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
                rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
                                   default_lna_gain);
        rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);

        rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
        rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);

        rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
        if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
                rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
        if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
            rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
                rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
                                   default_lna_gain);
        rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800_validate_eeprom);
1824
/*
 * rt2800_init_eeprom - detect chipset and read device configuration
 *
 * Identifies the RF and RT chipsets from the EEPROM and CSR registers,
 * reads the default antenna configuration, frequency offset, external
 * LNA and hardware radio button flags, and initializes LED settings.
 *
 * Returns 0 on success, -ENODEV for an unsupported RT or RF chipset.
 */
int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
        u32 reg;
        u16 value;
        u16 eeprom;

        /*
         * Read EEPROM word for configuration.
         */
        rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);

        /*
         * Identify RF chipset.
         * The RF type comes from the EEPROM, the revision from MAC_CSR0.
         */
        value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
        rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);

        rt2x00_set_chip_rf(rt2x00dev, value, reg);

        if (rt2x00_intf_is_usb(rt2x00dev)) {
                struct rt2x00_chip *chip = &rt2x00dev->chip;

                /*
                 * The check for rt2860 is not a typo, some rt2870 hardware
                 * identifies itself as rt2860 in the CSR register.
                 */
                if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) ||
                    rt2x00_check_rev(chip, 0xfff00000, 0x28700000) ||
                    rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) {
                        rt2x00_set_chip_rt(rt2x00dev, RT2870);
                } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) {
                        rt2x00_set_chip_rt(rt2x00dev, RT3070);
                } else {
                        ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
                        return -ENODEV;
                }
        }
        rt2x00_print_chip(rt2x00dev);

        /* Reject RF chips this driver does not support. */
        if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
            !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
            !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
            !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
            !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
            !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
            !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
            !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
                ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
                return -ENODEV;
        }

        /*
         * Identify default antenna configuration.
         */
        rt2x00dev->default_ant.tx =
            rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
        rt2x00dev->default_ant.rx =
            rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);

        /*
         * Read frequency offset and RF programming sequence.
         */
        rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
        rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);

        /*
         * Read external LNA informations.
         */
        rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);

        if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
                __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
                __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);

        /*
         * Detect if this device has an hardware controlled radio.
         */
        if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
                __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);

        /*
         * Store led settings, for correct led behaviour.
         */
#ifdef CONFIG_RT2X00_LIB_LEDS
        rt2800_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
        rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
        rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);

        rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
#endif /* CONFIG_RT2X00_LIB_LEDS */

        return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
1920
/*
 * RF value list for rt28x0
 * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
 *
 * Each entry is { channel, rf1, rf2, rf3, rf4 }: the channel number
 * followed by the four RF programming words for that channel.
 */
static const struct rf_channel rf_vals[] = {
        { 1,  0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
        { 2,  0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
        { 3,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
        { 4,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
        { 5,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
        { 6,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
        { 7,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
        { 8,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
        { 9,  0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
        { 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
        { 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
        { 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
        { 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
        { 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },

        /* 802.11 UNI / HyperLan 2 */
        { 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
        { 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
        { 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
        { 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
        { 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
        { 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
        { 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
        { 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
        { 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
        { 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
        { 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
        { 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },

        /* 802.11 HyperLan 2 */
        { 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
        { 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
        { 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
        { 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
        { 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
        { 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
        { 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
        { 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
        { 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
        { 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
        { 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
        { 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
        { 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
        { 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
        { 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
        { 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },

        /* 802.11 UNII */
        { 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
        { 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
        { 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
        { 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
        { 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
        { 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
        { 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
        { 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f },
        { 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 },
        { 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 },
        { 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f },

        /* 802.11 Japan */
        { 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
        { 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
        { 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
        { 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
        { 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
        { 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
        { 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
};
1995
/*
 * RF value list for rt3070
 * Supports: 2.4 GHz
 *
 * Each entry is { channel, rf1, rf2, rf3, rf4 } in the compact RF302x
 * programming format.
 */
static const struct rf_channel rf_vals_302x[] = {
        {1,  241, 2, 2 },
        {2,  241, 2, 7 },
        {3,  242, 2, 2 },
        {4,  242, 2, 7 },
        {5,  243, 2, 2 },
        {6,  243, 2, 7 },
        {7,  244, 2, 2 },
        {8,  244, 2, 7 },
        {9,  245, 2, 2 },
        {10, 245, 2, 7 },
        {11, 246, 2, 2 },
        {12, 246, 2, 7 },
        {13, 247, 2, 2 },
        {14, 248, 2, 4 },
};
2016
2017int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2018{
2019 struct rt2x00_chip *chip = &rt2x00dev->chip;
2020 struct hw_mode_spec *spec = &rt2x00dev->spec;
2021 struct channel_info *info;
2022 char *tx_power1;
2023 char *tx_power2;
2024 unsigned int i;
2025 u16 eeprom;
2026
2027 /*
2028 * Initialize all hw fields.
2029 */
2030 rt2x00dev->hw->flags =
2031 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2032 IEEE80211_HW_SIGNAL_DBM |
2033 IEEE80211_HW_SUPPORTS_PS |
2034 IEEE80211_HW_PS_NULLFUNC_STACK;
2035
2036 if (rt2x00_intf_is_usb(rt2x00dev))
2037 rt2x00dev->hw->extra_tx_headroom =
2038 TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
2039 else if (rt2x00_intf_is_pci(rt2x00dev))
2040 rt2x00dev->hw->extra_tx_headroom = TXWI_DESC_SIZE;
2041
2042 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
2043 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
2044 rt2x00_eeprom_addr(rt2x00dev,
2045 EEPROM_MAC_ADDR_0));
2046
2047 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
2048
2049 /*
2050 * Initialize hw_mode information.
2051 */
2052 spec->supported_bands = SUPPORT_BAND_2GHZ;
2053 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2054
2055 if (rt2x00_rf(chip, RF2820) ||
2056 rt2x00_rf(chip, RF2720) ||
2057 (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) {
2058 spec->num_channels = 14;
2059 spec->channels = rf_vals;
2060 } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) {
2061 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2062 spec->num_channels = ARRAY_SIZE(rf_vals);
2063 spec->channels = rf_vals;
2064 } else if (rt2x00_rf(chip, RF3020) ||
2065 rt2x00_rf(chip, RF2020) ||
2066 rt2x00_rf(chip, RF3021) ||
2067 rt2x00_rf(chip, RF3022)) {
2068 spec->num_channels = ARRAY_SIZE(rf_vals_302x);
2069 spec->channels = rf_vals_302x;
2070 }
2071
2072 /*
2073 * Initialize HT information.
2074 */
2075 spec->ht.ht_supported = true;
2076 spec->ht.cap =
2077 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
2078 IEEE80211_HT_CAP_GRN_FLD |
2079 IEEE80211_HT_CAP_SGI_20 |
2080 IEEE80211_HT_CAP_SGI_40 |
2081 IEEE80211_HT_CAP_TX_STBC |
2082 IEEE80211_HT_CAP_RX_STBC |
2083 IEEE80211_HT_CAP_PSMP_SUPPORT;
2084 spec->ht.ampdu_factor = 3;
2085 spec->ht.ampdu_density = 4;
2086 spec->ht.mcs.tx_params =
2087 IEEE80211_HT_MCS_TX_DEFINED |
2088 IEEE80211_HT_MCS_TX_RX_DIFF |
2089 ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
2090 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
2091
2092 switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
2093 case 3:
2094 spec->ht.mcs.rx_mask[2] = 0xff;
2095 case 2:
2096 spec->ht.mcs.rx_mask[1] = 0xff;
2097 case 1:
2098 spec->ht.mcs.rx_mask[0] = 0xff;
2099 spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
2100 break;
2101 }
2102
2103 /*
2104 * Create channel information array
2105 */
2106 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
2107 if (!info)
2108 return -ENOMEM;
2109
2110 spec->channels_info = info;
2111
2112 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
2113 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
2114
2115 for (i = 0; i < 14; i++) {
2116 info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
2117 info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
2118 }
2119
2120 if (spec->num_channels > 14) {
2121 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
2122 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
2123
2124 for (i = 14; i < spec->num_channels; i++) {
2125 info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
2126 info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
2127 }
2128 }
2129
2130 return 0;
2131}
2132EXPORT_SYMBOL_GPL(rt2800_probe_hw_mode);
2133
2134/*
2135 * IEEE80211 stack callback functions.
2136 */
2137static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
2138 u32 *iv32, u16 *iv16)
2139{
2140 struct rt2x00_dev *rt2x00dev = hw->priv;
2141 struct mac_iveiv_entry iveiv_entry;
2142 u32 offset;
2143
2144 offset = MAC_IVEIV_ENTRY(hw_key_idx);
2145 rt2800_register_multiread(rt2x00dev, offset,
2146 &iveiv_entry, sizeof(iveiv_entry));
2147
2148 memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
2149 memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
2150}
2151
2152static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2153{
2154 struct rt2x00_dev *rt2x00dev = hw->priv;
2155 u32 reg;
2156 bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
2157
2158 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
2159 rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
2160 rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
2161
2162 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
2163 rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
2164 rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
2165
2166 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
2167 rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
2168 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
2169
2170 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
2171 rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
2172 rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
2173
2174 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
2175 rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
2176 rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
2177
2178 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
2179 rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
2180 rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
2181
2182 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
2183 rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
2184 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
2185
2186 return 0;
2187}
2188
/*
 * rt2800_conf_tx - mac80211 conf_tx callback
 *
 * Passes the new TX queue parameters through rt2x00lib for validation
 * and bookkeeping, then mirrors the accepted values into the WMM and
 * EDCA registers for the data queues (indices 0-3).
 *
 * Returns 0 on success or the error from rt2x00mac_conf_tx().
 */
static int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
                          const struct ieee80211_tx_queue_params *params)
{
        struct rt2x00_dev *rt2x00dev = hw->priv;
        struct data_queue *queue;
        struct rt2x00_field32 field;
        int retval;
        u32 reg;
        u32 offset;

        /*
         * First pass the configuration through rt2x00lib, that will
         * update the queue settings and validate the input. After that
         * we are free to update the registers based on the value
         * in the queue parameter.
         */
        retval = rt2x00mac_conf_tx(hw, queue_idx, params);
        if (retval)
                return retval;

        /*
         * We only need to perform additional register initialization
         * for WMM queues.
         */
        if (queue_idx >= 4)
                return 0;

        queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);

        /*
         * Update WMM TXOP register.
         * Two queues share each 32-bit WMM_TXOPn_CFG register: bit 1 of
         * the queue index selects the register, bit 0 the 16-bit half.
         */
        offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
        field.bit_offset = (queue_idx & 1) * 16;
        field.bit_mask = 0xffff << field.bit_offset;

        rt2800_register_read(rt2x00dev, offset, &reg);
        rt2x00_set_field32(&reg, field, queue->txop);
        rt2800_register_write(rt2x00dev, offset, reg);

        /*
         * Update WMM registers.
         * AIFSN/CWMIN/CWMAX pack one 4-bit nibble per queue.
         */
        field.bit_offset = queue_idx * 4;
        field.bit_mask = 0xf << field.bit_offset;

        rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
        rt2x00_set_field32(&reg, field, queue->aifs);
        rt2800_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);

        rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
        rt2x00_set_field32(&reg, field, queue->cw_min);
        rt2800_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);

        rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
        rt2x00_set_field32(&reg, field, queue->cw_max);
        rt2800_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);

        /* Update EDCA registers: each queue has its own EDCA_ACn_CFG. */
        offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);

        rt2800_register_read(rt2x00dev, offset, &reg);
        rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
        rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
        rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
        rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
        rt2800_register_write(rt2x00dev, offset, reg);

        return 0;
}
2255
2256static u64 rt2800_get_tsf(struct ieee80211_hw *hw)
2257{
2258 struct rt2x00_dev *rt2x00dev = hw->priv;
2259 u64 tsf;
2260 u32 reg;
2261
2262 rt2800_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
2263 tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
2264 rt2800_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
2265 tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
2266
2267 return tsf;
2268}
2269
/*
 * mac80211 callback table shared by the rt2800 bus drivers.  Most
 * operations are delegated to the generic rt2x00mac_* handlers; only
 * the TKIP sequence, RTS threshold, TX queue and TSF callbacks are
 * rt2800 specific.
 */
const struct ieee80211_ops rt2800_mac80211_ops = {
        .tx = rt2x00mac_tx,
        .start = rt2x00mac_start,
        .stop = rt2x00mac_stop,
        .add_interface = rt2x00mac_add_interface,
        .remove_interface = rt2x00mac_remove_interface,
        .config = rt2x00mac_config,
        .configure_filter = rt2x00mac_configure_filter,
        .set_tim = rt2x00mac_set_tim,
        .set_key = rt2x00mac_set_key,
        .get_stats = rt2x00mac_get_stats,
        .get_tkip_seq = rt2800_get_tkip_seq,
        .set_rts_threshold = rt2800_set_rts_threshold,
        .bss_info_changed = rt2x00mac_bss_info_changed,
        .conf_tx = rt2800_conf_tx,
        .get_tx_stats = rt2x00mac_get_tx_stats,
        .get_tsf = rt2800_get_tsf,
        .rfkill_poll = rt2x00mac_rfkill_poll,
};
EXPORT_SYMBOL_GPL(rt2800_mac80211_ops);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
new file mode 100644
index 000000000000..535ce22f2ac8
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -0,0 +1,151 @@
1/*
2 Copyright (C) 2009 Bartlomiej Zolnierkiewicz
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the
16 Free Software Foundation, Inc.,
17 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef RT2800LIB_H
21#define RT2800LIB_H
22
/*
 * Bus-specific register access callbacks.  The PCI and USB drivers
 * each fill in one of these and store it in rt2x00dev->priv; the
 * rt2800_register_*() wrappers below dispatch through it so the
 * chipset-generic code in rt2800lib.c stays bus agnostic.
 */
struct rt2800_ops {
	/* Single 32-bit register read, plain and lock-held variants. */
	void (*register_read)(struct rt2x00_dev *rt2x00dev,
			      const unsigned int offset, u32 *value);
	void (*register_read_lock)(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset, u32 *value);
	/* Single 32-bit register write, plain and lock-held variants. */
	void (*register_write)(struct rt2x00_dev *rt2x00dev,
			       const unsigned int offset, u32 value);
	void (*register_write_lock)(struct rt2x00_dev *rt2x00dev,
				    const unsigned int offset, u32 value);

	/* Bulk transfers of @length bytes starting at @offset. */
	void (*register_multiread)(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset,
				   void *value, const u32 length);
	void (*register_multiwrite)(struct rt2x00_dev *rt2x00dev,
				    const unsigned int offset,
				    const void *value, const u32 length);

	/* Poll @field at @offset until the hardware clears it. */
	int (*regbusy_read)(struct rt2x00_dev *rt2x00dev,
			    const unsigned int offset,
			    const struct rt2x00_field32 field, u32 *reg);
};
44
45static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
46 const unsigned int offset,
47 u32 *value)
48{
49 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
50
51 rt2800ops->register_read(rt2x00dev, offset, value);
52}
53
54static inline void rt2800_register_read_lock(struct rt2x00_dev *rt2x00dev,
55 const unsigned int offset,
56 u32 *value)
57{
58 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
59
60 rt2800ops->register_read_lock(rt2x00dev, offset, value);
61}
62
63static inline void rt2800_register_write(struct rt2x00_dev *rt2x00dev,
64 const unsigned int offset,
65 u32 value)
66{
67 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
68
69 rt2800ops->register_write(rt2x00dev, offset, value);
70}
71
72static inline void rt2800_register_write_lock(struct rt2x00_dev *rt2x00dev,
73 const unsigned int offset,
74 u32 value)
75{
76 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
77
78 rt2800ops->register_write_lock(rt2x00dev, offset, value);
79}
80
81static inline void rt2800_register_multiread(struct rt2x00_dev *rt2x00dev,
82 const unsigned int offset,
83 void *value, const u32 length)
84{
85 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
86
87 rt2800ops->register_multiread(rt2x00dev, offset, value, length);
88}
89
90static inline void rt2800_register_multiwrite(struct rt2x00_dev *rt2x00dev,
91 const unsigned int offset,
92 const void *value,
93 const u32 length)
94{
95 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
96
97 rt2800ops->register_multiwrite(rt2x00dev, offset, value, length);
98}
99
100static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
101 const unsigned int offset,
102 const struct rt2x00_field32 field,
103 u32 *reg)
104{
105 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
106
107 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
108}
109
/* MCU command interface (implemented in rt2800lib.c). */
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
			const u8 command, const u8 token,
			const u8 arg0, const u8 arg1);

/* Shared debugfs register description table. */
extern const struct rt2x00debug rt2800_rt2x00debug;

/* rfkill and LED support. */
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
		     struct rt2x00_led *led, enum led_type type);

/* Hardware crypto key programming. */
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_crypto *crypto,
			     struct ieee80211_key_conf *key);
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
			       struct rt2x00lib_crypto *crypto,
			       struct ieee80211_key_conf *key);

/* Runtime configuration handlers. */
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
			  const unsigned int filter_flags);
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
			struct rt2x00intf_conf *conf, const unsigned int flags);
void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp);
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant);
void rt2800_config(struct rt2x00_dev *rt2x00dev,
		   struct rt2x00lib_conf *libconf,
		   const unsigned int flags);

/* Link quality statistics and tuning. */
void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
		       const u32 count);

/* Device initialization sequences. */
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);

/* EEPROM / eFuse handling. */
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev);
int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev);
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev);

/* Shared mac80211 callback table. */
extern const struct ieee80211_ops rt2800_mac80211_ops;
150
151#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
new file mode 100644
index 000000000000..87a5094ae953
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -0,0 +1,1321 @@
1/*
2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
6 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
7 Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
8 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
9 Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
10 <http://rt2x00.serialmonkey.com>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the
24 Free Software Foundation, Inc.,
25 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27
28/*
29 Module: rt2800pci
30 Abstract: rt2800pci device specific routines.
31 Supported chipsets: RT2800E & RT2800ED.
32 */
33
34#include <linux/crc-ccitt.h>
35#include <linux/delay.h>
36#include <linux/etherdevice.h>
37#include <linux/init.h>
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/platform_device.h>
42#include <linux/eeprom_93cx6.h>
43
44#include "rt2x00.h"
45#include "rt2x00pci.h"
46#include "rt2x00soc.h"
47#include "rt2800lib.h"
48#include "rt2800.h"
49#include "rt2800pci.h"
50
/*
 * Kconfig defines CONFIG_*_MODULE (not CONFIG_*) when a feature is
 * built as a module; normalise the names so the rest of this file can
 * simply test the plain CONFIG_* symbol in both build modes.
 */
#ifdef CONFIG_RT2800PCI_PCI_MODULE
#define CONFIG_RT2800PCI_PCI
#endif

#ifdef CONFIG_RT2800PCI_WISOC_MODULE
#define CONFIG_RT2800PCI_WISOC
#endif
58
59/*
60 * Allow hardware encryption to be disabled.
61 */
62static int modparam_nohwcrypt = 1;
63module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
64MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
65
/*
 * Wait for the on-chip MCU to acknowledge a previously issued command.
 * The CID mailbox register holds up to four pending command tokens;
 * poll until @token appears in any slot, then clear the status and CID
 * registers (by writing all ones) so the mailbox is ready for the next
 * command.  Logs an error on timeout but still clears the mailbox.
 */
static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
{
	unsigned int i;
	u32 reg;

	/* Up to 200 polls, REGISTER_BUSY_DELAY microseconds apart. */
	for (i = 0; i < 200; i++) {
		rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);

		if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) ||
		    (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token))
			break;

		udelay(REGISTER_BUSY_DELAY);
	}

	if (i == 200)
		ERROR(rt2x00dev, "MCU request failed, no response from hardware\n");

	rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
89
#ifdef CONFIG_RT2800PCI_WISOC
/*
 * On wireless SoC platforms the EEPROM contents live in flash, mapped
 * at a fixed physical address; copy them into the driver's eeprom
 * cache.  KSEG1ADDR gives the uncached MIPS mapping -- the address is
 * hard-coded for the RT3052 (see the XXX below) and presumably wrong
 * for other SoCs.
 */
static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
	u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */

	memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
}
#else
/* Stub for builds without SoC support. */
static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
}
#endif /* CONFIG_RT2800PCI_WISOC */
102
103#ifdef CONFIG_RT2800PCI_PCI
104static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
105{
106 struct rt2x00_dev *rt2x00dev = eeprom->data;
107 u32 reg;
108
109 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
110
111 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
112 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
113 eeprom->reg_data_clock =
114 !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK);
115 eeprom->reg_chip_select =
116 !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT);
117}
118
119static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
120{
121 struct rt2x00_dev *rt2x00dev = eeprom->data;
122 u32 reg = 0;
123
124 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in);
125 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out);
126 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK,
127 !!eeprom->reg_data_clock);
128 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
129 !!eeprom->reg_chip_select);
130
131 rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
132}
133
134static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
135{
136 struct eeprom_93cx6 eeprom;
137 u32 reg;
138
139 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
140
141 eeprom.data = rt2x00dev;
142 eeprom.register_read = rt2800pci_eepromregister_read;
143 eeprom.register_write = rt2800pci_eepromregister_write;
144 eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ?
145 PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66;
146 eeprom.reg_data_in = 0;
147 eeprom.reg_data_out = 0;
148 eeprom.reg_data_clock = 0;
149 eeprom.reg_chip_select = 0;
150
151 eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom,
152 EEPROM_SIZE / sizeof(u16));
153}
154
/* Probe whether the chip stores its EEPROM data in eFuse ROM. */
static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
	return rt2800_efuse_detect(rt2x00dev);
}

/* Load the EEPROM contents from the on-chip eFuse ROM. */
static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
	rt2800_read_eeprom_efuse(rt2x00dev);
}
#else
/* Stubs for builds without PCI support. */
static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
{
}

static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
	return 0;
}

static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
}
#endif /* CONFIG_RT2800PCI_PCI */
178
/*
 * Firmware functions
 */
/* All PCI variants load the same firmware image. */
static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
	return FIRMWARE_RT2860;
}
186
/*
 * Validate a candidate firmware image before upload: it must be
 * exactly 8kb and its trailing CRC must match.  Returns FW_OK,
 * FW_BAD_LENGTH or FW_BAD_CRC.
 */
static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev,
				    const u8 *data, const size_t len)
{
	u16 fw_crc;
	u16 crc;

	/*
	 * Only support 8kb firmware files.
	 */
	if (len != 8192)
		return FW_BAD_LENGTH;

	/*
	 * The last 2 bytes in the firmware array are the crc checksum itself,
	 * this means that we should never pass those 2 bytes to the crc
	 * algorithm.
	 */
	fw_crc = (data[len - 2] << 8 | data[len - 1]);

	/*
	 * Use the crc ccitt algorithm.
	 * This will return the same value as the legacy driver which
	 * used bit ordering reversion on both the firmware bytes
	 * before input as well as on the final output.
	 * Obviously using crc ccitt directly is much more efficient.
	 */
	crc = crc_ccitt(~0, data, len - 2);

	/*
	 * There is a small difference between the crc-itu-t + bitrev and
	 * the crc-ccitt crc calculation. In the latter method the 2 bytes
	 * will be swapped, use swab16 to convert the crc to the correct
	 * value.
	 */
	crc = swab16(crc);

	return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
}
225
/*
 * Upload the validated firmware image to the device.  The sequence
 * (wait for stable MAC, disable DMA, select host RAM write, write
 * image, reset/start PBF, wait for ready, init BBP agents) follows
 * the hardware's required ordering; do not reorder these steps.
 * Returns 0 on success or -EBUSY if the hardware does not respond.
 */
static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev,
				   const u8 *data, const size_t len)
{
	unsigned int i;
	u32 reg;

	/*
	 * Wait for stable hardware: MAC_CSR0 reads as 0 or all-ones
	 * while the device is not ready.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
		if (reg && reg != ~0)
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		ERROR(rt2x00dev, "Unstable hardware.\n");
		return -EBUSY;
	}

	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
	rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);

	/*
	 * Disable DMA, will be reenabled later when enabling
	 * the radio.
	 */
	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	/*
	 * enable Host program ram write selection
	 */
	reg = 0;
	rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg);

	/*
	 * Write firmware to device.
	 */
	rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
				   data, len);

	/* Reset the MCU, then take it out of reset. */
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);

	/*
	 * Wait for device to stabilize.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
		if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		ERROR(rt2x00dev, "PBF system register not ready.\n");
		return -EBUSY;
	}

	/*
	 * Disable interrupts
	 */
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);

	/*
	 * Initialize BBP R/W access agent
	 */
	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);

	return 0;
}
306
307/*
308 * Initialization functions.
309 */
310static bool rt2800pci_get_entry_state(struct queue_entry *entry)
311{
312 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
313 u32 word;
314
315 if (entry->queue->qid == QID_RX) {
316 rt2x00_desc_read(entry_priv->desc, 1, &word);
317
318 return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
319 } else {
320 rt2x00_desc_read(entry_priv->desc, 1, &word);
321
322 return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
323 }
324}
325
326static void rt2800pci_clear_entry(struct queue_entry *entry)
327{
328 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
329 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
330 u32 word;
331
332 if (entry->queue->qid == QID_RX) {
333 rt2x00_desc_read(entry_priv->desc, 0, &word);
334 rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
335 rt2x00_desc_write(entry_priv->desc, 0, word);
336
337 rt2x00_desc_read(entry_priv->desc, 1, &word);
338 rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
339 rt2x00_desc_write(entry_priv->desc, 1, word);
340 } else {
341 rt2x00_desc_read(entry_priv->desc, 1, &word);
342 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
343 rt2x00_desc_write(entry_priv->desc, 1, word);
344 }
345}
346
/*
 * Program the DMA queue registers: reset all TX/RX index pointers,
 * then point the hardware at the four TX rings and the RX ring
 * (descriptor base, ring size, and start indices).  The per-ring
 * register names (TX_BASE_PTR0..3 etc.) are distinct hardware
 * offsets, hence the repeated blocks rather than a loop.
 */
static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_pci *entry_priv;
	u32 reg;

	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/*
	 * Initialize registers.
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	/* RX CPU index starts at the last entry (ring is fully free). */
	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2800_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit);
	rt2800_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1);
	rt2800_register_write(rt2x00dev, RX_DRX_IDX, 0);

	/*
	 * Enable global DMA configuration
	 */
	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2800_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
411
412/*
413 * Device state switch handlers.
414 */
415static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
416 enum dev_state state)
417{
418 u32 reg;
419
420 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
421 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
422 (state == STATE_RADIO_RX_ON) ||
423 (state == STATE_RADIO_RX_ON_LINK));
424 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
425}
426
/*
 * Enable or disable every interrupt source in INT_MASK_CSR at once.
 * When enabling, pending bits in INT_SOURCE_CSR are first acknowledged
 * (the register is write-1-to-clear) so no stale interrupt fires
 * immediately.
 */
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
				 enum dev_state state)
{
	int mask = (state == STATE_RADIO_IRQ_ON);
	u32 reg;

	/*
	 * When interrupts are being enabled, the interrupt registers
	 * should clear the register to assure a clean state.
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, mask);
	rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
463
464static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
465{
466 unsigned int i;
467 u32 reg;
468
469 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
470 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
471 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
472 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
473 return 0;
474
475 msleep(1);
476 }
477
478 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
479 return -EACCES;
480}
481
/*
 * Bring the radio up: run the full register/BBP/RF init sequence,
 * signal the firmware, then enable TX, DMA and RX in that order, and
 * finally push the EEPROM LED settings to the MCU.  Returns 0 on
 * success or -EIO when any initialization step fails.
 */
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u16 word;

	/*
	 * Initialize all registers.
	 */
	if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
		     rt2800pci_init_queues(rt2x00dev) ||
		     rt2800_init_registers(rt2x00dev) ||
		     rt2800pci_wait_wpdma_ready(rt2x00dev) ||
		     rt2800_init_bbp(rt2x00dev) ||
		     rt2800_init_rfcsr(rt2x00dev)))
		return -EIO;

	/*
	 * Send signal to firmware during boot time.
	 */
	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);

	/*
	 * Enable RX.
	 * First enable TX only, then DMA, then TX+RX together.
	 */
	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	/*
	 * Initialize LED control: each EEPROM_LEDx word is passed to
	 * the MCU as two byte-sized arguments (low byte, high byte).
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
	rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
			   word & 0xff, (word >> 8) & 0xff);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
	rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
			   word & 0xff, (word >> 8) & 0xff);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
	rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
			   word & 0xff, (word >> 8) & 0xff);

	return 0;
}
540
/*
 * Shut the radio down: stop the DMA engines, disable the MAC, power
 * and TX pins, then reset all queue index pointers and the PBF so the
 * next enable starts from a clean state.  A final DMA-ready wait is
 * performed best-effort; its result is deliberately ignored.
 */
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
	rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);

	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);

	/* Reset all TX and RX ring index pointers. */
	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/* Wait for DMA, ignore error */
	rt2800pci_wait_wpdma_ready(rt2x00dev);
}
575
/*
 * Move the device between sleep and awake power states via MCU
 * commands.  Always returns 0: the MCU request interface reports no
 * status, and rt2800pci_mcu_status() only logs on timeout.
 */
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
			       enum dev_state state)
{
	/*
	 * Always put the device to sleep (even when we intend to wakeup!)
	 * if the device is booting and wasn't asleep it will return
	 * failure when attempting to wakeup.
	 */
	rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);

	if (state == STATE_AWAKE) {
		rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
		rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
	}

	return 0;
}
593
/*
 * rt2x00lib set_device_state callback: translate the generic device
 * state request into the appropriate radio/RX/IRQ/power-state
 * operation.  Returns 0 on success, a negative errno otherwise
 * (-ENOTSUPP for unknown states); failures are also logged.
 */
static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
				      enum dev_state state)
{
	int retval = 0;

	switch (state) {
	case STATE_RADIO_ON:
		/*
		 * Before the radio can be enabled, the device first has
		 * to be woken up. After that it needs a bit of time
		 * to be fully awake and then the radio can be enabled.
		 */
		rt2800pci_set_state(rt2x00dev, STATE_AWAKE);
		msleep(1);
		retval = rt2800pci_enable_radio(rt2x00dev);
		break;
	case STATE_RADIO_OFF:
		/*
		 * After the radio has been disabled, the device should
		 * be put to sleep for powersaving.
		 */
		rt2800pci_disable_radio(rt2x00dev);
		rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
		break;
	case STATE_RADIO_RX_ON:
	case STATE_RADIO_RX_ON_LINK:
	case STATE_RADIO_RX_OFF:
	case STATE_RADIO_RX_OFF_LINK:
		rt2800pci_toggle_rx(rt2x00dev, state);
		break;
	case STATE_RADIO_IRQ_ON:
	case STATE_RADIO_IRQ_OFF:
		rt2800pci_toggle_irq(rt2x00dev, state);
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
	case STATE_STANDBY:
	case STATE_AWAKE:
		retval = rt2800pci_set_state(rt2x00dev, state);
		break;
	default:
		retval = -ENOTSUPP;
		break;
	}

	if (unlikely(retval))
		ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n",
		      state, retval);

	return retval;
}
645
646/*
647 * TX descriptor initialization
648 */
/*
 * Fill in the TXWI (TX wireless information, prepended to the frame
 * data in the headroom reserved by extra_tx_headroom) and the TXD
 * (DMA descriptor) for a frame about to be transmitted.  The TXWI
 * carries the PHY/rate/aggregation parameters; the TXD carries the
 * DMA scatter layout.
 */
static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
				    struct sk_buff *skb,
				    struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	__le32 *txd = skbdesc->desc;
	/* The TXWI lives directly before the frame data. */
	__le32 *txwi = (__le32 *)(skb->data - rt2x00dev->hw->extra_tx_headroom);
	u32 word;

	/*
	 * Initialize TX Info descriptor
	 */
	rt2x00_desc_read(txwi, 0, &word);
	rt2x00_set_field32(&word, TXWI_W0_FRAG,
			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
	rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
	rt2x00_set_field32(&word, TXWI_W0_TS,
			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_AMPDU,
			   test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
	rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
	rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
	rt2x00_set_field32(&word, TXWI_W0_BW,
			   test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
			   test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
	rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
	rt2x00_desc_write(txwi, 0, word);

	rt2x00_desc_read(txwi, 1, &word);
	rt2x00_set_field32(&word, TXWI_W1_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_NSEQ,
			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
	/* 0xff = no key; otherwise index of the hardware crypto entry. */
	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
			   txdesc->key_idx : 0xff);
	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
			   skb->len - txdesc->l2pad);
	/* Packet id encodes the queue so TX status can be matched back. */
	rt2x00_set_field32(&word, TXWI_W1_PACKETID,
			   skbdesc->entry->queue->qid + 1);
	rt2x00_desc_write(txwi, 1, word);

	/*
	 * Always write 0 to IV/EIV fields, hardware will insert the IV
	 * from the IVEIV register when TXD_W3_WIV is set to 0.
	 * When TXD_W3_WIV is set to 1 it will use the IV data
	 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
	 * crypto entry in the registers should be used to encrypt the frame.
	 */
	_rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
	_rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);

	/*
	 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contains a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
	 * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	rt2x00_desc_read(txd, 0, &word);
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	rt2x00_desc_read(txd, 1, &word);
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0,
			   rt2x00dev->hw->extra_tx_headroom);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	rt2x00_desc_read(txd, 2, &word);
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + rt2x00dev->hw->extra_tx_headroom);
	rt2x00_desc_write(txd, 2, word);

	rt2x00_desc_read(txd, 3, &word);
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);
}
744
745/*
746 * TX data initialization
747 */
/*
 * Copy a beacon frame (descriptor + frame data) into the per-entry
 * beacon area of device memory.  Beaconing is paused while the data
 * is being replaced; it is re-enabled later by the kick_tx_queue
 * handler for QID_BEACON.  Consumes and frees entry->skb.
 */
static void rt2800pci_write_beacon(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	unsigned int beacon_base;
	u32 reg;

	/*
	 * Disable beaconing while we are reloading the beacon data,
	 * otherwise we might be sending out invalid data.
	 */
	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);

	/*
	 * Write entire beacon with descriptor to register.
	 */
	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
	rt2800_register_multiwrite(rt2x00dev,
				   beacon_base,
				   skbdesc->desc, skbdesc->desc_len);
	rt2800_register_multiwrite(rt2x00dev,
				   beacon_base + skbdesc->desc_len,
				   entry->skb->data, entry->skb->len);

	/*
	 * Clean up beacon skb.
	 */
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
780
/*
 * Tell the hardware new frames are available on a queue.  For the
 * beacon queue this (re-)enables TSF/TBTT/beacon generation; for data
 * and management queues it advances the hardware's CPU index register
 * to the queue's current write index.
 */
static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
				    const enum data_queue_qid queue_idx)
{
	struct data_queue *queue;
	unsigned int idx, qidx = 0;
	u32 reg;

	if (queue_idx == QID_BEACON) {
		rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
			rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
			rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
			rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
		}
		return;
	}

	/* Only the AC/HCCA data queues and the management queue map
	 * onto hardware TX rings. */
	if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
		return;

	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
	idx = queue->index[Q_INDEX];

	/* The management queue uses hardware ring 5. */
	if (queue_idx == QID_MGMT)
		qidx = 5;
	else
		qidx = queue_idx;

	rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx);
}
812
813static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
814 const enum data_queue_qid qid)
815{
816 u32 reg;
817
818 if (qid == QID_BEACON) {
819 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
820 return;
821 }
822
823 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
824 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE));
825 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK));
826 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI));
827 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO));
828 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
829}
830
/*
 * RX control handlers
 */

/*
 * Parse a completed RX descriptor and the RXWI (RX wireless info)
 * header that the hardware placed in front of the received frame,
 * filling in @rxdesc for rt2x00lib. Also hands the ring entry back
 * to the hardware and strips the RXWI from the skb.
 */
static void rt2800pci_fill_rxdone(struct queue_entry *entry,
				  struct rxdone_entry_desc *rxdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	/* The RXWI descriptor is prepended to the frame data itself. */
	__le32 *rxwi = (__le32 *)entry->skb->data;
	u32 rxd3;
	u32 rxwi0;
	u32 rxwi1;
	u32 rxwi2;
	u32 rxwi3;

	rt2x00_desc_read(rxd, 3, &rxd3);
	rt2x00_desc_read(rxwi, 0, &rxwi0);
	rt2x00_desc_read(rxwi, 1, &rxwi1);
	rt2x00_desc_read(rxwi, 2, &rxwi2);
	rt2x00_desc_read(rxwi, 3, &rxwi3);

	if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		/*
		 * Unfortunately we don't know the cipher type used during
		 * decryption. This prevents us from providing correct
		 * statistics through debugfs.
		 */
		rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
		rxdesc->cipher_status =
		    rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
	}

	if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	/* Hardware inserted L2 padding between header and payload. */
	if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
		rxdesc->dev_flags |= RXDONE_L2PAD;
		skbdesc->flags |= SKBDESC_L2_PADDED;
	}

	if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
		rxdesc->flags |= RX_FLAG_SHORT_GI;

	if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
		rxdesc->flags |= RX_FLAG_40MHZ;

	/*
	 * Detect RX rate, always use MCS as signal type.
	 */
	rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
	rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
	rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);

	/*
	 * Mask of 0x8 bit to remove the short preamble flag.
	 */
	if (rxdesc->rate_mode == RATE_MODE_CCK)
		rxdesc->signal &= ~0x8;

	/* Average the RSSI/SNR readings of the two antennas. */
	rxdesc->rssi =
	    (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
	     rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;

	rxdesc->noise =
	    (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
	     rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;

	rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);

	/*
	 * Set RX IDX in register to inform hardware that we have handled
	 * this entry and it is available for reuse again.
	 */
	rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);

	/*
	 * Remove the RXWI descriptor from the start of the buffer and trim
	 * the skb to the MPDU length reported by the hardware.
	 */
	skb_pull(entry->skb, RXWI_DESC_SIZE);
	skb_trim(entry->skb, rxdesc->size);
}
932
/*
 * Interrupt functions.
 */

/*
 * Drain the TX_STA_FIFO register and report each completed TX entry
 * (including any missed entries) to rt2x00lib.
 */
static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	struct queue_entry *entry;
	struct queue_entry *entry_done;
	struct queue_entry_priv_pci *entry_priv;
	struct txdone_entry_desc txdesc;
	u32 word;
	u32 reg;
	u32 old_reg;
	unsigned int type;
	unsigned int index;
	u16 mcs, real_mcs;

	/*
	 * During each loop we will compare the freshly read
	 * TX_STA_FIFO register value with the value read from
	 * the previous loop. If the 2 values are equal then
	 * we should stop processing because the chance is
	 * quite big that the device has been unplugged and
	 * we risk going into an endless loop.
	 */
	old_reg = 0;

	while (1) {
		rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
			break;

		if (old_reg == reg)
			break;
		old_reg = reg;

		/*
		 * Skip this entry when it contains an invalid
		 * queue identification number.
		 */
		type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
		if (type >= QID_RX)
			continue;

		queue = rt2x00queue_get_queue(rt2x00dev, type);
		if (unlikely(!queue))
			continue;

		/*
		 * Skip this entry when it contains an invalid
		 * index number.
		 */
		index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
		if (unlikely(index >= queue->limit))
			continue;

		entry = &queue->entries[index];
		entry_priv = entry->priv_data;
		/* Read TXWI word 0 to recover the MCS we transmitted with. */
		rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);

		entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
		while (entry != entry_done) {
			/*
			 * Catch up.
			 * Just report any entries we missed as failed.
			 */
			WARNING(rt2x00dev,
				"TX status report missed for entry %d\n",
				entry_done->entry_idx);

			txdesc.flags = 0;
			__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
			txdesc.retry = 0;

			rt2x00lib_txdone(entry_done, &txdesc);
			entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
		}

		/*
		 * Obtain the status about this packet.
		 */
		txdesc.flags = 0;
		if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
			__set_bit(TXDONE_SUCCESS, &txdesc.flags);
		else
			__set_bit(TXDONE_FAILURE, &txdesc.flags);

		/*
		 * Ralink has a retry mechanism using a global fallback
		 * table. We setup this fallback table to try the immediate
		 * lower rate for all rates. In the TX_STA_FIFO,
		 * the MCS field contains the MCS used for the successful
		 * transmission. If the first transmission succeeds,
		 * we have mcs == tx_mcs. On the second transmission,
		 * we have mcs = tx_mcs - 1. So the number of
		 * retries is (tx_mcs - mcs).
		 */
		mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
		real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
		txdesc.retry = mcs - min(mcs, real_mcs);

		rt2x00lib_txdone(entry, &txdesc);
	}
}
1038
1039static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
1040{
1041 struct rt2x00_dev *rt2x00dev = dev_instance;
1042 u32 reg;
1043
1044 /* Read status and ACK all interrupts */
1045 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
1046 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
1047
1048 if (!reg)
1049 return IRQ_NONE;
1050
1051 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1052 return IRQ_HANDLED;
1053
1054 /*
1055 * 1 - Rx ring done interrupt.
1056 */
1057 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
1058 rt2x00pci_rxdone(rt2x00dev);
1059
1060 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
1061 rt2800pci_txdone(rt2x00dev);
1062
1063 return IRQ_HANDLED;
1064}
1065
1066/*
1067 * Device probe functions.
1068 */
1069static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1070{
1071 /*
1072 * Read EEPROM into buffer
1073 */
1074 switch (rt2x00dev->chip.rt) {
1075 case RT2880:
1076 case RT3052:
1077 rt2800pci_read_eeprom_soc(rt2x00dev);
1078 break;
1079 default:
1080 if (rt2800pci_efuse_detect(rt2x00dev))
1081 rt2800pci_read_eeprom_efuse(rt2x00dev);
1082 else
1083 rt2800pci_read_eeprom_pci(rt2x00dev);
1084 break;
1085 }
1086
1087 return rt2800_validate_eeprom(rt2x00dev);
1088}
1089
/*
 * Register access callbacks handed to the shared rt2800lib code.
 * PCI register access never sleeps, so the plain and _lock variants
 * map to the same functions.
 */
static const struct rt2800_ops rt2800pci_rt2800_ops = {
	.register_read = rt2x00pci_register_read,
	.register_read_lock = rt2x00pci_register_read, /* same for PCI */
	.register_write = rt2x00pci_register_write,
	.register_write_lock = rt2x00pci_register_write, /* same for PCI */

	.register_multiread = rt2x00pci_register_multiread,
	.register_multiwrite = rt2x00pci_register_multiwrite,

	.regbusy_read = rt2x00pci_regbusy_read,
};
1101
/*
 * Probe the hardware: read and validate the EEPROM, initialize the
 * hardware capabilities, and set the driver feature flags.
 * Returns 0 on success or a negative error code.
 */
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;

	/* Give rt2800lib access to the PCI register ops. */
	rt2x00dev->priv = (void *)&rt2800pci_rt2800_ops;

	/*
	 * Allocate eeprom data.
	 */
	retval = rt2800pci_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2800_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2800_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * This device has multiple filters for control frames
	 * and has a separate filter for PS Poll frames.
	 */
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags);
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags);

	/*
	 * This device requires firmware, except for the SoC
	 * variants (RT2880/RT3052).
	 */
	if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
	    !rt2x00_rt(&rt2x00dev->chip, RT3052))
		__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
	__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
	__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
	/* Hardware crypto can be disabled with the nohwcrypt modparam. */
	if (!modparam_nohwcrypt)
		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);

	/*
	 * Set the rssi offset.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	return 0;
}
1151
/*
 * Driver callbacks registered with the rt2x00 library. Device-generic
 * operations come from rt2800lib (rt2800_*), PCI bus handling from
 * rt2x00pci (rt2x00pci_*), and PCI-specific logic from this file
 * (rt2800pci_*).
 */
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
	.irq_handler = rt2800pci_interrupt,
	.probe_hw = rt2800pci_probe_hw,
	.get_firmware_name = rt2800pci_get_firmware_name,
	.check_firmware = rt2800pci_check_firmware,
	.load_firmware = rt2800pci_load_firmware,
	.initialize = rt2x00pci_initialize,
	.uninitialize = rt2x00pci_uninitialize,
	.get_entry_state = rt2800pci_get_entry_state,
	.clear_entry = rt2800pci_clear_entry,
	.set_device_state = rt2800pci_set_device_state,
	.rfkill_poll = rt2800_rfkill_poll,
	.link_stats = rt2800_link_stats,
	.reset_tuner = rt2800_reset_tuner,
	.link_tuner = rt2800_link_tuner,
	.write_tx_desc = rt2800pci_write_tx_desc,
	.write_tx_data = rt2x00pci_write_tx_data,
	.write_beacon = rt2800pci_write_beacon,
	.kick_tx_queue = rt2800pci_kick_tx_queue,
	.kill_tx_queue = rt2800pci_kill_tx_queue,
	.fill_rxdone = rt2800pci_fill_rxdone,
	.config_shared_key = rt2800_config_shared_key,
	.config_pairwise_key = rt2800_config_pairwise_key,
	.config_filter = rt2800_config_filter,
	.config_intf = rt2800_config_intf,
	.config_erp = rt2800_config_erp,
	.config_ant = rt2800_config_ant,
	.config = rt2800_config,
};
1181
/* RX ring layout: entry count, frame buffer size and descriptor size. */
static const struct data_queue_desc rt2800pci_queue_rx = {
	.entry_num = RX_ENTRIES,
	.data_size = AGGREGATION_SIZE,
	.desc_size = RXD_DESC_SIZE,
	.priv_size = sizeof(struct queue_entry_priv_pci),
};

/* TX ring layout. */
static const struct data_queue_desc rt2800pci_queue_tx = {
	.entry_num = TX_ENTRIES,
	.data_size = AGGREGATION_SIZE,
	.desc_size = TXD_DESC_SIZE,
	.priv_size = sizeof(struct queue_entry_priv_pci),
};

/* Beacon ring layout: beacons are register-written, hence no DMA data. */
static const struct data_queue_desc rt2800pci_queue_bcn = {
	.entry_num = 8 * BEACON_ENTRIES,
	.data_size = 0, /* No DMA required for beacons */
	.desc_size = TXWI_DESC_SIZE,
	.priv_size = sizeof(struct queue_entry_priv_pci),
};
1202
/*
 * Top-level device description tying together queue layouts, library
 * callbacks and mac80211 operations for all rt2800 PCI devices.
 */
static const struct rt2x00_ops rt2800pci_ops = {
	.name = KBUILD_MODNAME,
	.max_sta_intf = 1,
	.max_ap_intf = 8,
	.eeprom_size = EEPROM_SIZE,
	.rf_size = RF_SIZE,
	.tx_queues = NUM_TX_QUEUES,
	.rx = &rt2800pci_queue_rx,
	.tx = &rt2800pci_queue_tx,
	.bcn = &rt2800pci_queue_bcn,
	.lib = &rt2800pci_rt2x00_ops,
	.hw = &rt2800_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
	.debugfs = &rt2800_rt2x00debug,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};
1219
/*
 * RT2800pci module information.
 */

/* PCI vendor/device IDs supported by this driver. */
static struct pci_device_id rt2800pci_device_table[] = {
	{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7738), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ 0, }
};
1246
/* Module metadata; firmware/device table only apply to the PCI build. */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
#ifdef CONFIG_RT2800PCI_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_RT2800PCI_PCI */
MODULE_LICENSE("GPL");
1256
#ifdef CONFIG_RT2800PCI_WISOC
/*
 * Instantiate the SoC probe entry point for the compiled-in Ralink
 * SoC family.
 * NOTE(review): __rt2x00soc_probe is presumably a macro from
 * rt2x00soc.h that expands into the probe function referenced in the
 * platform_driver below - confirm against rt2x00soc.h.
 */
#if defined(CONFIG_RALINK_RT288X)
__rt2x00soc_probe(RT2880, &rt2800pci_ops);
#elif defined(CONFIG_RALINK_RT305X)
__rt2x00soc_probe(RT3052, &rt2800pci_ops);
#endif

static struct platform_driver rt2800soc_driver = {
	.driver = {
		.name = "rt2800_wmac",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
	.probe = __rt2x00soc_probe,
	.remove = __devexit_p(rt2x00soc_remove),
	.suspend = rt2x00soc_suspend,
	.resume = rt2x00soc_resume,
};
#endif /* CONFIG_RT2800PCI_WISOC */
1276
#ifdef CONFIG_RT2800PCI_PCI
/* PCI driver glue; probe/remove/pm handling lives in rt2x00pci. */
static struct pci_driver rt2800pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rt2800pci_device_table,
	.probe = rt2x00pci_probe,
	.remove = __devexit_p(rt2x00pci_remove),
	.suspend = rt2x00pci_suspend,
	.resume = rt2x00pci_resume,
};
#endif /* CONFIG_RT2800PCI_PCI */
1287
/*
 * Module init: register the platform (SoC) and/or PCI driver,
 * depending on the kernel configuration. Returns 0 on success.
 */
static int __init rt2800pci_init(void)
{
	int ret = 0;

#ifdef CONFIG_RT2800PCI_WISOC
	ret = platform_driver_register(&rt2800soc_driver);
	if (ret)
		return ret;
#endif
#ifdef CONFIG_RT2800PCI_PCI
	ret = pci_register_driver(&rt2800pci_driver);
	if (ret) {
		/* Undo the earlier platform registration on failure. */
#ifdef CONFIG_RT2800PCI_WISOC
		platform_driver_unregister(&rt2800soc_driver);
#endif
		return ret;
	}
#endif

	return ret;
}
1309
/* Module exit: unregister whichever drivers were registered at init. */
static void __exit rt2800pci_exit(void)
{
#ifdef CONFIG_RT2800PCI_PCI
	pci_unregister_driver(&rt2800pci_driver);
#endif
#ifdef CONFIG_RT2800PCI_WISOC
	platform_driver_unregister(&rt2800soc_driver);
#endif
}

module_init(rt2800pci_init);
module_exit(rt2800pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
new file mode 100644
index 000000000000..afc8e7da27cb
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -0,0 +1,159 @@
1/*
2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Alban Browaeys <prahal@yahoo.com>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Luis Correia <luis.f.correia@gmail.com>
6 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
7 Copyright (C) 2009 Mark Asselstine <asselsm@gmail.com>
8 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
9 Copyright (C) 2009 Bart Zolnierkiewicz <bzolnier@gmail.com>
10 <http://rt2x00.serialmonkey.com>
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the
24 Free Software Foundation, Inc.,
25 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 */
27
28/*
29 Module: rt2800pci
30 Abstract: Data structures and registers for the rt2800pci module.
31 Supported chipsets: RT2800E & RT2800ED.
32 */
33
34#ifndef RT2800PCI_H
35#define RT2800PCI_H
36
37/*
38 * PCI registers.
39 */
40
41/*
42 * E2PROM_CSR: EEPROM control register.
43 * RELOAD: Write 1 to reload eeprom content.
44 * TYPE: 0: 93c46, 1:93c66.
45 * LOAD_STATUS: 1:loading, 0:done.
46 */
47#define E2PROM_CSR 0x0004
48#define E2PROM_CSR_DATA_CLOCK FIELD32(0x00000001)
49#define E2PROM_CSR_CHIP_SELECT FIELD32(0x00000002)
50#define E2PROM_CSR_DATA_IN FIELD32(0x00000004)
51#define E2PROM_CSR_DATA_OUT FIELD32(0x00000008)
52#define E2PROM_CSR_TYPE FIELD32(0x00000030)
53#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
54#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
55
56/*
57 * Queue register offset macros
58 */
59#define TX_QUEUE_REG_OFFSET 0x10
60#define TX_BASE_PTR(__x) TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET)
61#define TX_MAX_CNT(__x) TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET)
62#define TX_CTX_IDX(__x) TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET)
63#define TX_DTX_IDX(__x) TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET)
64
65/*
66 * 8051 firmware image.
67 */
68#define FIRMWARE_RT2860 "rt2860.bin"
69#define FIRMWARE_IMAGE_BASE 0x2000
70
71/*
72 * DMA descriptor defines.
73 */
74#define TXD_DESC_SIZE ( 4 * sizeof(__le32) )
75#define RXD_DESC_SIZE ( 4 * sizeof(__le32) )
76
77/*
78 * TX descriptor format for TX, PRIO and Beacon Ring.
79 */
80
81/*
82 * Word0
83 */
84#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
85
86/*
87 * Word1
88 */
89#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
90#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
91#define TXD_W1_BURST FIELD32(0x00008000)
92#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
93#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
94#define TXD_W1_DMA_DONE FIELD32(0x80000000)
95
96/*
97 * Word2
98 */
99#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
100
101/*
102 * Word3
103 * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
104 * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
105 * 0:MGMT, 1:HCCA 2:EDCA
106 */
107#define TXD_W3_WIV FIELD32(0x01000000)
108#define TXD_W3_QSEL FIELD32(0x06000000)
109#define TXD_W3_TCO FIELD32(0x20000000)
110#define TXD_W3_UCO FIELD32(0x40000000)
111#define TXD_W3_ICO FIELD32(0x80000000)
112
113/*
114 * RX descriptor format for RX Ring.
115 */
116
117/*
118 * Word0
119 */
120#define RXD_W0_SDP0 FIELD32(0xffffffff)
121
122/*
123 * Word1
124 */
125#define RXD_W1_SDL1 FIELD32(0x00003fff)
126#define RXD_W1_SDL0 FIELD32(0x3fff0000)
127#define RXD_W1_LS0 FIELD32(0x40000000)
128#define RXD_W1_DMA_DONE FIELD32(0x80000000)
129
130/*
131 * Word2
132 */
133#define RXD_W2_SDP1 FIELD32(0xffffffff)
134
135/*
136 * Word3
137 * AMSDU: RX with 802.3 header, not 802.11 header.
138 * DECRYPTED: This frame is being decrypted.
139 */
140#define RXD_W3_BA FIELD32(0x00000001)
141#define RXD_W3_DATA FIELD32(0x00000002)
142#define RXD_W3_NULLDATA FIELD32(0x00000004)
143#define RXD_W3_FRAG FIELD32(0x00000008)
144#define RXD_W3_UNICAST_TO_ME FIELD32(0x00000010)
145#define RXD_W3_MULTICAST FIELD32(0x00000020)
146#define RXD_W3_BROADCAST FIELD32(0x00000040)
147#define RXD_W3_MY_BSS FIELD32(0x00000080)
148#define RXD_W3_CRC_ERROR FIELD32(0x00000100)
149#define RXD_W3_CIPHER_ERROR FIELD32(0x00000600)
150#define RXD_W3_AMSDU FIELD32(0x00000800)
151#define RXD_W3_HTC FIELD32(0x00001000)
152#define RXD_W3_RSSI FIELD32(0x00002000)
153#define RXD_W3_L2PAD FIELD32(0x00004000)
154#define RXD_W3_AMPDU FIELD32(0x00008000)
155#define RXD_W3_DECRYPTED FIELD32(0x00010000)
156#define RXD_W3_PLCP_SIGNAL FIELD32(0x00020000)
157#define RXD_W3_PLCP_RSSI FIELD32(0x00040000)
158
159#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 9fe770f7d7bb..9ab15c480701 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1,5 +1,9 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
6 Copyright (C) 2009 Axel Kollhofer <rain_maker@root-forum.org>
3 <http://rt2x00.serialmonkey.com> 7 <http://rt2x00.serialmonkey.com>
4 8
5 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
@@ -34,6 +38,8 @@
34 38
35#include "rt2x00.h" 39#include "rt2x00.h"
36#include "rt2x00usb.h" 40#include "rt2x00usb.h"
41#include "rt2800lib.h"
42#include "rt2800.h"
37#include "rt2800usb.h" 43#include "rt2800usb.h"
38 44
39/* 45/*
@@ -44,1027 +50,6 @@ module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
45 51
46/* 52/*
47 * Register access.
48 * All access to the CSR registers will go through the methods
49 * rt2x00usb_register_read and rt2x00usb_register_write.
50 * BBP and RF register require indirect register access,
51 * and use the CSR registers BBPCSR and RFCSR to achieve this.
52 * These indirect registers work with busy bits,
53 * and we will try maximal REGISTER_BUSY_COUNT times to access
54 * the register while taking a REGISTER_BUSY_DELAY us delay
55 * between each attampt. When the busy bit is still set at that time,
56 * the access attempt is considered to have failed,
57 * and we will print an error.
58 * The _lock versions must be used if you already hold the csr_mutex
59 */
60#define WAIT_FOR_BBP(__dev, __reg) \
61 rt2x00usb_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
62#define WAIT_FOR_RFCSR(__dev, __reg) \
63 rt2x00usb_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
64#define WAIT_FOR_RF(__dev, __reg) \
65 rt2x00usb_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
66#define WAIT_FOR_MCU(__dev, __reg) \
67 rt2x00usb_regbusy_read((__dev), H2M_MAILBOX_CSR, \
68 H2M_MAILBOX_CSR_OWNER, (__reg))
69
70static void rt2800usb_bbp_write(struct rt2x00_dev *rt2x00dev,
71 const unsigned int word, const u8 value)
72{
73 u32 reg;
74
75 mutex_lock(&rt2x00dev->csr_mutex);
76
77 /*
78 * Wait until the BBP becomes available, afterwards we
79 * can safely write the new data into the register.
80 */
81 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
82 reg = 0;
83 rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
84 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
85 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
86 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
87
88 rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
89 }
90
91 mutex_unlock(&rt2x00dev->csr_mutex);
92}
93
94static void rt2800usb_bbp_read(struct rt2x00_dev *rt2x00dev,
95 const unsigned int word, u8 *value)
96{
97 u32 reg;
98
99 mutex_lock(&rt2x00dev->csr_mutex);
100
101 /*
102 * Wait until the BBP becomes available, afterwards we
103 * can safely write the read request into the register.
104 * After the data has been written, we wait until hardware
105 * returns the correct value, if at any time the register
106 * doesn't become available in time, reg will be 0xffffffff
107 * which means we return 0xff to the caller.
108 */
109 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
110 reg = 0;
111 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
112 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
113 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
114
115 rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
116
117 WAIT_FOR_BBP(rt2x00dev, &reg);
118 }
119
120 *value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);
121
122 mutex_unlock(&rt2x00dev->csr_mutex);
123}
124
125static void rt2800usb_rfcsr_write(struct rt2x00_dev *rt2x00dev,
126 const unsigned int word, const u8 value)
127{
128 u32 reg;
129
130 mutex_lock(&rt2x00dev->csr_mutex);
131
132 /*
133 * Wait until the RFCSR becomes available, afterwards we
134 * can safely write the new data into the register.
135 */
136 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
137 reg = 0;
138 rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
139 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
140 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
141 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
142
143 rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
144 }
145
146 mutex_unlock(&rt2x00dev->csr_mutex);
147}
148
149static void rt2800usb_rfcsr_read(struct rt2x00_dev *rt2x00dev,
150 const unsigned int word, u8 *value)
151{
152 u32 reg;
153
154 mutex_lock(&rt2x00dev->csr_mutex);
155
156 /*
157 * Wait until the RFCSR becomes available, afterwards we
158 * can safely write the read request into the register.
159 * After the data has been written, we wait until hardware
160 * returns the correct value, if at any time the register
161 * doesn't become available in time, reg will be 0xffffffff
162 * which means we return 0xff to the caller.
163 */
164 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
165 reg = 0;
166 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
167 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
168 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
169
170 rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
171
172 WAIT_FOR_RFCSR(rt2x00dev, &reg);
173 }
174
175 *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
176
177 mutex_unlock(&rt2x00dev->csr_mutex);
178}
179
180static void rt2800usb_rf_write(struct rt2x00_dev *rt2x00dev,
181 const unsigned int word, const u32 value)
182{
183 u32 reg;
184
185 mutex_lock(&rt2x00dev->csr_mutex);
186
187 /*
188 * Wait until the RF becomes available, afterwards we
189 * can safely write the new data into the register.
190 */
191 if (WAIT_FOR_RF(rt2x00dev, &reg)) {
192 reg = 0;
193 rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
194 rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
195 rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
196 rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);
197
198 rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
199 rt2x00_rf_write(rt2x00dev, word, value);
200 }
201
202 mutex_unlock(&rt2x00dev->csr_mutex);
203}
204
205static void rt2800usb_mcu_request(struct rt2x00_dev *rt2x00dev,
206 const u8 command, const u8 token,
207 const u8 arg0, const u8 arg1)
208{
209 u32 reg;
210
211 mutex_lock(&rt2x00dev->csr_mutex);
212
213 /*
214 * Wait until the MCU becomes available, afterwards we
215 * can safely write the new data into the register.
216 */
217 if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
218 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
219 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
220 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
221 rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
222 rt2x00usb_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);
223
224 reg = 0;
225 rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
226 rt2x00usb_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
227 }
228
229 mutex_unlock(&rt2x00dev->csr_mutex);
230}
231
232#ifdef CONFIG_RT2X00_LIB_DEBUGFS
233static const struct rt2x00debug rt2800usb_rt2x00debug = {
234 .owner = THIS_MODULE,
235 .csr = {
236 .read = rt2x00usb_register_read,
237 .write = rt2x00usb_register_write,
238 .flags = RT2X00DEBUGFS_OFFSET,
239 .word_base = CSR_REG_BASE,
240 .word_size = sizeof(u32),
241 .word_count = CSR_REG_SIZE / sizeof(u32),
242 },
243 .eeprom = {
244 .read = rt2x00_eeprom_read,
245 .write = rt2x00_eeprom_write,
246 .word_base = EEPROM_BASE,
247 .word_size = sizeof(u16),
248 .word_count = EEPROM_SIZE / sizeof(u16),
249 },
250 .bbp = {
251 .read = rt2800usb_bbp_read,
252 .write = rt2800usb_bbp_write,
253 .word_base = BBP_BASE,
254 .word_size = sizeof(u8),
255 .word_count = BBP_SIZE / sizeof(u8),
256 },
257 .rf = {
258 .read = rt2x00_rf_read,
259 .write = rt2800usb_rf_write,
260 .word_base = RF_BASE,
261 .word_size = sizeof(u32),
262 .word_count = RF_SIZE / sizeof(u32),
263 },
264};
265#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
266
267static int rt2800usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
268{
269 u32 reg;
270
271 rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
272 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
273}
274
275#ifdef CONFIG_RT2X00_LIB_LEDS
/*
 * LED class brightness callback: translate a brightness request into an
 * MCU command, depending on the LED's role.
 *
 *  - LED_TYPE_RADIO:   argument 0x20 when enabled, 0 when off.
 *  - LED_TYPE_ASSOC:   0x60 (2.4GHz band) or 0xa0 (other band) when
 *                      enabled, 0x20 when off.
 *  - LED_TYPE_QUALITY: brightness is mapped onto 6 strength levels
 *                      (see inline comment below).
 *
 * Polarity and LED mode are taken from the EEPROM frequency word cached
 * in led_mcu_reg.
 */
static void rt2800usb_brightness_set(struct led_classdev *led_cdev,
				     enum led_brightness brightness)
{
	struct rt2x00_led *led =
	    container_of(led_cdev, struct rt2x00_led, led_dev);
	unsigned int enabled = brightness != LED_OFF;
	/* bg_mode: LED is being enabled while operating in the 2.4GHz band */
	unsigned int bg_mode =
	    (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
	unsigned int polarity =
	    rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
			       EEPROM_FREQ_LED_POLARITY);
	unsigned int ledmode =
	    rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
			       EEPROM_FREQ_LED_MODE);

	if (led->type == LED_TYPE_RADIO) {
		rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
				      enabled ? 0x20 : 0);
	} else if (led->type == LED_TYPE_ASSOC) {
		rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
				      enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
	} else if (led->type == LED_TYPE_QUALITY) {
		/*
		 * The brightness is divided into 6 levels (0 - 5),
		 * The specs tell us the following levels:
		 *	0, 1 ,3, 7, 15, 31
		 * to determine the level in a simple way we can simply
		 * work with bitshifting:
		 *	(1 << level) - 1
		 * (note: '/' binds tighter than '<<', so the shift amount
		 * is brightness / (LED_FULL / 6))
		 */
		rt2800usb_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
				      (1 << brightness / (LED_FULL / 6)) - 1,
				      polarity);
	}
}
311
312static int rt2800usb_blink_set(struct led_classdev *led_cdev,
313 unsigned long *delay_on,
314 unsigned long *delay_off)
315{
316 struct rt2x00_led *led =
317 container_of(led_cdev, struct rt2x00_led, led_dev);
318 u32 reg;
319
320 rt2x00usb_register_read(led->rt2x00dev, LED_CFG, &reg);
321 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
322 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
323 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
324 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
325 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
326 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
327 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
328 rt2x00usb_register_write(led->rt2x00dev, LED_CFG, reg);
329
330 return 0;
331}
332
333static void rt2800usb_init_led(struct rt2x00_dev *rt2x00dev,
334 struct rt2x00_led *led,
335 enum led_type type)
336{
337 led->rt2x00dev = rt2x00dev;
338 led->type = type;
339 led->led_dev.brightness_set = rt2800usb_brightness_set;
340 led->led_dev.blink_set = rt2800usb_blink_set;
341 led->flags = LED_INITIALIZED;
342}
343#endif /* CONFIG_RT2X00_LIB_LEDS */
344
345/*
346 * Configuration handlers.
347 */
/*
 * Update the WCID (wireless client ID) attribute, IV/EIV and MAC address
 * entries for a hardware key slot.
 *
 * On SET_KEY the cipher, bssidx and peer MAC address are programmed; on
 * key removal the "(crypto->cmd == SET_KEY) * x" expressions evaluate to
 * zero and the zeroed wcid_entry is written, clearing the slot.
 */
static void rt2800usb_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
				       struct rt2x00lib_crypto *crypto,
				       struct ieee80211_key_conf *key)
{
	struct mac_wcid_entry wcid_entry;
	struct mac_iveiv_entry iveiv_entry;
	u32 offset;
	u32 reg;

	offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);

	rt2x00usb_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
			   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
			   (crypto->cmd == SET_KEY) * crypto->cipher);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
			   (crypto->cmd == SET_KEY) * crypto->bssidx);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
	rt2x00usb_register_write(rt2x00dev, offset, reg);

	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);

	memset(&iveiv_entry, 0, sizeof(iveiv_entry));
	/*
	 * TKIP and AES need the 0x20 bit set in iv[3] (presumably the
	 * extended-IV indicator -- confirm against the register layout);
	 * the key index always goes into bits 6-7 of iv[3].
	 */
	if ((crypto->cipher == CIPHER_TKIP) ||
	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
	    (crypto->cipher == CIPHER_AES))
		iveiv_entry.iv[3] |= 0x20;
	iveiv_entry.iv[3] |= key->keyidx << 6;
	rt2x00usb_register_multiwrite(rt2x00dev, offset,
				      &iveiv_entry, sizeof(iveiv_entry));

	offset = MAC_WCID_ENTRY(key->hw_key_idx);

	memset(&wcid_entry, 0, sizeof(wcid_entry));
	if (crypto->cmd == SET_KEY)
		memcpy(&wcid_entry, crypto->address, ETH_ALEN);
	rt2x00usb_register_multiwrite(rt2x00dev, offset,
				      &wcid_entry, sizeof(wcid_entry));
}
388
/*
 * Install or remove a shared (group) key.
 *
 * Shared keys are stored 4 per BSS, so the hardware key index is
 * (4 * bssidx) + keyidx. On SET_KEY the key material plus TKIP TX/RX
 * MIC keys are uploaded in one bulk transfer; on removal only the
 * cipher mode and WCID attributes are cleared. Always returns 0.
 */
static int rt2800usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
				       struct rt2x00lib_crypto *crypto,
				       struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;
	struct rt2x00_field32 field;
	int timeout;
	u32 offset;
	u32 reg;

	if (crypto->cmd == SET_KEY) {
		key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;

		memcpy(key_entry.key, crypto->key,
		       sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		offset = SHARED_KEY_ENTRY(key->hw_key_idx);
		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
						    USB_VENDOR_REQUEST_OUT,
						    offset, &key_entry,
						    sizeof(key_entry),
						    timeout);
	}

	/*
	 * The cipher types are stored over multiple registers
	 * starting with SHARED_KEY_MODE_BASE each word will have
	 * 32 bits and contains the cipher types for 2 bssidx each.
	 * Using the correct defines correctly will cause overhead,
	 * so just calculate the correct offset.
	 */
	field.bit_offset = 4 * (key->hw_key_idx % 8);
	field.bit_mask = 0x7 << field.bit_offset;

	offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);

	rt2x00usb_register_read(rt2x00dev, offset, &reg);
	/* On key removal a cipher value of 0 is written, disabling the slot. */
	rt2x00_set_field32(&reg, field,
			   (crypto->cmd == SET_KEY) * crypto->cipher);
	rt2x00usb_register_write(rt2x00dev, offset, reg);

	/*
	 * Update WCID information
	 */
	rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);

	return 0;
}
442
/*
 * Install or remove a pairwise key.
 *
 * One pairwise key is possible per AID, so the AID maps directly onto
 * the hardware key index, offset by 32 so the WCIDs start after the
 * last possible shared key entry. Returns -ENOSPC when the AID would
 * collide with the reserved range, 0 otherwise.
 */
static int rt2800usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
					 struct rt2x00lib_crypto *crypto,
					 struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;
	int timeout;
	u32 offset;

	if (crypto->cmd == SET_KEY) {
		/*
		 * 1 pairwise key is possible per AID, this means that the AID
		 * equals our hw_key_idx. Make sure the WCID starts _after_ the
		 * last possible shared key entry.
		 */
		if (crypto->aid > (256 - 32))
			return -ENOSPC;

		key->hw_key_idx = 32 + crypto->aid;

		memcpy(key_entry.key, crypto->key,
		       sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
		timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
		rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
						    USB_VENDOR_REQUEST_OUT,
						    offset, &key_entry,
						    sizeof(key_entry),
						    timeout);
	}

	/*
	 * Update WCID information
	 */
	rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);

	return 0;
}
485
/*
 * Program the RX packet filter from mac80211 FIF_* filter flags.
 *
 * Each RX_FILTER_CFG_DROP_* field is set to 1 to drop that frame class;
 * the "!(filter_flags & FIF_x)" expressions therefore drop a class
 * exactly when the corresponding "pass" flag is absent.
 */
static void rt2800usb_config_filter(struct rt2x00_dev *rt2x00dev,
				    const unsigned int filter_flags)
{
	u32 reg;

	/*
	 * Start configuration steps.
	 * Note that the version error will always be dropped
	 * and broadcast frames will always be accepted since
	 * there is no filter for it at this time.
	 */
	rt2x00usb_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
			   !(filter_flags & FIF_FCSFAIL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
			   !(filter_flags & FIF_PLCPFAIL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
			   !(filter_flags & FIF_PROMISC_IN_BSS));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
			   !(filter_flags & FIF_ALLMULTI));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
	/* All control-frame subtypes are governed by the single FIF_CONTROL */
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
			   !(filter_flags & FIF_PSPOLL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
			   !(filter_flags & FIF_CONTROL));
	rt2x00usb_register_write(rt2x00dev, RX_FILTER_CFG, reg);
}
528
/*
 * Apply interface configuration changes: beacon/TSF synchronisation
 * setup, MAC address and BSSID. Which parts run is selected by the
 * CONFIG_UPDATE_* bits in @flags.
 */
static void rt2800usb_config_intf(struct rt2x00_dev *rt2x00dev,
				  struct rt2x00_intf *intf,
				  struct rt2x00intf_conf *conf,
				  const unsigned int flags)
{
	unsigned int beacon_base;
	u32 reg;

	if (flags & CONFIG_UPDATE_TYPE) {
		/*
		 * Clear current synchronisation setup.
		 * For the Beacon base registers we only need to clear
		 * the first byte since that byte contains the VALID and OWNER
		 * bits which (when set to 0) will invalidate the entire beacon.
		 */
		beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
		rt2x00usb_register_write(rt2x00dev, beacon_base, 0);

		/*
		 * Enable synchronisation.
		 */
		rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
	}

	if (flags & CONFIG_UPDATE_MAC) {
		/* Force the unicast-to-me mask in the upper MAC address word */
		reg = le32_to_cpu(conf->mac[1]);
		rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
		conf->mac[1] = cpu_to_le32(reg);

		rt2x00usb_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
					      conf->mac, sizeof(conf->mac));
	}

	if (flags & CONFIG_UPDATE_BSSID) {
		/* Clear the BSS id mask / beacon number fields before writing */
		reg = le32_to_cpu(conf->bssid[1]);
		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
		conf->bssid[1] = cpu_to_le32(reg);

		rt2x00usb_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
					      conf->bssid, sizeof(conf->bssid));
	}
}
576
/*
 * Program ERP-related registers: ACK timeout, preamble mode, CTS
 * protection, basic rates, slot/SIFS/EIFS timing and beacon interval.
 */
static void rt2800usb_config_erp(struct rt2x00_dev *rt2x00dev,
				 struct rt2x00lib_erp *erp)
{
	u32 reg;

	rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
	rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
			   !!erp->short_preamble);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
			   !!erp->short_preamble);
	rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	/* Protection control 2 enables CTS-to-self protection, 0 disables */
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
			   erp->cts_protection ? 2 : 0);
	rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE,
				 erp->basic_rates);
	rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);

	rt2x00usb_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
	rt2x00usb_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
	rt2x00usb_register_write(rt2x00dev, XIFS_TIME_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	/* Beacon interval is programmed in 1/16 TU units */
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
			   erp->beacon_int * 16);
	rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
}
620
621static void rt2800usb_config_ant(struct rt2x00_dev *rt2x00dev,
622 struct antenna_setup *ant)
623{
624 u8 r1;
625 u8 r3;
626
627 rt2800usb_bbp_read(rt2x00dev, 1, &r1);
628 rt2800usb_bbp_read(rt2x00dev, 3, &r3);
629
630 /*
631 * Configure the TX antenna.
632 */
633 switch ((int)ant->tx) {
634 case 1:
635 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
636 break;
637 case 2:
638 rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
639 break;
640 case 3:
641 /* Do nothing */
642 break;
643 }
644
645 /*
646 * Configure the RX antenna.
647 */
648 switch ((int)ant->rx) {
649 case 1:
650 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
651 break;
652 case 2:
653 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
654 break;
655 case 3:
656 rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
657 break;
658 }
659
660 rt2800usb_bbp_write(rt2x00dev, 3, r3);
661 rt2800usb_bbp_write(rt2x00dev, 1, r1);
662}
663
664static void rt2800usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
665 struct rt2x00lib_conf *libconf)
666{
667 u16 eeprom;
668 short lna_gain;
669
670 if (libconf->rf.channel <= 14) {
671 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
672 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
673 } else if (libconf->rf.channel <= 64) {
674 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
675 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
676 } else if (libconf->rf.channel <= 128) {
677 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
678 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
679 } else {
680 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
681 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
682 }
683
684 rt2x00dev->lna_gain = lna_gain;
685}
686
/*
 * Program the channel on RT2x-generation RF chips.
 *
 * The rf1-rf4 words are written three times with the 0x00000004 bit of
 * rf3 toggled off/on/off, with 200us settling delays in between --
 * presumably the RF-tuning strobe sequence for these chips (matches the
 * write pattern below; exact bit semantics not visible here).
 */
static void rt2800usb_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
					  struct ieee80211_conf *conf,
					  struct rf_channel *rf,
					  struct channel_info *info)
{
	rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);

	if (rt2x00dev->default_ant.tx == 1)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);

	if (rt2x00dev->default_ant.rx == 1) {
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
	} else if (rt2x00dev->default_ant.rx == 2)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);

	if (rf->channel > 14) {
		/*
		 * When TX power is below 0, we should increase it by 7 to
		 * make it a positive value (Minumum value is -7).
		 * However this means that values between 0 and 7 have
		 * double meaning, and we should set a 7DBm boost flag.
		 */
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
				   (info->tx_power1 >= 0));

		if (info->tx_power1 < 0)
			info->tx_power1 += 7;

		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
				   TXPOWER_A_TO_DEV(info->tx_power1));

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
				   (info->tx_power2 >= 0));

		if (info->tx_power2 < 0)
			info->tx_power2 += 7;

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
				   TXPOWER_A_TO_DEV(info->tx_power2));
	} else {
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
				   TXPOWER_G_TO_DEV(info->tx_power1));
		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
				   TXPOWER_G_TO_DEV(info->tx_power2));
	}

	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));

	rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
	rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
}
755
756static void rt2800usb_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
757 struct ieee80211_conf *conf,
758 struct rf_channel *rf,
759 struct channel_info *info)
760{
761 u8 rfcsr;
762
763 rt2800usb_rfcsr_write(rt2x00dev, 2, rf->rf1);
764 rt2800usb_rfcsr_write(rt2x00dev, 2, rf->rf3);
765
766 rt2800usb_rfcsr_read(rt2x00dev, 6, &rfcsr);
767 rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
768 rt2800usb_rfcsr_write(rt2x00dev, 6, rfcsr);
769
770 rt2800usb_rfcsr_read(rt2x00dev, 12, &rfcsr);
771 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
772 TXPOWER_G_TO_DEV(info->tx_power1));
773 rt2800usb_rfcsr_write(rt2x00dev, 12, rfcsr);
774
775 rt2800usb_rfcsr_read(rt2x00dev, 23, &rfcsr);
776 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
777 rt2800usb_rfcsr_write(rt2x00dev, 23, rfcsr);
778
779 rt2800usb_rfcsr_write(rt2x00dev, 24,
780 rt2x00dev->calibration[conf_is_ht40(conf)]);
781
782 rt2800usb_rfcsr_read(rt2x00dev, 23, &rfcsr);
783 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
784 rt2800usb_rfcsr_write(rt2x00dev, 23, rfcsr);
785}
786
/*
 * Top-level channel switch: program the RF chip (RT2x or RT3x path),
 * then adjust BBP gain/filter settings, band selection, TX pin
 * configuration and HT20/HT40 bandwidth for the new channel.
 */
static void rt2800usb_config_channel(struct rt2x00_dev *rt2x00dev,
				     struct ieee80211_conf *conf,
				     struct rf_channel *rf,
				     struct channel_info *info)
{
	u32 reg;
	unsigned int tx_pin;
	u8 bbp;

	if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
		rt2800usb_config_channel_rt2x(rt2x00dev, conf, rf, info);
	else
		rt2800usb_config_channel_rt3x(rt2x00dev, conf, rf, info);

	/*
	 * Change BBP settings
	 */
	rt2800usb_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
	rt2800usb_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
	rt2800usb_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
	rt2800usb_bbp_write(rt2x00dev, 86, 0);

	/* Band/LNA dependent BBP 82/75 tuning values */
	if (rf->channel <= 14) {
		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
			rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
			rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
		} else {
			rt2800usb_bbp_write(rt2x00dev, 82, 0x84);
			rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
		}
	} else {
		rt2800usb_bbp_write(rt2x00dev, 82, 0xf2);

		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
			rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
		else
			rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
	}

	rt2x00usb_register_read(rt2x00dev, TX_BAND_CFG, &reg);
	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
	rt2x00usb_register_write(rt2x00dev, TX_BAND_CFG, reg);

	tx_pin = 0;

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.tx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
	}

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.rx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
	}

	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
	/* Enable the PA matching the selected band only */
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);

	rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);

	/* BBP4 bandwidth: 0 for HT20, 2 for HT40 */
	rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
	rt2800usb_bbp_write(rt2x00dev, 4, bbp);

	rt2800usb_bbp_read(rt2x00dev, 3, &bbp);
	rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2800usb_bbp_write(rt2x00dev, 3, bbp);

	/* RT2860C needs different AGC/noise values per bandwidth */
	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
		if (conf_is_ht40(conf)) {
			rt2800usb_bbp_write(rt2x00dev, 69, 0x1a);
			rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
			rt2800usb_bbp_write(rt2x00dev, 73, 0x16);
		} else {
			rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
			rt2800usb_bbp_write(rt2x00dev, 70, 0x08);
			rt2800usb_bbp_write(rt2x00dev, 73, 0x11);
		}
	}

	/* Give the hardware time to settle on the new channel */
	msleep(1);
}
877
878static void rt2800usb_config_txpower(struct rt2x00_dev *rt2x00dev,
879 const int txpower)
880{
881 u32 reg;
882 u32 value = TXPOWER_G_TO_DEV(txpower);
883 u8 r1;
884
885 rt2800usb_bbp_read(rt2x00dev, 1, &r1);
886 rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
887 rt2800usb_bbp_write(rt2x00dev, 1, r1);
888
889 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
890 rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
891 rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
892 rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
893 rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
894 rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
895 rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
896 rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
897 rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
898 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
899
900 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
901 rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
902 rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
903 rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
904 rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
905 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
906 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
907 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
908 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
909 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
910
911 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
912 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
913 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
914 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
915 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
916 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
917 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
918 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
919 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
920 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
921
922 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
923 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
924 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
925 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
926 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
927 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
928 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
929 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
930 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
931 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
932
933 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
934 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
935 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
936 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
937 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
938 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
939}
940
941static void rt2800usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
942 struct rt2x00lib_conf *libconf)
943{
944 u32 reg;
945
946 rt2x00usb_register_read(rt2x00dev, TX_RTY_CFG, &reg);
947 rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
948 libconf->conf->short_frame_max_tx_count);
949 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
950 libconf->conf->long_frame_max_tx_count);
951 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
952 rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
953 rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
954 rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
955 rt2x00usb_register_write(rt2x00dev, TX_RTY_CFG, reg);
956}
957
/*
 * Configure power saving.
 *
 * Note the deliberate ordering: when entering sleep, the autowakeup
 * timer is configured before the device state changes; when waking up,
 * the device is brought awake first and the timer cleared afterwards.
 */
static void rt2800usb_config_ps(struct rt2x00_dev *rt2x00dev,
				struct rt2x00lib_conf *libconf)
{
	enum dev_state state =
	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
		STATE_SLEEP : STATE_AWAKE;
	u32 reg;

	if (state == STATE_SLEEP) {
		/* Reset the autowakeup register before reprogramming it */
		rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);

		rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
		/* Wake one beacon before the end of the listen interval */
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
				   libconf->conf->listen_interval - 1);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
		rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	} else {
		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);

		rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
		rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
	}
}
987
988static void rt2800usb_config(struct rt2x00_dev *rt2x00dev,
989 struct rt2x00lib_conf *libconf,
990 const unsigned int flags)
991{
992 /* Always recalculate LNA gain before changing configuration */
993 rt2800usb_config_lna_gain(rt2x00dev, libconf);
994
995 if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
996 rt2800usb_config_channel(rt2x00dev, libconf->conf,
997 &libconf->rf, &libconf->channel);
998 if (flags & IEEE80211_CONF_CHANGE_POWER)
999 rt2800usb_config_txpower(rt2x00dev, libconf->conf->power_level);
1000 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
1001 rt2800usb_config_retry_limit(rt2x00dev, libconf);
1002 if (flags & IEEE80211_CONF_CHANGE_PS)
1003 rt2800usb_config_ps(rt2x00dev, libconf);
1004}
1005
1006/*
1007 * Link tuning
1008 */
1009static void rt2800usb_link_stats(struct rt2x00_dev *rt2x00dev,
1010 struct link_qual *qual)
1011{
1012 u32 reg;
1013
1014 /*
1015 * Update FCS error count from register.
1016 */
1017 rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
1018 qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
1019}
1020
1021static u8 rt2800usb_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1022{
1023 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1024 if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
1025 return 0x1c + (2 * rt2x00dev->lna_gain);
1026 else
1027 return 0x2e + rt2x00dev->lna_gain;
1028 }
1029
1030 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1031 return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
1032 else
1033 return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
1034}
1035
1036static inline void rt2800usb_set_vgc(struct rt2x00_dev *rt2x00dev,
1037 struct link_qual *qual, u8 vgc_level)
1038{
1039 if (qual->vgc_level != vgc_level) {
1040 rt2800usb_bbp_write(rt2x00dev, 66, vgc_level);
1041 qual->vgc_level = vgc_level;
1042 qual->vgc_level_reg = vgc_level;
1043 }
1044}
1045
1046static void rt2800usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
1047 struct link_qual *qual)
1048{
1049 rt2800usb_set_vgc(rt2x00dev, qual,
1050 rt2800usb_get_default_vgc(rt2x00dev));
1051}
1052
1053static void rt2800usb_link_tuner(struct rt2x00_dev *rt2x00dev,
1054 struct link_qual *qual, const u32 count)
1055{
1056 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
1057 return;
1058
1059 /*
1060 * When RSSI is better then -80 increase VGC level with 0x10
1061 */
1062 rt2800usb_set_vgc(rt2x00dev, qual,
1063 rt2800usb_get_default_vgc(rt2x00dev) +
1064 ((qual->rssi > -80) * 0x10));
1065}
1066
1067/*
1068 * Firmware functions 53 * Firmware functions
1069 */ 54 */
1070static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 55static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -1172,7 +157,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1172 * Wait for stable hardware. 157 * Wait for stable hardware.
1173 */ 158 */
1174 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 159 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1175 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg); 160 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
1176 if (reg && reg != ~0) 161 if (reg && reg != ~0)
1177 break; 162 break;
1178 msleep(1); 163 msleep(1);
@@ -1192,8 +177,8 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1192 data + offset, length, 177 data + offset, length,
1193 REGISTER_TIMEOUT32(length)); 178 REGISTER_TIMEOUT32(length));
1194 179
1195 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 180 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
1196 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 181 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
1197 182
1198 /* 183 /*
1199 * Send firmware request to device to load firmware, 184 * Send firmware request to device to load firmware,
@@ -1208,18 +193,18 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1208 } 193 }
1209 194
1210 msleep(10); 195 msleep(10);
1211 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 196 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1212 197
1213 /* 198 /*
1214 * Send signal to firmware during boot time. 199 * Send signal to firmware during boot time.
1215 */ 200 */
1216 rt2800usb_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); 201 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
1217 202
1218 if ((chipset == 0x3070) || 203 if ((chipset == 0x3070) ||
1219 (chipset == 0x3071) || 204 (chipset == 0x3071) ||
1220 (chipset == 0x3572)) { 205 (chipset == 0x3572)) {
1221 udelay(200); 206 udelay(200);
1222 rt2800usb_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0); 207 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
1223 udelay(10); 208 udelay(10);
1224 } 209 }
1225 210
@@ -1227,7 +212,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1227 * Wait for device to stabilize. 212 * Wait for device to stabilize.
1228 */ 213 */
1229 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 214 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1230 rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 215 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1231 if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) 216 if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
1232 break; 217 break;
1233 msleep(1); 218 msleep(1);
@@ -1241,531 +226,9 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1241 /* 226 /*
1242 * Initialize firmware. 227 * Initialize firmware.
1243 */ 228 */
1244 rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 229 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
1245 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 230 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1246 msleep(1);
1247
1248 return 0;
1249}
1250
1251/*
1252 * Initialization functions.
1253 */
1254static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
1255{
1256 u32 reg;
1257 unsigned int i;
1258
1259 /*
1260 * Wait untill BBP and RF are ready.
1261 */
1262 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1263 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1264 if (reg && reg != ~0)
1265 break;
1266 msleep(1);
1267 }
1268
1269 if (i == REGISTER_BUSY_COUNT) {
1270 ERROR(rt2x00dev, "Unstable hardware.\n");
1271 return -EBUSY;
1272 }
1273
1274 rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1275 rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);
1276
1277 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1278 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
1279 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
1280 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1281
1282 rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
1283
1284 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
1285 USB_MODE_RESET, REGISTER_TIMEOUT);
1286
1287 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
1288
1289 rt2x00usb_register_read(rt2x00dev, BCN_OFFSET0, &reg);
1290 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
1291 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
1292 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
1293 rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
1294 rt2x00usb_register_write(rt2x00dev, BCN_OFFSET0, reg);
1295
1296 rt2x00usb_register_read(rt2x00dev, BCN_OFFSET1, &reg);
1297 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
1298 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
1299 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
1300 rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
1301 rt2x00usb_register_write(rt2x00dev, BCN_OFFSET1, reg);
1302
1303 rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
1304 rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);
1305
1306 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);
1307
1308 rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
1309 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
1310 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
1311 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
1312 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
1313 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
1314 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
1315 rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1316
1317 if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
1318 rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1319 rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1320 rt2x00usb_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1321 } else {
1322 rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
1323 rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
1324 }
1325
1326 rt2x00usb_register_read(rt2x00dev, TX_LINK_CFG, &reg);
1327 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
1328 rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
1329 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
1330 rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
1331 rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
1332 rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
1333 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
1334 rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
1335 rt2x00usb_register_write(rt2x00dev, TX_LINK_CFG, reg);
1336
1337 rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
1338 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
1339 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
1340 rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
1341
1342 rt2x00usb_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
1343 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
1344 if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
1345 rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
1346 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
1347 else
1348 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
1349 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
1350 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
1351 rt2x00usb_register_write(rt2x00dev, MAX_LEN_CFG, reg);
1352
1353 rt2x00usb_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
1354
1355 rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
1356 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
1357 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
1358 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
1359 rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
1360 rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
1361 rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
1362
1363 rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
1364 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
1365 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
1366 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
1367 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1368 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1369 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1370 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1371 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1372 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1373 rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);
1374
1375 rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
1376 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
1377 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
1378 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
1379 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1380 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1381 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1382 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1383 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1384 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1385 rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
1386
1387 rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
1388 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
1389 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
1390 rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
1391 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1392 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1393 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1394 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1395 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1396 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1397 rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);
1398
1399 rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
1400 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
1401 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
1402 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
1403 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1404 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1405 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1406 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1407 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1408 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1409 rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);
1410
1411 rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
1412 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
1413 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
1414 rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
1415 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1416 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1417 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1418 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1419 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1420 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1421 rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);
1422
1423 rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
1424 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
1425 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
1426 rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
1427 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1428 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1429 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1430 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1431 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1432 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1433 rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);
1434
1435 rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);
1436
1437 rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1438 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
1439 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
1440 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
1441 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
1442 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
1443 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
1444 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
1445 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
1446 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
1447 rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1448
1449 rt2x00usb_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
1450 rt2x00usb_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
1451
1452 rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
1453 rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
1454 rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
1455 IEEE80211_MAX_RTS_THRESHOLD);
1456 rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
1457 rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);
1458
1459 rt2x00usb_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
1460 rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1461
1462 /*
1463 * ASIC will keep garbage value after boot, clear encryption keys.
1464 */
1465 for (i = 0; i < 4; i++)
1466 rt2x00usb_register_write(rt2x00dev,
1467 SHARED_KEY_MODE_ENTRY(i), 0);
1468
1469 for (i = 0; i < 256; i++) {
1470 u32 wcid[2] = { 0xffffffff, 0x00ffffff };
1471 rt2x00usb_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
1472 wcid, sizeof(wcid));
1473
1474 rt2x00usb_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
1475 rt2x00usb_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
1476 }
1477
1478 /*
1479 * Clear all beacons
1480 * For the Beacon base registers we only need to clear
1481 * the first byte since that byte contains the VALID and OWNER
1482 * bits which (when set to 0) will invalidate the entire beacon.
1483 */
1484 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
1485 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
1486 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
1487 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
1488 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
1489 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
1490 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
1491 rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
1492
1493 rt2x00usb_register_read(rt2x00dev, USB_CYC_CFG, &reg);
1494 rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
1495 rt2x00usb_register_write(rt2x00dev, USB_CYC_CFG, reg);
1496
1497 rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
1498 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
1499 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
1500 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
1501 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
1502 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
1503 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
1504 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
1505 rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
1506 rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG0, reg);
1507
1508 rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
1509 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
1510 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
1511 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
1512 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
1513 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
1514 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
1515 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
1516 rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
1517 rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG1, reg);
1518
1519 rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
1520 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
1521 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
1522 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
1523 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
1524 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
1525 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
1526 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
1527 rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
1528 rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG0, reg);
1529
1530 rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
1531 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
1532 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
1533 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
1534 rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
1535 rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG1, reg);
1536
1537 /*
1538 * We must clear the error counters.
1539 * These registers are cleared on read,
1540 * so we may pass a useless variable to store the value.
1541 */
1542 rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
1543 rt2x00usb_register_read(rt2x00dev, RX_STA_CNT1, &reg);
1544 rt2x00usb_register_read(rt2x00dev, RX_STA_CNT2, &reg);
1545 rt2x00usb_register_read(rt2x00dev, TX_STA_CNT0, &reg);
1546 rt2x00usb_register_read(rt2x00dev, TX_STA_CNT1, &reg);
1547 rt2x00usb_register_read(rt2x00dev, TX_STA_CNT2, &reg);
1548
1549 return 0;
1550}
1551
1552static int rt2800usb_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
1553{
1554 unsigned int i;
1555 u32 reg;
1556
1557 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1558 rt2x00usb_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
1559 if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
1560 return 0;
1561
1562 udelay(REGISTER_BUSY_DELAY);
1563 }
1564
1565 ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
1566 return -EACCES;
1567}
1568
1569static int rt2800usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1570{
1571 unsigned int i;
1572 u8 value;
1573
1574 /*
1575 * BBP was enabled after firmware was loaded,
1576 * but we need to reactivate it now.
1577 */
1578 rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
1579 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1580 msleep(1);
1581
1582 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1583 rt2800usb_bbp_read(rt2x00dev, 0, &value);
1584 if ((value != 0xff) && (value != 0x00))
1585 return 0;
1586 udelay(REGISTER_BUSY_DELAY);
1587 }
1588
1589 ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
1590 return -EACCES;
1591}
1592
1593static int rt2800usb_init_bbp(struct rt2x00_dev *rt2x00dev)
1594{
1595 unsigned int i;
1596 u16 eeprom;
1597 u8 reg_id;
1598 u8 value;
1599
1600 if (unlikely(rt2800usb_wait_bbp_rf_ready(rt2x00dev) ||
1601 rt2800usb_wait_bbp_ready(rt2x00dev)))
1602 return -EACCES;
1603
1604 rt2800usb_bbp_write(rt2x00dev, 65, 0x2c);
1605 rt2800usb_bbp_write(rt2x00dev, 66, 0x38);
1606 rt2800usb_bbp_write(rt2x00dev, 69, 0x12);
1607 rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
1608 rt2800usb_bbp_write(rt2x00dev, 73, 0x10);
1609 rt2800usb_bbp_write(rt2x00dev, 81, 0x37);
1610 rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
1611 rt2800usb_bbp_write(rt2x00dev, 83, 0x6a);
1612 rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
1613 rt2800usb_bbp_write(rt2x00dev, 86, 0x00);
1614 rt2800usb_bbp_write(rt2x00dev, 91, 0x04);
1615 rt2800usb_bbp_write(rt2x00dev, 92, 0x00);
1616 rt2800usb_bbp_write(rt2x00dev, 103, 0x00);
1617 rt2800usb_bbp_write(rt2x00dev, 105, 0x05);
1618
1619 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
1620 rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
1621 rt2800usb_bbp_write(rt2x00dev, 73, 0x12);
1622 }
1623
1624 if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION) {
1625 rt2800usb_bbp_write(rt2x00dev, 84, 0x19);
1626 }
1627
1628 if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
1629 rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
1630 rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
1631 rt2800usb_bbp_write(rt2x00dev, 105, 0x05);
1632 }
1633
1634 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
1635 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
1636
1637 if (eeprom != 0xffff && eeprom != 0x0000) {
1638 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
1639 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
1640 rt2800usb_bbp_write(rt2x00dev, reg_id, value);
1641 }
1642 }
1643
1644 return 0;
1645}
1646
1647static u8 rt2800usb_init_rx_filter(struct rt2x00_dev *rt2x00dev,
1648 bool bw40, u8 rfcsr24, u8 filter_target)
1649{
1650 unsigned int i;
1651 u8 bbp;
1652 u8 rfcsr;
1653 u8 passband;
1654 u8 stopband;
1655 u8 overtuned = 0;
1656
1657 rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
1658
1659 rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
1660 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
1661 rt2800usb_bbp_write(rt2x00dev, 4, bbp);
1662
1663 rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
1664 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
1665 rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);
1666
1667 /*
1668 * Set power & frequency of passband test tone
1669 */
1670 rt2800usb_bbp_write(rt2x00dev, 24, 0);
1671
1672 for (i = 0; i < 100; i++) {
1673 rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
1674 msleep(1);
1675
1676 rt2800usb_bbp_read(rt2x00dev, 55, &passband);
1677 if (passband)
1678 break;
1679 }
1680
1681 /*
1682 * Set power & frequency of stopband test tone
1683 */
1684 rt2800usb_bbp_write(rt2x00dev, 24, 0x06);
1685
1686 for (i = 0; i < 100; i++) {
1687 rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
1688 msleep(1);
1689
1690 rt2800usb_bbp_read(rt2x00dev, 55, &stopband);
1691
1692 if ((passband - stopband) <= filter_target) {
1693 rfcsr24++;
1694 overtuned += ((passband - stopband) == filter_target);
1695 } else
1696 break;
1697
1698 rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
1699 }
1700
1701 rfcsr24 -= !!overtuned;
1702
1703 rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
1704 return rfcsr24;
1705}
1706
1707static int rt2800usb_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1708{
1709 u8 rfcsr;
1710 u8 bbp;
1711
1712 if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
1713 return 0;
1714
1715 /*
1716 * Init RF calibration.
1717 */
1718 rt2800usb_rfcsr_read(rt2x00dev, 30, &rfcsr);
1719 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
1720 rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);
1721 msleep(1); 231 msleep(1);
1722 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
1723 rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);
1724
1725 rt2800usb_rfcsr_write(rt2x00dev, 4, 0x40);
1726 rt2800usb_rfcsr_write(rt2x00dev, 5, 0x03);
1727 rt2800usb_rfcsr_write(rt2x00dev, 6, 0x02);
1728 rt2800usb_rfcsr_write(rt2x00dev, 7, 0x70);
1729 rt2800usb_rfcsr_write(rt2x00dev, 9, 0x0f);
1730 rt2800usb_rfcsr_write(rt2x00dev, 10, 0x71);
1731 rt2800usb_rfcsr_write(rt2x00dev, 11, 0x21);
1732 rt2800usb_rfcsr_write(rt2x00dev, 12, 0x7b);
1733 rt2800usb_rfcsr_write(rt2x00dev, 14, 0x90);
1734 rt2800usb_rfcsr_write(rt2x00dev, 15, 0x58);
1735 rt2800usb_rfcsr_write(rt2x00dev, 16, 0xb3);
1736 rt2800usb_rfcsr_write(rt2x00dev, 17, 0x92);
1737 rt2800usb_rfcsr_write(rt2x00dev, 18, 0x2c);
1738 rt2800usb_rfcsr_write(rt2x00dev, 19, 0x02);
1739 rt2800usb_rfcsr_write(rt2x00dev, 20, 0xba);
1740 rt2800usb_rfcsr_write(rt2x00dev, 21, 0xdb);
1741 rt2800usb_rfcsr_write(rt2x00dev, 24, 0x16);
1742 rt2800usb_rfcsr_write(rt2x00dev, 25, 0x01);
1743 rt2800usb_rfcsr_write(rt2x00dev, 27, 0x03);
1744 rt2800usb_rfcsr_write(rt2x00dev, 29, 0x1f);
1745
1746 /*
1747 * Set RX Filter calibration for 20MHz and 40MHz
1748 */
1749 rt2x00dev->calibration[0] =
1750 rt2800usb_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
1751 rt2x00dev->calibration[1] =
1752 rt2800usb_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
1753
1754 /*
1755 * Set back to initial state
1756 */
1757 rt2800usb_bbp_write(rt2x00dev, 24, 0);
1758
1759 rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
1760 rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
1761 rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);
1762
1763 /*
1764 * set BBP back to BW20
1765 */
1766 rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
1767 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
1768 rt2800usb_bbp_write(rt2x00dev, 4, bbp);
1769 232
1770 return 0; 233 return 0;
1771} 234}
@@ -1778,11 +241,11 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
1778{ 241{
1779 u32 reg; 242 u32 reg;
1780 243
1781 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 244 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1782 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 245 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
1783 (state == STATE_RADIO_RX_ON) || 246 (state == STATE_RADIO_RX_ON) ||
1784 (state == STATE_RADIO_RX_ON_LINK)); 247 (state == STATE_RADIO_RX_ON_LINK));
1785 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 248 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1786} 249}
1787 250
1788static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) 251static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
@@ -1791,7 +254,7 @@ static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
1791 u32 reg; 254 u32 reg;
1792 255
1793 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 256 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1794 rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 257 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1795 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) && 258 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
1796 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY)) 259 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
1797 return 0; 260 return 0;
@@ -1812,25 +275,25 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
1812 * Initialize all registers. 275 * Initialize all registers.
1813 */ 276 */
1814 if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) || 277 if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
1815 rt2800usb_init_registers(rt2x00dev) || 278 rt2800_init_registers(rt2x00dev) ||
1816 rt2800usb_init_bbp(rt2x00dev) || 279 rt2800_init_bbp(rt2x00dev) ||
1817 rt2800usb_init_rfcsr(rt2x00dev))) 280 rt2800_init_rfcsr(rt2x00dev)))
1818 return -EIO; 281 return -EIO;
1819 282
1820 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 283 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1821 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1); 284 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
1822 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 285 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1823 286
1824 udelay(50); 287 udelay(50);
1825 288
1826 rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 289 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1827 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); 290 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
1828 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1); 291 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
1829 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1); 292 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
1830 rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 293 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1831 294
1832 295
1833 rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, &reg); 296 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
1834 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 297 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
1835 /* Don't use bulk in aggregation when working with USB 1.1 */ 298 /* Don't use bulk in aggregation when working with USB 1.1 */
1836 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 299 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
@@ -1844,26 +307,26 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
1844 ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3); 307 ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3);
1845 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); 308 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
1846 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 309 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
1847 rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, reg); 310 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
1848 311
1849 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 312 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1850 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1); 313 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
1851 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); 314 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
1852 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 315 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1853 316
1854 /* 317 /*
1855 * Initialize LED control 318 * Initialize LED control
1856 */ 319 */
1857 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word); 320 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
1858 rt2800usb_mcu_request(rt2x00dev, MCU_LED_1, 0xff, 321 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
1859 word & 0xff, (word >> 8) & 0xff); 322 word & 0xff, (word >> 8) & 0xff);
1860 323
1861 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word); 324 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
1862 rt2800usb_mcu_request(rt2x00dev, MCU_LED_2, 0xff, 325 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
1863 word & 0xff, (word >> 8) & 0xff); 326 word & 0xff, (word >> 8) & 0xff);
1864 327
1865 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word); 328 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
1866 rt2800usb_mcu_request(rt2x00dev, MCU_LED_3, 0xff, 329 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
1867 word & 0xff, (word >> 8) & 0xff); 330 word & 0xff, (word >> 8) & 0xff);
1868 331
1869 return 0; 332 return 0;
@@ -1873,14 +336,14 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
1873{ 336{
1874 u32 reg; 337 u32 reg;
1875 338
1876 rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 339 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1877 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); 340 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
1878 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); 341 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
1879 rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 342 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1880 343
1881 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0); 344 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
1882 rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0); 345 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
1883 rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, 0); 346 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
1884 347
1885 /* Wait for DMA, ignore error */ 348 /* Wait for DMA, ignore error */
1886 rt2800usb_wait_wpdma_ready(rt2x00dev); 349 rt2800usb_wait_wpdma_ready(rt2x00dev);
@@ -1892,9 +355,9 @@ static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
1892 enum dev_state state) 355 enum dev_state state)
1893{ 356{
1894 if (state == STATE_AWAKE) 357 if (state == STATE_AWAKE)
1895 rt2800usb_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); 358 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
1896 else 359 else
1897 rt2800usb_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); 360 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
1898 361
1899 return 0; 362 return 0;
1900} 363}
@@ -2048,9 +511,9 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
2048 * Disable beaconing while we are reloading the beacon data, 511 * Disable beaconing while we are reloading the beacon data,
2049 * otherwise we might be sending out invalid data. 512 * otherwise we might be sending out invalid data.
2050 */ 513 */
2051 rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 514 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
2052 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 515 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
2053 rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg); 516 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
2054 517
2055 /* 518 /*
2056 * Write entire beacon with descriptor to register. 519 * Write entire beacon with descriptor to register.
@@ -2093,12 +556,12 @@ static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
2093 return; 556 return;
2094 } 557 }
2095 558
2096 rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 559 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
2097 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) { 560 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
2098 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 561 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
2099 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); 562 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
2100 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 563 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
2101 rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg); 564 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
2102 } 565 }
2103} 566}
2104 567
@@ -2124,7 +587,7 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
2124 */ 587 */
2125 memcpy(skbdesc->desc, rxd, skbdesc->desc_len); 588 memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
2126 rxd = (__le32 *)skbdesc->desc; 589 rxd = (__le32 *)skbdesc->desc;
2127 rxwi = &rxd[RXD_DESC_SIZE / sizeof(__le32)]; 590 rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
2128 591
2129 /* 592 /*
2130 * It is now safe to read the descriptor on all architectures. 593 * It is now safe to read the descriptor on all architectures.
@@ -2135,16 +598,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
2135 rt2x00_desc_read(rxwi, 2, &rxwi2); 598 rt2x00_desc_read(rxwi, 2, &rxwi2);
2136 rt2x00_desc_read(rxwi, 3, &rxwi3); 599 rt2x00_desc_read(rxwi, 3, &rxwi3);
2137 600
2138 if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR)) 601 if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR))
2139 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 602 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
2140 603
2141 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 604 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
2142 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF); 605 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
2143 rxdesc->cipher_status = 606 rxdesc->cipher_status =
2144 rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR); 607 rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR);
2145 } 608 }
2146 609
2147 if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) { 610 if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) {
2148 /* 611 /*
2149 * Hardware has stripped IV/EIV data from 802.11 frame during 612 * Hardware has stripped IV/EIV data from 802.11 frame during
2150 * decryption. Unfortunately the descriptor doesn't contain 613 * decryption. Unfortunately the descriptor doesn't contain
@@ -2159,10 +622,10 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
2159 rxdesc->flags |= RX_FLAG_MMIC_ERROR; 622 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
2160 } 623 }
2161 624
2162 if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS)) 625 if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS))
2163 rxdesc->dev_flags |= RXDONE_MY_BSS; 626 rxdesc->dev_flags |= RXDONE_MY_BSS;
2164 627
2165 if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD)) { 628 if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) {
2166 rxdesc->dev_flags |= RXDONE_L2PAD; 629 rxdesc->dev_flags |= RXDONE_L2PAD;
2167 skbdesc->flags |= SKBDESC_L2_PADDED; 630 skbdesc->flags |= SKBDESC_L2_PADDED;
2168 } 631 }
@@ -2208,402 +671,33 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
2208 */ 671 */
2209static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) 672static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2210{ 673{
2211 u16 word; 674 if (rt2800_efuse_detect(rt2x00dev))
2212 u8 *mac; 675 rt2800_read_eeprom_efuse(rt2x00dev);
2213 u8 default_lna_gain; 676 else
2214 677 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
2215 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); 678 EEPROM_SIZE);
2216
2217 /*
2218 * Start validation of the data that has been read.
2219 */
2220 mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
2221 if (!is_valid_ether_addr(mac)) {
2222 random_ether_addr(mac);
2223 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
2224 }
2225
2226 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
2227 if (word == 0xffff) {
2228 rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
2229 rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
2230 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
2231 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
2232 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
2233 } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
2234 /*
2235 * There is a max of 2 RX streams for RT2870 series
2236 */
2237 if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
2238 rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
2239 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
2240 }
2241
2242 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
2243 if (word == 0xffff) {
2244 rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
2245 rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
2246 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
2247 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
2248 rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
2249 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
2250 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
2251 rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
2252 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
2253 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
2254 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
2255 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
2256 }
2257
2258 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
2259 if ((word & 0x00ff) == 0x00ff) {
2260 rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
2261 rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
2262 LED_MODE_TXRX_ACTIVITY);
2263 rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
2264 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
2265 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
2266 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
2267 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
2268 EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
2269 }
2270
2271 /*
2272 * During the LNA validation we are going to use
2273 * lna0 as correct value. Note that EEPROM_LNA
2274 * is never validated.
2275 */
2276 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
2277 default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
2278
2279 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
2280 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
2281 rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
2282 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
2283 rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
2284 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
2285
2286 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
2287 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
2288 rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
2289 if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
2290 rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
2291 rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
2292 default_lna_gain);
2293 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
2294
2295 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
2296 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
2297 rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
2298 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
2299 rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
2300 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
2301
2302 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
2303 if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
2304 rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
2305 if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
2306 rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
2307 rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
2308 default_lna_gain);
2309 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
2310 679
2311 return 0; 680 return rt2800_validate_eeprom(rt2x00dev);
2312} 681}
2313 682
2314static int rt2800usb_init_eeprom(struct rt2x00_dev *rt2x00dev) 683static const struct rt2800_ops rt2800usb_rt2800_ops = {
2315{ 684 .register_read = rt2x00usb_register_read,
2316 u32 reg; 685 .register_read_lock = rt2x00usb_register_read_lock,
2317 u16 value; 686 .register_write = rt2x00usb_register_write,
2318 u16 eeprom; 687 .register_write_lock = rt2x00usb_register_write_lock,
2319
2320 /*
2321 * Read EEPROM word for configuration.
2322 */
2323 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
2324
2325 /*
2326 * Identify RF chipset.
2327 */
2328 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
2329 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
2330 rt2x00_set_chip(rt2x00dev, RT2870, value, reg);
2331
2332 /*
2333 * The check for rt2860 is not a typo, some rt2870 hardware
2334 * identifies itself as rt2860 in the CSR register.
2335 */
2336 if (!rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28600000) &&
2337 !rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28700000) &&
2338 !rt2x00_check_rev(&rt2x00dev->chip, 0xfff00000, 0x28800000) &&
2339 !rt2x00_check_rev(&rt2x00dev->chip, 0xffff0000, 0x30700000)) {
2340 ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
2341 return -ENODEV;
2342 }
2343
2344 if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
2345 !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
2346 !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
2347 !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
2348 !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
2349 !rt2x00_rf(&rt2x00dev->chip, RF2020)) {
2350 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
2351 return -ENODEV;
2352 }
2353
2354 /*
2355 * Identify default antenna configuration.
2356 */
2357 rt2x00dev->default_ant.tx =
2358 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
2359 rt2x00dev->default_ant.rx =
2360 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);
2361
2362 /*
2363 * Read frequency offset and RF programming sequence.
2364 */
2365 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
2366 rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
2367
2368 /*
2369 * Read external LNA informations.
2370 */
2371 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
2372
2373 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
2374 __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
2375 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
2376 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
2377
2378 /*
2379 * Detect if this device has an hardware controlled radio.
2380 */
2381 if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
2382 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
2383
2384 /*
2385 * Store led settings, for correct led behaviour.
2386 */
2387#ifdef CONFIG_RT2X00_LIB_LEDS
2388 rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
2389 rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
2390 rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);
2391
2392 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ,
2393 &rt2x00dev->led_mcu_reg);
2394#endif /* CONFIG_RT2X00_LIB_LEDS */
2395 688
2396 return 0; 689 .register_multiread = rt2x00usb_register_multiread,
2397} 690 .register_multiwrite = rt2x00usb_register_multiwrite,
2398 691
2399/* 692 .regbusy_read = rt2x00usb_regbusy_read,
2400 * RF value list for rt2870
2401 * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
2402 */
2403static const struct rf_channel rf_vals[] = {
2404 { 1, 0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
2405 { 2, 0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
2406 { 3, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
2407 { 4, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
2408 { 5, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
2409 { 6, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
2410 { 7, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
2411 { 8, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
2412 { 9, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
2413 { 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
2414 { 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
2415 { 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
2416 { 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
2417 { 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },
2418
2419 /* 802.11 UNI / HyperLan 2 */
2420 { 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
2421 { 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
2422 { 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
2423 { 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
2424 { 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
2425 { 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
2426 { 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
2427 { 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
2428 { 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
2429 { 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
2430 { 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
2431 { 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },
2432
2433 /* 802.11 HyperLan 2 */
2434 { 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
2435 { 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
2436 { 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
2437 { 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
2438 { 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
2439 { 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
2440 { 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
2441 { 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
2442 { 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
2443 { 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
2444 { 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
2445 { 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
2446 { 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
2447 { 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
2448 { 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
2449 { 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },
2450
2451 /* 802.11 UNII */
2452 { 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
2453 { 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
2454 { 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
2455 { 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
2456 { 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
2457 { 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
2458 { 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },
2459 { 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f },
2460 { 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 },
2461 { 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 },
2462 { 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f },
2463
2464 /* 802.11 Japan */
2465 { 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
2466 { 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
2467 { 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
2468 { 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
2469 { 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
2470 { 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
2471 { 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
2472};
2473
2474/*
2475 * RF value list for rt3070
2476 * Supports: 2.4 GHz
2477 */
2478static const struct rf_channel rf_vals_3070[] = {
2479 {1, 241, 2, 2 },
2480 {2, 241, 2, 7 },
2481 {3, 242, 2, 2 },
2482 {4, 242, 2, 7 },
2483 {5, 243, 2, 2 },
2484 {6, 243, 2, 7 },
2485 {7, 244, 2, 2 },
2486 {8, 244, 2, 7 },
2487 {9, 245, 2, 2 },
2488 {10, 245, 2, 7 },
2489 {11, 246, 2, 2 },
2490 {12, 246, 2, 7 },
2491 {13, 247, 2, 2 },
2492 {14, 248, 2, 4 },
2493}; 693};
2494 694
2495static int rt2800usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2496{
2497 struct hw_mode_spec *spec = &rt2x00dev->spec;
2498 struct channel_info *info;
2499 char *tx_power1;
2500 char *tx_power2;
2501 unsigned int i;
2502 u16 eeprom;
2503
2504 /*
2505 * Initialize all hw fields.
2506 */
2507 rt2x00dev->hw->flags =
2508 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2509 IEEE80211_HW_SIGNAL_DBM |
2510 IEEE80211_HW_SUPPORTS_PS |
2511 IEEE80211_HW_PS_NULLFUNC_STACK;
2512 rt2x00dev->hw->extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
2513
2514 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
2515 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
2516 rt2x00_eeprom_addr(rt2x00dev,
2517 EEPROM_MAC_ADDR_0));
2518
2519 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
2520
2521 /*
2522 * Initialize HT information.
2523 */
2524 spec->ht.ht_supported = true;
2525 spec->ht.cap =
2526 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
2527 IEEE80211_HT_CAP_GRN_FLD |
2528 IEEE80211_HT_CAP_SGI_20 |
2529 IEEE80211_HT_CAP_SGI_40 |
2530 IEEE80211_HT_CAP_TX_STBC |
2531 IEEE80211_HT_CAP_RX_STBC |
2532 IEEE80211_HT_CAP_PSMP_SUPPORT;
2533 spec->ht.ampdu_factor = 3;
2534 spec->ht.ampdu_density = 4;
2535 spec->ht.mcs.tx_params =
2536 IEEE80211_HT_MCS_TX_DEFINED |
2537 IEEE80211_HT_MCS_TX_RX_DIFF |
2538 ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
2539 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
2540
2541 switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
2542 case 3:
2543 spec->ht.mcs.rx_mask[2] = 0xff;
2544 case 2:
2545 spec->ht.mcs.rx_mask[1] = 0xff;
2546 case 1:
2547 spec->ht.mcs.rx_mask[0] = 0xff;
2548 spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
2549 break;
2550 }
2551
2552 /*
2553 * Initialize hw_mode information.
2554 */
2555 spec->supported_bands = SUPPORT_BAND_2GHZ;
2556 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2557
2558 if (rt2x00_rf(&rt2x00dev->chip, RF2820) ||
2559 rt2x00_rf(&rt2x00dev->chip, RF2720)) {
2560 spec->num_channels = 14;
2561 spec->channels = rf_vals;
2562 } else if (rt2x00_rf(&rt2x00dev->chip, RF2850) ||
2563 rt2x00_rf(&rt2x00dev->chip, RF2750)) {
2564 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2565 spec->num_channels = ARRAY_SIZE(rf_vals);
2566 spec->channels = rf_vals;
2567 } else if (rt2x00_rf(&rt2x00dev->chip, RF3020) ||
2568 rt2x00_rf(&rt2x00dev->chip, RF2020)) {
2569 spec->num_channels = ARRAY_SIZE(rf_vals_3070);
2570 spec->channels = rf_vals_3070;
2571 }
2572
2573 /*
2574 * Create channel information array
2575 */
2576 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
2577 if (!info)
2578 return -ENOMEM;
2579
2580 spec->channels_info = info;
2581
2582 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
2583 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
2584
2585 for (i = 0; i < 14; i++) {
2586 info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
2587 info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
2588 }
2589
2590 if (spec->num_channels > 14) {
2591 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
2592 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
2593
2594 for (i = 14; i < spec->num_channels; i++) {
2595 info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
2596 info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
2597 }
2598 }
2599
2600 return 0;
2601}
2602
2603static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) 695static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2604{ 696{
2605 int retval; 697 int retval;
2606 698
699 rt2x00dev->priv = (void *)&rt2800usb_rt2800_ops;
700
2607 /* 701 /*
2608 * Allocate eeprom data. 702 * Allocate eeprom data.
2609 */ 703 */
@@ -2611,14 +705,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2611 if (retval) 705 if (retval)
2612 return retval; 706 return retval;
2613 707
2614 retval = rt2800usb_init_eeprom(rt2x00dev); 708 retval = rt2800_init_eeprom(rt2x00dev);
2615 if (retval) 709 if (retval)
2616 return retval; 710 return retval;
2617 711
2618 /* 712 /*
2619 * Initialize hw specifications. 713 * Initialize hw specifications.
2620 */ 714 */
2621 retval = rt2800usb_probe_hw_mode(rt2x00dev); 715 retval = rt2800_probe_hw_mode(rt2x00dev);
2622 if (retval) 716 if (retval)
2623 return retval; 717 return retval;
2624 718
@@ -2645,162 +739,6 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2645 return 0; 739 return 0;
2646} 740}
2647 741
2648/*
2649 * IEEE80211 stack callback functions.
2650 */
2651static void rt2800usb_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
2652 u32 *iv32, u16 *iv16)
2653{
2654 struct rt2x00_dev *rt2x00dev = hw->priv;
2655 struct mac_iveiv_entry iveiv_entry;
2656 u32 offset;
2657
2658 offset = MAC_IVEIV_ENTRY(hw_key_idx);
2659 rt2x00usb_register_multiread(rt2x00dev, offset,
2660 &iveiv_entry, sizeof(iveiv_entry));
2661
2662 memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
2663 memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
2664}
2665
2666static int rt2800usb_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2667{
2668 struct rt2x00_dev *rt2x00dev = hw->priv;
2669 u32 reg;
2670 bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
2671
2672 rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
2673 rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
2674 rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);
2675
2676 rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
2677 rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
2678 rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);
2679
2680 rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
2681 rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
2682 rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
2683
2684 rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
2685 rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
2686 rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);
2687
2688 rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
2689 rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
2690 rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);
2691
2692 rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
2693 rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
2694 rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);
2695
2696 rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
2697 rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
2698 rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);
2699
2700 return 0;
2701}
2702
2703static int rt2800usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2704 const struct ieee80211_tx_queue_params *params)
2705{
2706 struct rt2x00_dev *rt2x00dev = hw->priv;
2707 struct data_queue *queue;
2708 struct rt2x00_field32 field;
2709 int retval;
2710 u32 reg;
2711 u32 offset;
2712
2713 /*
2714 * First pass the configuration through rt2x00lib, that will
2715 * update the queue settings and validate the input. After that
2716 * we are free to update the registers based on the value
2717 * in the queue parameter.
2718 */
2719 retval = rt2x00mac_conf_tx(hw, queue_idx, params);
2720 if (retval)
2721 return retval;
2722
2723 /*
2724 * We only need to perform additional register initialization
2725 * for WMM queues/
2726 */
2727 if (queue_idx >= 4)
2728 return 0;
2729
2730 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2731
2732 /* Update WMM TXOP register */
2733 offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
2734 field.bit_offset = (queue_idx & 1) * 16;
2735 field.bit_mask = 0xffff << field.bit_offset;
2736
2737 rt2x00usb_register_read(rt2x00dev, offset, &reg);
2738 rt2x00_set_field32(&reg, field, queue->txop);
2739 rt2x00usb_register_write(rt2x00dev, offset, reg);
2740
2741 /* Update WMM registers */
2742 field.bit_offset = queue_idx * 4;
2743 field.bit_mask = 0xf << field.bit_offset;
2744
2745 rt2x00usb_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
2746 rt2x00_set_field32(&reg, field, queue->aifs);
2747 rt2x00usb_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);
2748
2749 rt2x00usb_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
2750 rt2x00_set_field32(&reg, field, queue->cw_min);
2751 rt2x00usb_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);
2752
2753 rt2x00usb_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
2754 rt2x00_set_field32(&reg, field, queue->cw_max);
2755 rt2x00usb_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);
2756
2757 /* Update EDCA registers */
2758 offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);
2759
2760 rt2x00usb_register_read(rt2x00dev, offset, &reg);
2761 rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
2762 rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
2763 rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
2764 rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
2765 rt2x00usb_register_write(rt2x00dev, offset, reg);
2766
2767 return 0;
2768}
2769
2770static u64 rt2800usb_get_tsf(struct ieee80211_hw *hw)
2771{
2772 struct rt2x00_dev *rt2x00dev = hw->priv;
2773 u64 tsf;
2774 u32 reg;
2775
2776 rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
2777 tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
2778 rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
2779 tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
2780
2781 return tsf;
2782}
2783
2784static const struct ieee80211_ops rt2800usb_mac80211_ops = {
2785 .tx = rt2x00mac_tx,
2786 .start = rt2x00mac_start,
2787 .stop = rt2x00mac_stop,
2788 .add_interface = rt2x00mac_add_interface,
2789 .remove_interface = rt2x00mac_remove_interface,
2790 .config = rt2x00mac_config,
2791 .configure_filter = rt2x00mac_configure_filter,
2792 .set_tim = rt2x00mac_set_tim,
2793 .set_key = rt2x00mac_set_key,
2794 .get_stats = rt2x00mac_get_stats,
2795 .get_tkip_seq = rt2800usb_get_tkip_seq,
2796 .set_rts_threshold = rt2800usb_set_rts_threshold,
2797 .bss_info_changed = rt2x00mac_bss_info_changed,
2798 .conf_tx = rt2800usb_conf_tx,
2799 .get_tx_stats = rt2x00mac_get_tx_stats,
2800 .get_tsf = rt2800usb_get_tsf,
2801 .rfkill_poll = rt2x00mac_rfkill_poll,
2802};
2803
2804static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { 742static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
2805 .probe_hw = rt2800usb_probe_hw, 743 .probe_hw = rt2800usb_probe_hw,
2806 .get_firmware_name = rt2800usb_get_firmware_name, 744 .get_firmware_name = rt2800usb_get_firmware_name,
@@ -2810,10 +748,10 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
2810 .uninitialize = rt2x00usb_uninitialize, 748 .uninitialize = rt2x00usb_uninitialize,
2811 .clear_entry = rt2x00usb_clear_entry, 749 .clear_entry = rt2x00usb_clear_entry,
2812 .set_device_state = rt2800usb_set_device_state, 750 .set_device_state = rt2800usb_set_device_state,
2813 .rfkill_poll = rt2800usb_rfkill_poll, 751 .rfkill_poll = rt2800_rfkill_poll,
2814 .link_stats = rt2800usb_link_stats, 752 .link_stats = rt2800_link_stats,
2815 .reset_tuner = rt2800usb_reset_tuner, 753 .reset_tuner = rt2800_reset_tuner,
2816 .link_tuner = rt2800usb_link_tuner, 754 .link_tuner = rt2800_link_tuner,
2817 .write_tx_desc = rt2800usb_write_tx_desc, 755 .write_tx_desc = rt2800usb_write_tx_desc,
2818 .write_tx_data = rt2x00usb_write_tx_data, 756 .write_tx_data = rt2x00usb_write_tx_data,
2819 .write_beacon = rt2800usb_write_beacon, 757 .write_beacon = rt2800usb_write_beacon,
@@ -2821,19 +759,19 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
2821 .kick_tx_queue = rt2800usb_kick_tx_queue, 759 .kick_tx_queue = rt2800usb_kick_tx_queue,
2822 .kill_tx_queue = rt2x00usb_kill_tx_queue, 760 .kill_tx_queue = rt2x00usb_kill_tx_queue,
2823 .fill_rxdone = rt2800usb_fill_rxdone, 761 .fill_rxdone = rt2800usb_fill_rxdone,
2824 .config_shared_key = rt2800usb_config_shared_key, 762 .config_shared_key = rt2800_config_shared_key,
2825 .config_pairwise_key = rt2800usb_config_pairwise_key, 763 .config_pairwise_key = rt2800_config_pairwise_key,
2826 .config_filter = rt2800usb_config_filter, 764 .config_filter = rt2800_config_filter,
2827 .config_intf = rt2800usb_config_intf, 765 .config_intf = rt2800_config_intf,
2828 .config_erp = rt2800usb_config_erp, 766 .config_erp = rt2800_config_erp,
2829 .config_ant = rt2800usb_config_ant, 767 .config_ant = rt2800_config_ant,
2830 .config = rt2800usb_config, 768 .config = rt2800_config,
2831}; 769};
2832 770
2833static const struct data_queue_desc rt2800usb_queue_rx = { 771static const struct data_queue_desc rt2800usb_queue_rx = {
2834 .entry_num = RX_ENTRIES, 772 .entry_num = RX_ENTRIES,
2835 .data_size = AGGREGATION_SIZE, 773 .data_size = AGGREGATION_SIZE,
2836 .desc_size = RXD_DESC_SIZE + RXWI_DESC_SIZE, 774 .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE,
2837 .priv_size = sizeof(struct queue_entry_priv_usb), 775 .priv_size = sizeof(struct queue_entry_priv_usb),
2838}; 776};
2839 777
@@ -2862,9 +800,9 @@ static const struct rt2x00_ops rt2800usb_ops = {
2862 .tx = &rt2800usb_queue_tx, 800 .tx = &rt2800usb_queue_tx,
2863 .bcn = &rt2800usb_queue_bcn, 801 .bcn = &rt2800usb_queue_bcn,
2864 .lib = &rt2800usb_rt2x00_ops, 802 .lib = &rt2800usb_rt2x00_ops,
2865 .hw = &rt2800usb_mac80211_ops, 803 .hw = &rt2800_mac80211_ops,
2866#ifdef CONFIG_RT2X00_LIB_DEBUGFS 804#ifdef CONFIG_RT2X00_LIB_DEBUGFS
2867 .debugfs = &rt2800usb_rt2x00debug, 805 .debugfs = &rt2800_rt2x00debug,
2868#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 806#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
2869}; 807};
2870 808
@@ -2886,17 +824,23 @@ static struct usb_device_id rt2800usb_device_table[] = {
2886 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, 824 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
2887 /* Amit */ 825 /* Amit */
2888 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 826 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
827 /* Askey */
828 { USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) },
829 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
830 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
2889 /* ASUS */ 831 /* ASUS */
2890 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) }, 832 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
2891 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 833 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
2892 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 834 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
2893 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 835 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
2894 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 836 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
837 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
2895 /* AzureWave */ 838 /* AzureWave */
2896 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 839 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
2897 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 840 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
2898 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, 841 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
2899 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, 842 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
843 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
2900 /* Belkin */ 844 /* Belkin */
2901 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, 845 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
2902 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, 846 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2905,6 +849,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
2905 /* Buffalo */ 849 /* Buffalo */
2906 { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) }, 850 { USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
2907 { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) }, 851 { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
852 /* Cisco */
853 { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
2908 /* Conceptronic */ 854 /* Conceptronic */
2909 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) }, 855 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
2910 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) }, 856 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2920,6 +866,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
2920 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) }, 866 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
2921 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, 867 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
2922 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 868 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
869 { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
870 { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
2923 { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 871 { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
2924 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) }, 872 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
2925 /* D-Link */ 873 /* D-Link */
@@ -2931,18 +879,24 @@ static struct usb_device_id rt2800usb_device_table[] = {
2931 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) }, 879 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
2932 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, 880 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
2933 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 881 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
882 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
2934 /* Edimax */ 883 /* Edimax */
2935 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, 884 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
2936 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, 885 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
2937 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, 886 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
2938 /* Encore */ 887 /* Encore */
2939 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) }, 888 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
889 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
890 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
2940 /* EnGenius */ 891 /* EnGenius */
2941 { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, 892 { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
2942 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, 893 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
2943 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) }, 894 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
2944 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) }, 895 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
2945 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) }, 896 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
897 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
898 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
899 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
2946 { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) }, 900 { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
2947 /* Gemtek */ 901 /* Gemtek */
2948 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 902 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2956,7 +910,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
2956 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) }, 910 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
2957 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) }, 911 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
2958 /* I-O DATA */ 912 /* I-O DATA */
913 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
2959 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) }, 914 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
915 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
916 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
2960 /* LevelOne */ 917 /* LevelOne */
2961 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
2962 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, 919 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2971,8 +928,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
2971 /* Motorola */ 928 /* Motorola */
2972 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 929 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
2973 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, 930 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
931 /* MSI */
932 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
933 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
934 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
935 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
936 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
937 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
938 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
2974 /* Ovislink */ 939 /* Ovislink */
2975 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 940 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
941 /* Para */
942 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
2976 /* Pegatron */ 943 /* Pegatron */
2977 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 944 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
2978 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, 945 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -2988,8 +955,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
2988 /* Quanta */ 955 /* Quanta */
2989 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) }, 956 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
2990 /* Ralink */ 957 /* Ralink */
2991 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
2992 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
2993 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 958 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
2994 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 959 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
2995 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 960 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -3013,7 +978,12 @@ static struct usb_device_id rt2800usb_device_table[] = {
3013 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, 978 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
3014 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 979 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
3015 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) }, 980 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
981 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
3016 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, 982 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
983 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
984 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
985 { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
986 { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
3017 /* SMC */ 987 /* SMC */
3018 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, 988 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
3019 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, 989 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -3022,6 +992,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
3022 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) }, 992 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
3023 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, 993 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
3024 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) }, 994 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
995 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
996 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
3025 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) }, 997 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
3026 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, 998 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
3027 /* Sparklan */ 999 /* Sparklan */
@@ -3039,6 +1011,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
3039 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) }, 1011 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
3040 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) }, 1012 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
3041 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) }, 1013 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
1014 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
3042 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, 1015 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
3043 /* Zyxel */ 1016 /* Zyxel */
3044 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) }, 1017 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 4d9991c9a51c..1e4340a182ef 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -1,5 +1,9 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2009 Mattias Nissler <mattias.nissler@gmx.de>
4 Copyright (C) 2009 Felix Fietkau <nbd@openwrt.org>
5 Copyright (C) 2009 Xose Vazquez Perez <xose.vazquez@gmail.com>
6 Copyright (C) 2009 Axel Kollhofer <rain_maker@root-forum.org>
3 <http://rt2x00.serialmonkey.com> 7 <http://rt2x00.serialmonkey.com>
4 8
5 This program is free software; you can redistribute it and/or modify 9 This program is free software; you can redistribute it and/or modify
@@ -28,288 +32,10 @@
28#define RT2800USB_H 32#define RT2800USB_H
29 33
30/* 34/*
31 * RF chip defines.
32 *
33 * RF2820 2.4G 2T3R
34 * RF2850 2.4G/5G 2T3R
35 * RF2720 2.4G 1T2R
36 * RF2750 2.4G/5G 1T2R
37 * RF3020 2.4G 1T1R
38 * RF2020 2.4G B/G
39 * RF3021 2.4G 1T2R
40 * RF3022 2.4G 2T2R
41 * RF3052 2.4G 2T2R
42 */
43#define RF2820 0x0001
44#define RF2850 0x0002
45#define RF2720 0x0003
46#define RF2750 0x0004
47#define RF3020 0x0005
48#define RF2020 0x0006
49#define RF3021 0x0007
50#define RF3022 0x0008
51#define RF3052 0x0009
52
53/*
54 * RT2870 version
55 */
56#define RT2860C_VERSION 0x28600100
57#define RT2860D_VERSION 0x28600101
58#define RT2880E_VERSION 0x28720200
59#define RT2883_VERSION 0x28830300
60#define RT3070_VERSION 0x30700200
61
62/*
63 * Signal information.
64 * Defaul offset is required for RSSI <-> dBm conversion.
65 */
66#define DEFAULT_RSSI_OFFSET 120 /* FIXME */
67
68/*
69 * Register layout information.
70 */
71#define CSR_REG_BASE 0x1000
72#define CSR_REG_SIZE 0x0800
73#define EEPROM_BASE 0x0000
74#define EEPROM_SIZE 0x0110
75#define BBP_BASE 0x0000
76#define BBP_SIZE 0x0080
77#define RF_BASE 0x0004
78#define RF_SIZE 0x0010
79
80/*
81 * Number of TX queues.
82 */
83#define NUM_TX_QUEUES 4
84
85/*
86 * USB registers. 35 * USB registers.
87 */ 36 */
88 37
89/* 38/*
90 * HOST-MCU shared memory
91 */
92#define HOST_CMD_CSR 0x0404
93#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x000000ff)
94
95/*
96 * INT_SOURCE_CSR: Interrupt source register.
97 * Write one to clear corresponding bit.
98 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
99 */
100#define INT_SOURCE_CSR 0x0200
101#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
102#define INT_SOURCE_CSR_TXDELAYINT FIELD32(0x00000002)
103#define INT_SOURCE_CSR_RX_DONE FIELD32(0x00000004)
104#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00000008)
105#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00000010)
106#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00000020)
107#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00000040)
108#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
109#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
110#define INT_SOURCE_CSR_MCU_COMMAND FIELD32(0x00000200)
111#define INT_SOURCE_CSR_RXTX_COHERENT FIELD32(0x00000400)
112#define INT_SOURCE_CSR_TBTT FIELD32(0x00000800)
113#define INT_SOURCE_CSR_PRE_TBTT FIELD32(0x00001000)
114#define INT_SOURCE_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
115#define INT_SOURCE_CSR_AUTO_WAKEUP FIELD32(0x00004000)
116#define INT_SOURCE_CSR_GPTIMER FIELD32(0x00008000)
117#define INT_SOURCE_CSR_RX_COHERENT FIELD32(0x00010000)
118#define INT_SOURCE_CSR_TX_COHERENT FIELD32(0x00020000)
119
120/*
121 * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF.
122 */
123#define INT_MASK_CSR 0x0204
124#define INT_MASK_CSR_RXDELAYINT FIELD32(0x00000001)
125#define INT_MASK_CSR_TXDELAYINT FIELD32(0x00000002)
126#define INT_MASK_CSR_RX_DONE FIELD32(0x00000004)
127#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00000008)
128#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00000010)
129#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00000020)
130#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00000040)
131#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
132#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
133#define INT_MASK_CSR_MCU_COMMAND FIELD32(0x00000200)
134#define INT_MASK_CSR_RXTX_COHERENT FIELD32(0x00000400)
135#define INT_MASK_CSR_TBTT FIELD32(0x00000800)
136#define INT_MASK_CSR_PRE_TBTT FIELD32(0x00001000)
137#define INT_MASK_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
138#define INT_MASK_CSR_AUTO_WAKEUP FIELD32(0x00004000)
139#define INT_MASK_CSR_GPTIMER FIELD32(0x00008000)
140#define INT_MASK_CSR_RX_COHERENT FIELD32(0x00010000)
141#define INT_MASK_CSR_TX_COHERENT FIELD32(0x00020000)
142
143/*
144 * WPDMA_GLO_CFG
145 */
146#define WPDMA_GLO_CFG 0x0208
147#define WPDMA_GLO_CFG_ENABLE_TX_DMA FIELD32(0x00000001)
148#define WPDMA_GLO_CFG_TX_DMA_BUSY FIELD32(0x00000002)
149#define WPDMA_GLO_CFG_ENABLE_RX_DMA FIELD32(0x00000004)
150#define WPDMA_GLO_CFG_RX_DMA_BUSY FIELD32(0x00000008)
151#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE FIELD32(0x00000030)
152#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE FIELD32(0x00000040)
153#define WPDMA_GLO_CFG_BIG_ENDIAN FIELD32(0x00000080)
154#define WPDMA_GLO_CFG_RX_HDR_SCATTER FIELD32(0x0000ff00)
155#define WPDMA_GLO_CFG_HDR_SEG_LEN FIELD32(0xffff0000)
156
157/*
158 * WPDMA_RST_IDX
159 */
160#define WPDMA_RST_IDX 0x020c
161#define WPDMA_RST_IDX_DTX_IDX0 FIELD32(0x00000001)
162#define WPDMA_RST_IDX_DTX_IDX1 FIELD32(0x00000002)
163#define WPDMA_RST_IDX_DTX_IDX2 FIELD32(0x00000004)
164#define WPDMA_RST_IDX_DTX_IDX3 FIELD32(0x00000008)
165#define WPDMA_RST_IDX_DTX_IDX4 FIELD32(0x00000010)
166#define WPDMA_RST_IDX_DTX_IDX5 FIELD32(0x00000020)
167#define WPDMA_RST_IDX_DRX_IDX0 FIELD32(0x00010000)
168
169/*
170 * DELAY_INT_CFG
171 */
172#define DELAY_INT_CFG 0x0210
173#define DELAY_INT_CFG_RXMAX_PTIME FIELD32(0x000000ff)
174#define DELAY_INT_CFG_RXMAX_PINT FIELD32(0x00007f00)
175#define DELAY_INT_CFG_RXDLY_INT_EN FIELD32(0x00008000)
176#define DELAY_INT_CFG_TXMAX_PTIME FIELD32(0x00ff0000)
177#define DELAY_INT_CFG_TXMAX_PINT FIELD32(0x7f000000)
178#define DELAY_INT_CFG_TXDLY_INT_EN FIELD32(0x80000000)
179
180/*
181 * WMM_AIFSN_CFG: Aifsn for each EDCA AC
182 * AIFSN0: AC_BE
183 * AIFSN1: AC_BK
184 * AIFSN1: AC_VI
185 * AIFSN1: AC_VO
186 */
187#define WMM_AIFSN_CFG 0x0214
188#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
189#define WMM_AIFSN_CFG_AIFSN1 FIELD32(0x000000f0)
190#define WMM_AIFSN_CFG_AIFSN2 FIELD32(0x00000f00)
191#define WMM_AIFSN_CFG_AIFSN3 FIELD32(0x0000f000)
192
193/*
194 * WMM_CWMIN_CSR: CWmin for each EDCA AC
195 * CWMIN0: AC_BE
196 * CWMIN1: AC_BK
197 * CWMIN1: AC_VI
198 * CWMIN1: AC_VO
199 */
200#define WMM_CWMIN_CFG 0x0218
201#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
202#define WMM_CWMIN_CFG_CWMIN1 FIELD32(0x000000f0)
203#define WMM_CWMIN_CFG_CWMIN2 FIELD32(0x00000f00)
204#define WMM_CWMIN_CFG_CWMIN3 FIELD32(0x0000f000)
205
206/*
207 * WMM_CWMAX_CSR: CWmax for each EDCA AC
208 * CWMAX0: AC_BE
209 * CWMAX1: AC_BK
210 * CWMAX1: AC_VI
211 * CWMAX1: AC_VO
212 */
213#define WMM_CWMAX_CFG 0x021c
214#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
215#define WMM_CWMAX_CFG_CWMAX1 FIELD32(0x000000f0)
216#define WMM_CWMAX_CFG_CWMAX2 FIELD32(0x00000f00)
217#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
218
219/*
220 * AC_TXOP0: AC_BK/AC_BE TXOP register
221 * AC0TXOP: AC_BK in unit of 32us
222 * AC1TXOP: AC_BE in unit of 32us
223 */
224#define WMM_TXOP0_CFG 0x0220
225#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
226#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
227
228/*
229 * AC_TXOP1: AC_VO/AC_VI TXOP register
230 * AC2TXOP: AC_VI in unit of 32us
231 * AC3TXOP: AC_VO in unit of 32us
232 */
233#define WMM_TXOP1_CFG 0x0224
234#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
235#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
236
237/*
238 * GPIO_CTRL_CFG:
239 */
240#define GPIO_CTRL_CFG 0x0228
241#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
242#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002)
243#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004)
244#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008)
245#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010)
246#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
247#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
248#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
249#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
250
251/*
252 * MCU_CMD_CFG
253 */
254#define MCU_CMD_CFG 0x022c
255
256/*
257 * AC_BK register offsets
258 */
259#define TX_BASE_PTR0 0x0230
260#define TX_MAX_CNT0 0x0234
261#define TX_CTX_IDX0 0x0238
262#define TX_DTX_IDX0 0x023c
263
264/*
265 * AC_BE register offsets
266 */
267#define TX_BASE_PTR1 0x0240
268#define TX_MAX_CNT1 0x0244
269#define TX_CTX_IDX1 0x0248
270#define TX_DTX_IDX1 0x024c
271
272/*
273 * AC_VI register offsets
274 */
275#define TX_BASE_PTR2 0x0250
276#define TX_MAX_CNT2 0x0254
277#define TX_CTX_IDX2 0x0258
278#define TX_DTX_IDX2 0x025c
279
280/*
281 * AC_VO register offsets
282 */
283#define TX_BASE_PTR3 0x0260
284#define TX_MAX_CNT3 0x0264
285#define TX_CTX_IDX3 0x0268
286#define TX_DTX_IDX3 0x026c
287
288/*
289 * HCCA register offsets
290 */
291#define TX_BASE_PTR4 0x0270
292#define TX_MAX_CNT4 0x0274
293#define TX_CTX_IDX4 0x0278
294#define TX_DTX_IDX4 0x027c
295
296/*
297 * MGMT register offsets
298 */
299#define TX_BASE_PTR5 0x0280
300#define TX_MAX_CNT5 0x0284
301#define TX_CTX_IDX5 0x0288
302#define TX_DTX_IDX5 0x028c
303
304/*
305 * RX register offsets
306 */
307#define RX_BASE_PTR 0x0290
308#define RX_MAX_CNT 0x0294
309#define RX_CRX_IDX 0x0298
310#define RX_DRX_IDX 0x029c
311
312/*
313 * USB_DMA_CFG 39 * USB_DMA_CFG
314 * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns. 40 * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
315 * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes. 41 * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
@@ -343,1448 +69,16 @@
343#define USB_CYC_CFG_CLOCK_CYCLE FIELD32(0x000000ff) 69#define USB_CYC_CFG_CLOCK_CYCLE FIELD32(0x000000ff)
344 70
345/* 71/*
346 * PBF_SYS_CTRL
347 * HOST_RAM_WRITE: enable Host program ram write selection
348 */
349#define PBF_SYS_CTRL 0x0400
350#define PBF_SYS_CTRL_READY FIELD32(0x00000080)
351#define PBF_SYS_CTRL_HOST_RAM_WRITE FIELD32(0x00010000)
352
353/*
354 * PBF registers
355 * Most are for debug. Driver doesn't touch PBF register.
356 */
357#define PBF_CFG 0x0408
358#define PBF_MAX_PCNT 0x040c
359#define PBF_CTRL 0x0410
360#define PBF_INT_STA 0x0414
361#define PBF_INT_ENA 0x0418
362
363/*
364 * BCN_OFFSET0:
365 */
366#define BCN_OFFSET0 0x042c
367#define BCN_OFFSET0_BCN0 FIELD32(0x000000ff)
368#define BCN_OFFSET0_BCN1 FIELD32(0x0000ff00)
369#define BCN_OFFSET0_BCN2 FIELD32(0x00ff0000)
370#define BCN_OFFSET0_BCN3 FIELD32(0xff000000)
371
372/*
373 * BCN_OFFSET1:
374 */
375#define BCN_OFFSET1 0x0430
376#define BCN_OFFSET1_BCN4 FIELD32(0x000000ff)
377#define BCN_OFFSET1_BCN5 FIELD32(0x0000ff00)
378#define BCN_OFFSET1_BCN6 FIELD32(0x00ff0000)
379#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
380
381/*
382 * PBF registers
383 * Most are for debug. Driver doesn't touch PBF register.
384 */
385#define TXRXQ_PCNT 0x0438
386#define PBF_DBG 0x043c
387
388/*
389 * RF registers
390 */
391#define RF_CSR_CFG 0x0500
392#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
393#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
394#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
395#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
396
397/*
398 * MAC Control/Status Registers(CSR).
399 * Some values are set in TU, whereas 1 TU == 1024 us.
400 */
401
402/*
403 * MAC_CSR0: ASIC revision number.
404 * ASIC_REV: 0
405 * ASIC_VER: 2870
406 */
407#define MAC_CSR0 0x1000
408#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
409#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
410
411/*
412 * MAC_SYS_CTRL:
413 */
414#define MAC_SYS_CTRL 0x1004
415#define MAC_SYS_CTRL_RESET_CSR FIELD32(0x00000001)
416#define MAC_SYS_CTRL_RESET_BBP FIELD32(0x00000002)
417#define MAC_SYS_CTRL_ENABLE_TX FIELD32(0x00000004)
418#define MAC_SYS_CTRL_ENABLE_RX FIELD32(0x00000008)
419#define MAC_SYS_CTRL_CONTINUOUS_TX FIELD32(0x00000010)
420#define MAC_SYS_CTRL_LOOPBACK FIELD32(0x00000020)
421#define MAC_SYS_CTRL_WLAN_HALT FIELD32(0x00000040)
422#define MAC_SYS_CTRL_RX_TIMESTAMP FIELD32(0x00000080)
423
424/*
425 * MAC_ADDR_DW0: STA MAC register 0
426 */
427#define MAC_ADDR_DW0 0x1008
428#define MAC_ADDR_DW0_BYTE0 FIELD32(0x000000ff)
429#define MAC_ADDR_DW0_BYTE1 FIELD32(0x0000ff00)
430#define MAC_ADDR_DW0_BYTE2 FIELD32(0x00ff0000)
431#define MAC_ADDR_DW0_BYTE3 FIELD32(0xff000000)
432
433/*
434 * MAC_ADDR_DW1: STA MAC register 1
435 * UNICAST_TO_ME_MASK:
436 * Used to mask off bits from byte 5 of the MAC address
437 * to determine the UNICAST_TO_ME bit for RX frames.
438 * The full mask is complemented by BSS_ID_MASK:
439 * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
440 */
441#define MAC_ADDR_DW1 0x100c
442#define MAC_ADDR_DW1_BYTE4 FIELD32(0x000000ff)
443#define MAC_ADDR_DW1_BYTE5 FIELD32(0x0000ff00)
444#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK FIELD32(0x00ff0000)
445
446/*
447 * MAC_BSSID_DW0: BSSID register 0
448 */
449#define MAC_BSSID_DW0 0x1010
450#define MAC_BSSID_DW0_BYTE0 FIELD32(0x000000ff)
451#define MAC_BSSID_DW0_BYTE1 FIELD32(0x0000ff00)
452#define MAC_BSSID_DW0_BYTE2 FIELD32(0x00ff0000)
453#define MAC_BSSID_DW0_BYTE3 FIELD32(0xff000000)
454
455/*
456 * MAC_BSSID_DW1: BSSID register 1
457 * BSS_ID_MASK:
458 * 0: 1-BSSID mode (BSS index = 0)
459 * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
460 * 2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
461 * 3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
462 * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
463 * BSSID. This will make sure that those bits will be ignored
464 * when determining the MY_BSS of RX frames.
465 */
466#define MAC_BSSID_DW1 0x1014
467#define MAC_BSSID_DW1_BYTE4 FIELD32(0x000000ff)
468#define MAC_BSSID_DW1_BYTE5 FIELD32(0x0000ff00)
469#define MAC_BSSID_DW1_BSS_ID_MASK FIELD32(0x00030000)
470#define MAC_BSSID_DW1_BSS_BCN_NUM FIELD32(0x001c0000)
471
472/*
473 * MAX_LEN_CFG: Maximum frame length register.
474 * MAX_MPDU: rt2860b max 16k bytes
475 * MAX_PSDU: Maximum PSDU length
476 * (power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
477 */
478#define MAX_LEN_CFG 0x1018
479#define MAX_LEN_CFG_MAX_MPDU FIELD32(0x00000fff)
480#define MAX_LEN_CFG_MAX_PSDU FIELD32(0x00003000)
481#define MAX_LEN_CFG_MIN_PSDU FIELD32(0x0000c000)
482#define MAX_LEN_CFG_MIN_MPDU FIELD32(0x000f0000)
483
484/*
485 * BBP_CSR_CFG: BBP serial control register
486 * VALUE: Register value to program into BBP
487 * REG_NUM: Selected BBP register
488 * READ_CONTROL: 0 write BBP, 1 read BBP
489 * BUSY: ASIC is busy executing BBP commands
490 * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
491 * BBP_RW_MODE: 0 serial, 1 paralell
492 */
493#define BBP_CSR_CFG 0x101c
494#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
495#define BBP_CSR_CFG_REGNUM FIELD32(0x0000ff00)
496#define BBP_CSR_CFG_READ_CONTROL FIELD32(0x00010000)
497#define BBP_CSR_CFG_BUSY FIELD32(0x00020000)
498#define BBP_CSR_CFG_BBP_PAR_DUR FIELD32(0x00040000)
499#define BBP_CSR_CFG_BBP_RW_MODE FIELD32(0x00080000)
500
501/*
502 * RF_CSR_CFG0: RF control register
503 * REGID_AND_VALUE: Register value to program into RF
504 * BITWIDTH: Selected RF register
505 * STANDBYMODE: 0 high when standby, 1 low when standby
506 * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
507 * BUSY: ASIC is busy executing RF commands
508 */
509#define RF_CSR_CFG0 0x1020
510#define RF_CSR_CFG0_REGID_AND_VALUE FIELD32(0x00ffffff)
511#define RF_CSR_CFG0_BITWIDTH FIELD32(0x1f000000)
512#define RF_CSR_CFG0_REG_VALUE_BW FIELD32(0x1fffffff)
513#define RF_CSR_CFG0_STANDBYMODE FIELD32(0x20000000)
514#define RF_CSR_CFG0_SEL FIELD32(0x40000000)
515#define RF_CSR_CFG0_BUSY FIELD32(0x80000000)
516
517/*
518 * RF_CSR_CFG1: RF control register
519 * REGID_AND_VALUE: Register value to program into RF
520 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
521 * 0: 3 system clock cycle (37.5usec)
522 * 1: 5 system clock cycle (62.5usec)
523 */
524#define RF_CSR_CFG1 0x1024
525#define RF_CSR_CFG1_REGID_AND_VALUE FIELD32(0x00ffffff)
526#define RF_CSR_CFG1_RFGAP FIELD32(0x1f000000)
527
528/*
529 * RF_CSR_CFG2: RF control register
530 * VALUE: Register value to program into RF
531 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
532 * 0: 3 system clock cycle (37.5usec)
533 * 1: 5 system clock cycle (62.5usec)
534 */
535#define RF_CSR_CFG2 0x1028
536#define RF_CSR_CFG2_VALUE FIELD32(0x00ffffff)
537
538/*
539 * LED_CFG: LED control
540 * color LED's:
541 * 0: off
542 * 1: blinking upon TX2
543 * 2: periodic slow blinking
544 * 3: always on
545 * LED polarity:
546 * 0: active low
547 * 1: active high
548 */
549#define LED_CFG 0x102c
550#define LED_CFG_ON_PERIOD FIELD32(0x000000ff)
551#define LED_CFG_OFF_PERIOD FIELD32(0x0000ff00)
552#define LED_CFG_SLOW_BLINK_PERIOD FIELD32(0x003f0000)
553#define LED_CFG_R_LED_MODE FIELD32(0x03000000)
554#define LED_CFG_G_LED_MODE FIELD32(0x0c000000)
555#define LED_CFG_Y_LED_MODE FIELD32(0x30000000)
556#define LED_CFG_LED_POLAR FIELD32(0x40000000)
557
558/*
559 * XIFS_TIME_CFG: MAC timing
560 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
561 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
562 * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
563 * when MAC doesn't reference BBP signal BBRXEND
564 * EIFS: unit 1us
565 * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
566 *
567 */
568#define XIFS_TIME_CFG 0x1100
569#define XIFS_TIME_CFG_CCKM_SIFS_TIME FIELD32(0x000000ff)
570#define XIFS_TIME_CFG_OFDM_SIFS_TIME FIELD32(0x0000ff00)
571#define XIFS_TIME_CFG_OFDM_XIFS_TIME FIELD32(0x000f0000)
572#define XIFS_TIME_CFG_EIFS FIELD32(0x1ff00000)
573#define XIFS_TIME_CFG_BB_RXEND_ENABLE FIELD32(0x20000000)
574
575/*
576 * BKOFF_SLOT_CFG:
577 */
578#define BKOFF_SLOT_CFG 0x1104
579#define BKOFF_SLOT_CFG_SLOT_TIME FIELD32(0x000000ff)
580#define BKOFF_SLOT_CFG_CC_DELAY_TIME FIELD32(0x0000ff00)
581
582/*
583 * NAV_TIME_CFG:
584 */
585#define NAV_TIME_CFG 0x1108
586#define NAV_TIME_CFG_SIFS FIELD32(0x000000ff)
587#define NAV_TIME_CFG_SLOT_TIME FIELD32(0x0000ff00)
588#define NAV_TIME_CFG_EIFS FIELD32(0x01ff0000)
589#define NAV_TIME_ZERO_SIFS FIELD32(0x02000000)
590
591/*
592 * CH_TIME_CFG: count as channel busy
593 */
594#define CH_TIME_CFG 0x110c
595
596/*
597 * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
598 */
599#define PBF_LIFE_TIMER 0x1110
600
601/*
602 * BCN_TIME_CFG:
603 * BEACON_INTERVAL: in unit of 1/16 TU
604 * TSF_TICKING: Enable TSF auto counting
605 * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
606 * BEACON_GEN: Enable beacon generator
607 */
608#define BCN_TIME_CFG 0x1114
609#define BCN_TIME_CFG_BEACON_INTERVAL FIELD32(0x0000ffff)
610#define BCN_TIME_CFG_TSF_TICKING FIELD32(0x00010000)
611#define BCN_TIME_CFG_TSF_SYNC FIELD32(0x00060000)
612#define BCN_TIME_CFG_TBTT_ENABLE FIELD32(0x00080000)
613#define BCN_TIME_CFG_BEACON_GEN FIELD32(0x00100000)
614#define BCN_TIME_CFG_TX_TIME_COMPENSATE FIELD32(0xf0000000)
615
616/*
617 * TBTT_SYNC_CFG:
618 */
619#define TBTT_SYNC_CFG 0x1118
620
621/*
622 * TSF_TIMER_DW0: Local lsb TSF timer, read-only
623 */
624#define TSF_TIMER_DW0 0x111c
625#define TSF_TIMER_DW0_LOW_WORD FIELD32(0xffffffff)
626
627/*
628 * TSF_TIMER_DW1: Local msb TSF timer, read-only
629 */
630#define TSF_TIMER_DW1 0x1120
631#define TSF_TIMER_DW1_HIGH_WORD FIELD32(0xffffffff)
632
633/*
634 * TBTT_TIMER: TImer remains till next TBTT, read-only
635 */
636#define TBTT_TIMER 0x1124
637
638/*
639 * INT_TIMER_CFG:
640 */
641#define INT_TIMER_CFG 0x1128
642
643/*
644 * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
645 */
646#define INT_TIMER_EN 0x112c
647
648/*
649 * CH_IDLE_STA: channel idle time
650 */
651#define CH_IDLE_STA 0x1130
652
653/*
654 * CH_BUSY_STA: channel busy time
655 */
656#define CH_BUSY_STA 0x1134
657
658/*
659 * MAC_STATUS_CFG:
660 * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
661 * if 1 or higher one of the 2 registers is busy.
662 */
663#define MAC_STATUS_CFG 0x1200
664#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
665
666/*
667 * PWR_PIN_CFG:
668 */
669#define PWR_PIN_CFG 0x1204
670
671/*
672 * AUTOWAKEUP_CFG: Manual power control / status register
673 * TBCN_BEFORE_WAKE: ForceWake has high privilege than PutToSleep when both set
674 * AUTOWAKE: 0:sleep, 1:awake
675 */
676#define AUTOWAKEUP_CFG 0x1208
677#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME FIELD32(0x000000ff)
678#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00)
679#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000)
680
681/*
682 * EDCA_AC0_CFG:
683 */
684#define EDCA_AC0_CFG 0x1300
685#define EDCA_AC0_CFG_TX_OP FIELD32(0x000000ff)
686#define EDCA_AC0_CFG_AIFSN FIELD32(0x00000f00)
687#define EDCA_AC0_CFG_CWMIN FIELD32(0x0000f000)
688#define EDCA_AC0_CFG_CWMAX FIELD32(0x000f0000)
689
690/*
691 * EDCA_AC1_CFG:
692 */
693#define EDCA_AC1_CFG 0x1304
694#define EDCA_AC1_CFG_TX_OP FIELD32(0x000000ff)
695#define EDCA_AC1_CFG_AIFSN FIELD32(0x00000f00)
696#define EDCA_AC1_CFG_CWMIN FIELD32(0x0000f000)
697#define EDCA_AC1_CFG_CWMAX FIELD32(0x000f0000)
698
699/*
700 * EDCA_AC2_CFG:
701 */
702#define EDCA_AC2_CFG 0x1308
703#define EDCA_AC2_CFG_TX_OP FIELD32(0x000000ff)
704#define EDCA_AC2_CFG_AIFSN FIELD32(0x00000f00)
705#define EDCA_AC2_CFG_CWMIN FIELD32(0x0000f000)
706#define EDCA_AC2_CFG_CWMAX FIELD32(0x000f0000)
707
708/*
709 * EDCA_AC3_CFG:
710 */
711#define EDCA_AC3_CFG 0x130c
712#define EDCA_AC3_CFG_TX_OP FIELD32(0x000000ff)
713#define EDCA_AC3_CFG_AIFSN FIELD32(0x00000f00)
714#define EDCA_AC3_CFG_CWMIN FIELD32(0x0000f000)
715#define EDCA_AC3_CFG_CWMAX FIELD32(0x000f0000)
716
717/*
718 * EDCA_TID_AC_MAP:
719 */
720#define EDCA_TID_AC_MAP 0x1310
721
722/*
723 * TX_PWR_CFG_0:
724 */
725#define TX_PWR_CFG_0 0x1314
726#define TX_PWR_CFG_0_1MBS FIELD32(0x0000000f)
727#define TX_PWR_CFG_0_2MBS FIELD32(0x000000f0)
728#define TX_PWR_CFG_0_55MBS FIELD32(0x00000f00)
729#define TX_PWR_CFG_0_11MBS FIELD32(0x0000f000)
730#define TX_PWR_CFG_0_6MBS FIELD32(0x000f0000)
731#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
732#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
733#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
734
735/*
736 * TX_PWR_CFG_1:
737 */
738#define TX_PWR_CFG_1 0x1318
739#define TX_PWR_CFG_1_24MBS FIELD32(0x0000000f)
740#define TX_PWR_CFG_1_36MBS FIELD32(0x000000f0)
741#define TX_PWR_CFG_1_48MBS FIELD32(0x00000f00)
742#define TX_PWR_CFG_1_54MBS FIELD32(0x0000f000)
743#define TX_PWR_CFG_1_MCS0 FIELD32(0x000f0000)
744#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
745#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
746#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
747
748/*
749 * TX_PWR_CFG_2:
750 */
751#define TX_PWR_CFG_2 0x131c
752#define TX_PWR_CFG_2_MCS4 FIELD32(0x0000000f)
753#define TX_PWR_CFG_2_MCS5 FIELD32(0x000000f0)
754#define TX_PWR_CFG_2_MCS6 FIELD32(0x00000f00)
755#define TX_PWR_CFG_2_MCS7 FIELD32(0x0000f000)
756#define TX_PWR_CFG_2_MCS8 FIELD32(0x000f0000)
757#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
758#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
759#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
760
761/*
762 * TX_PWR_CFG_3:
763 */
764#define TX_PWR_CFG_3 0x1320
765#define TX_PWR_CFG_3_MCS12 FIELD32(0x0000000f)
766#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0)
767#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00)
768#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000)
769#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000)
770#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
771#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
772#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
773
774/*
775 * TX_PWR_CFG_4:
776 */
777#define TX_PWR_CFG_4 0x1324
778#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f)
779#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
780#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
781#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
782
783/*
784 * TX_PIN_CFG:
785 */
786#define TX_PIN_CFG 0x1328
787#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001)
788#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002)
789#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004)
790#define TX_PIN_CFG_PA_PE_G1_EN FIELD32(0x00000008)
791#define TX_PIN_CFG_PA_PE_A0_POL FIELD32(0x00000010)
792#define TX_PIN_CFG_PA_PE_G0_POL FIELD32(0x00000020)
793#define TX_PIN_CFG_PA_PE_A1_POL FIELD32(0x00000040)
794#define TX_PIN_CFG_PA_PE_G1_POL FIELD32(0x00000080)
795#define TX_PIN_CFG_LNA_PE_A0_EN FIELD32(0x00000100)
796#define TX_PIN_CFG_LNA_PE_G0_EN FIELD32(0x00000200)
797#define TX_PIN_CFG_LNA_PE_A1_EN FIELD32(0x00000400)
798#define TX_PIN_CFG_LNA_PE_G1_EN FIELD32(0x00000800)
799#define TX_PIN_CFG_LNA_PE_A0_POL FIELD32(0x00001000)
800#define TX_PIN_CFG_LNA_PE_G0_POL FIELD32(0x00002000)
801#define TX_PIN_CFG_LNA_PE_A1_POL FIELD32(0x00004000)
802#define TX_PIN_CFG_LNA_PE_G1_POL FIELD32(0x00008000)
803#define TX_PIN_CFG_RFTR_EN FIELD32(0x00010000)
804#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
805#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
806#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
807
808/*
809 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
810 */
811#define TX_BAND_CFG 0x132c
812#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
813#define TX_BAND_CFG_A FIELD32(0x00000002)
814#define TX_BAND_CFG_BG FIELD32(0x00000004)
815
816/*
817 * TX_SW_CFG0:
818 */
819#define TX_SW_CFG0 0x1330
820
821/*
822 * TX_SW_CFG1:
823 */
824#define TX_SW_CFG1 0x1334
825
826/*
827 * TX_SW_CFG2:
828 */
829#define TX_SW_CFG2 0x1338
830
831/*
832 * TXOP_THRES_CFG:
833 */
834#define TXOP_THRES_CFG 0x133c
835
836/*
837 * TXOP_CTRL_CFG:
838 */
839#define TXOP_CTRL_CFG 0x1340
840
841/*
842 * TX_RTS_CFG:
843 * RTS_THRES: unit:byte
844 * RTS_FBK_EN: enable rts rate fallback
845 */
846#define TX_RTS_CFG 0x1344
847#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT FIELD32(0x000000ff)
848#define TX_RTS_CFG_RTS_THRES FIELD32(0x00ffff00)
849#define TX_RTS_CFG_RTS_FBK_EN FIELD32(0x01000000)
850
851/*
852 * TX_TIMEOUT_CFG:
853 * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
854 * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
855 * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
856 * it is recommended that:
857 * (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
858 */
859#define TX_TIMEOUT_CFG 0x1348
860#define TX_TIMEOUT_CFG_MPDU_LIFETIME FIELD32(0x000000f0)
861#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT FIELD32(0x0000ff00)
862#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT FIELD32(0x00ff0000)
863
864/*
865 * TX_RTY_CFG:
866 * SHORT_RTY_LIMIT: short retry limit
867 * LONG_RTY_LIMIT: long retry limit
868 * LONG_RTY_THRE: Long retry threshoold
869 * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
870 * 0:expired by retry limit, 1: expired by mpdu life timer
871 * AGG_RTY_MODE: Aggregate MPDU retry mode
872 * 0:expired by retry limit, 1: expired by mpdu life timer
873 * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
874 */
875#define TX_RTY_CFG 0x134c
876#define TX_RTY_CFG_SHORT_RTY_LIMIT FIELD32(0x000000ff)
877#define TX_RTY_CFG_LONG_RTY_LIMIT FIELD32(0x0000ff00)
878#define TX_RTY_CFG_LONG_RTY_THRE FIELD32(0x0fff0000)
879#define TX_RTY_CFG_NON_AGG_RTY_MODE FIELD32(0x10000000)
880#define TX_RTY_CFG_AGG_RTY_MODE FIELD32(0x20000000)
881#define TX_RTY_CFG_TX_AUTO_FB_ENABLE FIELD32(0x40000000)
882
883/*
884 * TX_LINK_CFG:
885 * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
886 * MFB_ENABLE: TX apply remote MFB 1:enable
887 * REMOTE_UMFS_ENABLE: remote unsolicit MFB enable
888 * 0: not apply remote remote unsolicit (MFS=7)
889 * TX_MRQ_EN: MCS request TX enable
890 * TX_RDG_EN: RDG TX enable
891 * TX_CF_ACK_EN: Piggyback CF-ACK enable
892 * REMOTE_MFB: remote MCS feedback
893 * REMOTE_MFS: remote MCS feedback sequence number
894 */
895#define TX_LINK_CFG 0x1350
896#define TX_LINK_CFG_REMOTE_MFB_LIFETIME FIELD32(0x000000ff)
897#define TX_LINK_CFG_MFB_ENABLE FIELD32(0x00000100)
898#define TX_LINK_CFG_REMOTE_UMFS_ENABLE FIELD32(0x00000200)
899#define TX_LINK_CFG_TX_MRQ_EN FIELD32(0x00000400)
900#define TX_LINK_CFG_TX_RDG_EN FIELD32(0x00000800)
901#define TX_LINK_CFG_TX_CF_ACK_EN FIELD32(0x00001000)
902#define TX_LINK_CFG_REMOTE_MFB FIELD32(0x00ff0000)
903#define TX_LINK_CFG_REMOTE_MFS FIELD32(0xff000000)
904
905/*
906 * HT_FBK_CFG0:
907 */
908#define HT_FBK_CFG0 0x1354
909#define HT_FBK_CFG0_HTMCS0FBK FIELD32(0x0000000f)
910#define HT_FBK_CFG0_HTMCS1FBK FIELD32(0x000000f0)
911#define HT_FBK_CFG0_HTMCS2FBK FIELD32(0x00000f00)
912#define HT_FBK_CFG0_HTMCS3FBK FIELD32(0x0000f000)
913#define HT_FBK_CFG0_HTMCS4FBK FIELD32(0x000f0000)
914#define HT_FBK_CFG0_HTMCS5FBK FIELD32(0x00f00000)
915#define HT_FBK_CFG0_HTMCS6FBK FIELD32(0x0f000000)
916#define HT_FBK_CFG0_HTMCS7FBK FIELD32(0xf0000000)
917
918/*
919 * HT_FBK_CFG1:
920 */
921#define HT_FBK_CFG1 0x1358
922#define HT_FBK_CFG1_HTMCS8FBK FIELD32(0x0000000f)
923#define HT_FBK_CFG1_HTMCS9FBK FIELD32(0x000000f0)
924#define HT_FBK_CFG1_HTMCS10FBK FIELD32(0x00000f00)
925#define HT_FBK_CFG1_HTMCS11FBK FIELD32(0x0000f000)
926#define HT_FBK_CFG1_HTMCS12FBK FIELD32(0x000f0000)
927#define HT_FBK_CFG1_HTMCS13FBK FIELD32(0x00f00000)
928#define HT_FBK_CFG1_HTMCS14FBK FIELD32(0x0f000000)
929#define HT_FBK_CFG1_HTMCS15FBK FIELD32(0xf0000000)
930
931/*
932 * LG_FBK_CFG0:
933 */
934#define LG_FBK_CFG0 0x135c
935#define LG_FBK_CFG0_OFDMMCS0FBK FIELD32(0x0000000f)
936#define LG_FBK_CFG0_OFDMMCS1FBK FIELD32(0x000000f0)
937#define LG_FBK_CFG0_OFDMMCS2FBK FIELD32(0x00000f00)
938#define LG_FBK_CFG0_OFDMMCS3FBK FIELD32(0x0000f000)
939#define LG_FBK_CFG0_OFDMMCS4FBK FIELD32(0x000f0000)
940#define LG_FBK_CFG0_OFDMMCS5FBK FIELD32(0x00f00000)
941#define LG_FBK_CFG0_OFDMMCS6FBK FIELD32(0x0f000000)
942#define LG_FBK_CFG0_OFDMMCS7FBK FIELD32(0xf0000000)
943
944/*
945 * LG_FBK_CFG1:
946 */
947#define LG_FBK_CFG1 0x1360
948#define LG_FBK_CFG0_CCKMCS0FBK FIELD32(0x0000000f)
949#define LG_FBK_CFG0_CCKMCS1FBK FIELD32(0x000000f0)
950#define LG_FBK_CFG0_CCKMCS2FBK FIELD32(0x00000f00)
951#define LG_FBK_CFG0_CCKMCS3FBK FIELD32(0x0000f000)
952
953/*
954 * CCK_PROT_CFG: CCK Protection
955 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
956 * PROTECT_CTRL: Protection control frame type for CCK TX
957 * 0:none, 1:RTS/CTS, 2:CTS-to-self
958 * PROTECT_NAV: TXOP protection type for CCK TX
959 * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
960 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
961 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
962 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
963 * TX_OP_ALLOW_MM40: CCK TXOP allowance, 0:disallow
964 * TX_OP_ALLOW_GF20: CCK TXOP allowance, 0:disallow
965 * TX_OP_ALLOW_GF40: CCK TXOP allowance, 0:disallow
966 * RTS_TH_EN: RTS threshold enable on CCK TX
967 */
968#define CCK_PROT_CFG 0x1364
969#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
970#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
971#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
972#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
973#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
974#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
975#define CCK_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
976#define CCK_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
977#define CCK_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
978#define CCK_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
979
980/*
981 * OFDM_PROT_CFG: OFDM Protection
982 */
983#define OFDM_PROT_CFG 0x1368
984#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
985#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
986#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
987#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
988#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
989#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
990#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
991#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
992#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
993#define OFDM_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
994
995/*
996 * MM20_PROT_CFG: MM20 Protection
997 */
998#define MM20_PROT_CFG 0x136c
999#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1000#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1001#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1002#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1003#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1004#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1005#define MM20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1006#define MM20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1007#define MM20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1008#define MM20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1009
1010/*
1011 * MM40_PROT_CFG: MM40 Protection
1012 */
1013#define MM40_PROT_CFG 0x1370
1014#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1015#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1016#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1017#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1018#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1019#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1020#define MM40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1021#define MM40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1022#define MM40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1023#define MM40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1024
1025/*
1026 * GF20_PROT_CFG: GF20 Protection
1027 */
1028#define GF20_PROT_CFG 0x1374
1029#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1030#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1031#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1032#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1033#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1034#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1035#define GF20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1036#define GF20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1037#define GF20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1038#define GF20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1039
1040/*
1041 * GF40_PROT_CFG: GF40 Protection
1042 */
1043#define GF40_PROT_CFG 0x1378
1044#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1045#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1046#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1047#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1048#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1049#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1050#define GF40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1051#define GF40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1052#define GF40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1053#define GF40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1054
1055/*
1056 * EXP_CTS_TIME:
1057 */
1058#define EXP_CTS_TIME 0x137c
1059
1060/*
1061 * EXP_ACK_TIME:
1062 */
1063#define EXP_ACK_TIME 0x1380
1064
1065/*
1066 * RX_FILTER_CFG: RX configuration register.
1067 */
1068#define RX_FILTER_CFG 0x1400
1069#define RX_FILTER_CFG_DROP_CRC_ERROR FIELD32(0x00000001)
1070#define RX_FILTER_CFG_DROP_PHY_ERROR FIELD32(0x00000002)
1071#define RX_FILTER_CFG_DROP_NOT_TO_ME FIELD32(0x00000004)
1072#define RX_FILTER_CFG_DROP_NOT_MY_BSSD FIELD32(0x00000008)
1073#define RX_FILTER_CFG_DROP_VER_ERROR FIELD32(0x00000010)
1074#define RX_FILTER_CFG_DROP_MULTICAST FIELD32(0x00000020)
1075#define RX_FILTER_CFG_DROP_BROADCAST FIELD32(0x00000040)
1076#define RX_FILTER_CFG_DROP_DUPLICATE FIELD32(0x00000080)
1077#define RX_FILTER_CFG_DROP_CF_END_ACK FIELD32(0x00000100)
1078#define RX_FILTER_CFG_DROP_CF_END FIELD32(0x00000200)
1079#define RX_FILTER_CFG_DROP_ACK FIELD32(0x00000400)
1080#define RX_FILTER_CFG_DROP_CTS FIELD32(0x00000800)
1081#define RX_FILTER_CFG_DROP_RTS FIELD32(0x00001000)
1082#define RX_FILTER_CFG_DROP_PSPOLL FIELD32(0x00002000)
1083#define RX_FILTER_CFG_DROP_BA FIELD32(0x00004000)
1084#define RX_FILTER_CFG_DROP_BAR FIELD32(0x00008000)
1085#define RX_FILTER_CFG_DROP_CNTL FIELD32(0x00010000)
1086
1087/*
1088 * AUTO_RSP_CFG:
1089 * AUTORESPONDER: 0: disable, 1: enable
1090 * BAC_ACK_POLICY: 0:long, 1:short preamble
1091 * CTS_40_MMODE: Response CTS 40MHz duplicate mode
1092 * CTS_40_MREF: Response CTS 40MHz duplicate mode
1093 * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
1094 * DUAL_CTS_EN: Power bit value in control frame
1095 * ACK_CTS_PSM_BIT:Power bit value in control frame
1096 */
1097#define AUTO_RSP_CFG 0x1404
1098#define AUTO_RSP_CFG_AUTORESPONDER FIELD32(0x00000001)
1099#define AUTO_RSP_CFG_BAC_ACK_POLICY FIELD32(0x00000002)
1100#define AUTO_RSP_CFG_CTS_40_MMODE FIELD32(0x00000004)
1101#define AUTO_RSP_CFG_CTS_40_MREF FIELD32(0x00000008)
1102#define AUTO_RSP_CFG_AR_PREAMBLE FIELD32(0x00000010)
1103#define AUTO_RSP_CFG_DUAL_CTS_EN FIELD32(0x00000040)
1104#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT FIELD32(0x00000080)
1105
1106/*
1107 * LEGACY_BASIC_RATE:
1108 */
1109#define LEGACY_BASIC_RATE 0x1408
1110
1111/*
1112 * HT_BASIC_RATE:
1113 */
1114#define HT_BASIC_RATE 0x140c
1115
1116/*
1117 * HT_CTRL_CFG:
1118 */
1119#define HT_CTRL_CFG 0x1410
1120
1121/*
1122 * SIFS_COST_CFG:
1123 */
1124#define SIFS_COST_CFG 0x1414
1125
1126/*
1127 * RX_PARSER_CFG:
1128 * Set NAV for all received frames
1129 */
1130#define RX_PARSER_CFG 0x1418
1131
1132/*
1133 * TX_SEC_CNT0:
1134 */
1135#define TX_SEC_CNT0 0x1500
1136
1137/*
1138 * RX_SEC_CNT0:
1139 */
1140#define RX_SEC_CNT0 0x1504
1141
1142/*
1143 * CCMP_FC_MUTE:
1144 */
1145#define CCMP_FC_MUTE 0x1508
1146
1147/*
1148 * TXOP_HLDR_ADDR0:
1149 */
1150#define TXOP_HLDR_ADDR0 0x1600
1151
1152/*
1153 * TXOP_HLDR_ADDR1:
1154 */
1155#define TXOP_HLDR_ADDR1 0x1604
1156
1157/*
1158 * TXOP_HLDR_ET:
1159 */
1160#define TXOP_HLDR_ET 0x1608
1161
1162/*
1163 * QOS_CFPOLL_RA_DW0:
1164 */
1165#define QOS_CFPOLL_RA_DW0 0x160c
1166
1167/*
1168 * QOS_CFPOLL_RA_DW1:
1169 */
1170#define QOS_CFPOLL_RA_DW1 0x1610
1171
1172/*
1173 * QOS_CFPOLL_QC:
1174 */
1175#define QOS_CFPOLL_QC 0x1614
1176
1177/*
1178 * RX_STA_CNT0: RX PLCP error count & RX CRC error count
1179 */
1180#define RX_STA_CNT0 0x1700
1181#define RX_STA_CNT0_CRC_ERR FIELD32(0x0000ffff)
1182#define RX_STA_CNT0_PHY_ERR FIELD32(0xffff0000)
1183
1184/*
1185 * RX_STA_CNT1: RX False CCA count & RX LONG frame count
1186 */
1187#define RX_STA_CNT1 0x1704
1188#define RX_STA_CNT1_FALSE_CCA FIELD32(0x0000ffff)
1189#define RX_STA_CNT1_PLCP_ERR FIELD32(0xffff0000)
1190
1191/*
1192 * RX_STA_CNT2:
1193 */
1194#define RX_STA_CNT2 0x1708
1195#define RX_STA_CNT2_RX_DUPLI_COUNT FIELD32(0x0000ffff)
1196#define RX_STA_CNT2_RX_FIFO_OVERFLOW FIELD32(0xffff0000)
1197
1198/*
1199 * TX_STA_CNT0: TX Beacon count
1200 */
1201#define TX_STA_CNT0 0x170c
1202#define TX_STA_CNT0_TX_FAIL_COUNT FIELD32(0x0000ffff)
1203#define TX_STA_CNT0_TX_BEACON_COUNT FIELD32(0xffff0000)
1204
1205/*
1206 * TX_STA_CNT1: TX tx count
1207 */
1208#define TX_STA_CNT1 0x1710
1209#define TX_STA_CNT1_TX_SUCCESS FIELD32(0x0000ffff)
1210#define TX_STA_CNT1_TX_RETRANSMIT FIELD32(0xffff0000)
1211
1212/*
1213 * TX_STA_CNT2: TX tx count
1214 */
1215#define TX_STA_CNT2 0x1714
1216#define TX_STA_CNT2_TX_ZERO_LEN_COUNT FIELD32(0x0000ffff)
1217#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
1218
1219/*
1220 * TX_STA_FIFO: TX Result for specific PID status fifo register
1221 */
1222#define TX_STA_FIFO 0x1718
1223#define TX_STA_FIFO_VALID FIELD32(0x00000001)
1224#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e)
1225#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020)
1226#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040)
1227#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080)
1228#define TX_STA_FIFO_WCID FIELD32(0x0000ff00)
1229#define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000)
1230
1231/*
1232 * TX_AGG_CNT: Debug counter
1233 */
1234#define TX_AGG_CNT 0x171c
1235#define TX_AGG_CNT_NON_AGG_TX_COUNT FIELD32(0x0000ffff)
1236#define TX_AGG_CNT_AGG_TX_COUNT FIELD32(0xffff0000)
1237
1238/*
1239 * TX_AGG_CNT0:
1240 */
1241#define TX_AGG_CNT0 0x1720
1242#define TX_AGG_CNT0_AGG_SIZE_1_COUNT FIELD32(0x0000ffff)
1243#define TX_AGG_CNT0_AGG_SIZE_2_COUNT FIELD32(0xffff0000)
1244
1245/*
1246 * TX_AGG_CNT1:
1247 */
1248#define TX_AGG_CNT1 0x1724
1249#define TX_AGG_CNT1_AGG_SIZE_3_COUNT FIELD32(0x0000ffff)
1250#define TX_AGG_CNT1_AGG_SIZE_4_COUNT FIELD32(0xffff0000)
1251
1252/*
1253 * TX_AGG_CNT2:
1254 */
1255#define TX_AGG_CNT2 0x1728
1256#define TX_AGG_CNT2_AGG_SIZE_5_COUNT FIELD32(0x0000ffff)
1257#define TX_AGG_CNT2_AGG_SIZE_6_COUNT FIELD32(0xffff0000)
1258
1259/*
1260 * TX_AGG_CNT3:
1261 */
1262#define TX_AGG_CNT3 0x172c
1263#define TX_AGG_CNT3_AGG_SIZE_7_COUNT FIELD32(0x0000ffff)
1264#define TX_AGG_CNT3_AGG_SIZE_8_COUNT FIELD32(0xffff0000)
1265
1266/*
1267 * TX_AGG_CNT4:
1268 */
1269#define TX_AGG_CNT4 0x1730
1270#define TX_AGG_CNT4_AGG_SIZE_9_COUNT FIELD32(0x0000ffff)
1271#define TX_AGG_CNT4_AGG_SIZE_10_COUNT FIELD32(0xffff0000)
1272
1273/*
1274 * TX_AGG_CNT5:
1275 */
1276#define TX_AGG_CNT5 0x1734
1277#define TX_AGG_CNT5_AGG_SIZE_11_COUNT FIELD32(0x0000ffff)
1278#define TX_AGG_CNT5_AGG_SIZE_12_COUNT FIELD32(0xffff0000)
1279
1280/*
1281 * TX_AGG_CNT6:
1282 */
1283#define TX_AGG_CNT6 0x1738
1284#define TX_AGG_CNT6_AGG_SIZE_13_COUNT FIELD32(0x0000ffff)
1285#define TX_AGG_CNT6_AGG_SIZE_14_COUNT FIELD32(0xffff0000)
1286
1287/*
1288 * TX_AGG_CNT7:
1289 */
1290#define TX_AGG_CNT7 0x173c
1291#define TX_AGG_CNT7_AGG_SIZE_15_COUNT FIELD32(0x0000ffff)
1292#define TX_AGG_CNT7_AGG_SIZE_16_COUNT FIELD32(0xffff0000)
1293
1294/*
1295 * MPDU_DENSITY_CNT:
1296 * TX_ZERO_DEL: TX zero length delimiter count
1297 * RX_ZERO_DEL: RX zero length delimiter count
1298 */
1299#define MPDU_DENSITY_CNT 0x1740
1300#define MPDU_DENSITY_CNT_TX_ZERO_DEL FIELD32(0x0000ffff)
1301#define MPDU_DENSITY_CNT_RX_ZERO_DEL FIELD32(0xffff0000)
1302
1303/*
1304 * Security key table memory.
1305 * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry
1306 * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry
1307 * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry
1308 * MAC_WCID_ATTRIBUTE_BASE: 4-byte * 256-entry
1309 * SHARED_KEY_TABLE_BASE: 32 bytes * 32-entry
1310 * SHARED_KEY_MODE_BASE: 4 bits * 32-entry
1311 */
1312#define MAC_WCID_BASE 0x1800
1313#define PAIRWISE_KEY_TABLE_BASE 0x4000
1314#define MAC_IVEIV_TABLE_BASE 0x6000
1315#define MAC_WCID_ATTRIBUTE_BASE 0x6800
1316#define SHARED_KEY_TABLE_BASE 0x6c00
1317#define SHARED_KEY_MODE_BASE 0x7000
1318
/*
 * Per-index entry addresses inside the on-chip security tables above.
 * Each macro turns a WCID / key index into the byte offset of that
 * entry: base address + index * entry size.
 */
#define MAC_WCID_ENTRY(__idx) \
	( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
#define PAIRWISE_KEY_ENTRY(__idx) \
	( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
/*
 * BUGFIX: the original used '&' (bitwise AND) instead of '*' here, which
 * collapsed every index to one of only two addresses (base + 0 or base + 8)
 * instead of linearly indexing the 256 8-byte IV/EIV entries.
 */
#define MAC_IVEIV_ENTRY(__idx) \
	( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
	( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \
	( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define SHARED_KEY_MODE_ENTRY(__idx) \
	( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
1331
/*
 * Hardware layout of one MAC_WCID_BASE table entry: 8 bytes per entry
 * of which only the first 6 (the station MAC address) are used; see the
 * "8-bytes (use only 6 bytes) * 256 entry" note above.
 */
struct mac_wcid_entry {
	u8 mac[6];	/* station MAC address for this WCID */
	u8 reserved[2];	/* padding to the 8-byte entry stride */
} __attribute__ ((packed));
1336
/*
 * Hardware layout of one 32-byte key table entry, used by both the
 * pairwise key table (PAIRWISE_KEY_TABLE_BASE) and the shared key
 * table (SHARED_KEY_TABLE_BASE).
 */
struct hw_key_entry {
	u8 key[16];	/* cipher key material */
	u8 tx_mic[8];	/* TX MIC key (TKIP) */
	u8 rx_mic[8];	/* RX MIC key (TKIP) */
} __attribute__ ((packed));
1342
/*
 * Hardware layout of one 8-byte IV/EIV table entry
 * (MAC_IVEIV_TABLE_BASE, 256 entries).
 */
struct mac_iveiv_entry {
	u8 iv[8];	/* IV + extended IV for the corresponding WCID */
} __attribute__ ((packed));
1346
1347/*
1348 * MAC_WCID_ATTRIBUTE:
1349 */
1350#define MAC_WCID_ATTRIBUTE_KEYTAB FIELD32(0x00000001)
1351#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
1352#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
1353#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)
1354
1355/*
1356 * SHARED_KEY_MODE:
1357 */
1358#define SHARED_KEY_MODE_BSS0_KEY0 FIELD32(0x00000007)
1359#define SHARED_KEY_MODE_BSS0_KEY1 FIELD32(0x00000070)
1360#define SHARED_KEY_MODE_BSS0_KEY2 FIELD32(0x00000700)
1361#define SHARED_KEY_MODE_BSS0_KEY3 FIELD32(0x00007000)
1362#define SHARED_KEY_MODE_BSS1_KEY0 FIELD32(0x00070000)
1363#define SHARED_KEY_MODE_BSS1_KEY1 FIELD32(0x00700000)
1364#define SHARED_KEY_MODE_BSS1_KEY2 FIELD32(0x07000000)
1365#define SHARED_KEY_MODE_BSS1_KEY3 FIELD32(0x70000000)
1366
1367/*
1368 * HOST-MCU communication
1369 */
1370
1371/*
1372 * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
1373 */
1374#define H2M_MAILBOX_CSR 0x7010
1375#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff)
1376#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00)
1377#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000)
1378#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000)
1379
1380/*
1381 * H2M_MAILBOX_CID:
1382 */
1383#define H2M_MAILBOX_CID 0x7014
1384#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff)
1385#define H2M_MAILBOX_CID_CMD1 FIELD32(0x0000ff00)
1386#define H2M_MAILBOX_CID_CMD2 FIELD32(0x00ff0000)
1387#define H2M_MAILBOX_CID_CMD3 FIELD32(0xff000000)
1388
1389/*
1390 * H2M_MAILBOX_STATUS:
1391 */
1392#define H2M_MAILBOX_STATUS 0x701c
1393
1394/*
1395 * H2M_INT_SRC:
1396 */
1397#define H2M_INT_SRC 0x7024
1398
1399/*
1400 * H2M_BBP_AGENT:
1401 */
1402#define H2M_BBP_AGENT 0x7028
1403
1404/*
1405 * MCU_LEDCS: LED control for MCU Mailbox.
1406 */
1407#define MCU_LEDCS_LED_MODE FIELD8(0x1f)
1408#define MCU_LEDCS_POLARITY FIELD8(0x01)
1409
1410/*
1411 * HW_CS_CTS_BASE:
1412 * Carrier-sense CTS frame base address.
1413 * It's where mac stores carrier-sense frame for carrier-sense function.
1414 */
1415#define HW_CS_CTS_BASE 0x7700
1416
1417/*
1418 * HW_DFS_CTS_BASE:
1419 * FS CTS frame base address. It's where mac stores CTS frame for DFS.
1420 */
1421#define HW_DFS_CTS_BASE 0x7780
1422
1423/*
1424 * TXRX control registers - base address 0x3000
1425 */
1426
1427/*
1428 * TXRX_CSR1:
1429 * rt2860b UNKNOWN reg use R/O Reg Addr 0x77d0 first..
1430 */
1431#define TXRX_CSR1 0x77d0
1432
1433/*
1434 * HW_DEBUG_SETTING_BASE:
1435 * since NULL frame won't be that long (256 byte)
1436 * We steal 16 tail bytes to save debugging settings
1437 */
1438#define HW_DEBUG_SETTING_BASE 0x77f0
1439#define HW_DEBUG_SETTING_BASE2 0x7770
1440
1441/*
1442 * HW_BEACON_BASE
1443 * In order to support maximum 8 MBSS and its maximum length
1444 * is 512 bytes for each beacon
1445 * Three section discontinue memory segments will be used.
1446 * 1. The original region for BCN 0~3
1447 * 2. Extract memory from FCE table for BCN 4~5
1448 * 3. Extract memory from Pair-wise key table for BCN 6~7
1449 * It occupied those memory of wcid 238~253 for BCN 6
1450 * and wcid 222~237 for BCN 7
1451 *
1452 * IMPORTANT NOTE: Not sure why legacy driver does this,
1453 * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
1454 */
#define HW_BEACON_BASE0 0x7800
#define HW_BEACON_BASE1 0x7a00
#define HW_BEACON_BASE2 0x7c00
#define HW_BEACON_BASE3 0x7e00
#define HW_BEACON_BASE4 0x7200
#define HW_BEACON_BASE5 0x7400
#define HW_BEACON_BASE6 0x5dc0
#define HW_BEACON_BASE7 0x5bc0

/*
 * Map a beacon index (0-7) to its 0x0200-byte beacon memory slot, using
 * the three discontiguous regions described above: 0-3 ascend from
 * BASE0, 4-5 ascend from BASE4, and 6-7 *descend* from BASE6.
 * BUGFIX: __index is now fully parenthesized in the arithmetic; the
 * original expanded the bare argument (e.g. "__index * 0x0200"), so a
 * call like HW_BEACON_OFFSET(i + 1) miscomputed due to precedence.
 */
#define HW_BEACON_OFFSET(__index) \
	( ((__index) < 4) ? ( HW_BEACON_BASE0 + ((__index) * 0x0200) ) : \
	  (((__index) < 6) ? ( HW_BEACON_BASE4 + (((__index) - 4) * 0x0200) ) : \
	  (HW_BEACON_BASE6 - (((__index) - 6) * 0x0200))) )
1468
1469/*
1470 * 8051 firmware image. 72 * 8051 firmware image.
1471 */ 73 */
1472#define FIRMWARE_RT2870 "rt2870.bin" 74#define FIRMWARE_RT2870 "rt2870.bin"
1473#define FIRMWARE_IMAGE_BASE 0x3000 75#define FIRMWARE_IMAGE_BASE 0x3000
1474 76
1475/* 77/*
1476 * BBP registers.
1477 * The wordsize of the BBP is 8 bits.
1478 */
1479
1480/*
1481 * BBP 1: TX Antenna
1482 */
1483#define BBP1_TX_POWER FIELD8(0x07)
1484#define BBP1_TX_ANTENNA FIELD8(0x18)
1485
1486/*
1487 * BBP 3: RX Antenna
1488 */
1489#define BBP3_RX_ANTENNA FIELD8(0x18)
1490#define BBP3_HT40_PLUS FIELD8(0x20)
1491
1492/*
1493 * BBP 4: Bandwidth
1494 */
1495#define BBP4_TX_BF FIELD8(0x01)
1496#define BBP4_BANDWIDTH FIELD8(0x18)
1497
1498/*
1499 * RFCSR registers
1500 * The wordsize of the RFCSR is 8 bits.
1501 */
1502
1503/*
1504 * RFCSR 6:
1505 */
1506#define RFCSR6_R FIELD8(0x03)
1507
1508/*
1509 * RFCSR 7:
1510 */
1511#define RFCSR7_RF_TUNING FIELD8(0x01)
1512
1513/*
1514 * RFCSR 12:
1515 */
1516#define RFCSR12_TX_POWER FIELD8(0x1f)
1517
1518/*
1519 * RFCSR 22:
1520 */
1521#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
1522
1523/*
1524 * RFCSR 23:
1525 */
1526#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
1527
1528/*
1529 * RFCSR 30:
1530 */
1531#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
1532
1533/*
1534 * RF registers
1535 */
1536
1537/*
1538 * RF 2
1539 */
1540#define RF2_ANTENNA_RX2 FIELD32(0x00000040)
1541#define RF2_ANTENNA_TX1 FIELD32(0x00004000)
1542#define RF2_ANTENNA_RX1 FIELD32(0x00020000)
1543
1544/*
1545 * RF 3
1546 */
1547#define RF3_TXPOWER_G FIELD32(0x00003e00)
1548#define RF3_TXPOWER_A_7DBM_BOOST FIELD32(0x00000200)
1549#define RF3_TXPOWER_A FIELD32(0x00003c00)
1550
1551/*
1552 * RF 4
1553 */
1554#define RF4_TXPOWER_G FIELD32(0x000007c0)
1555#define RF4_TXPOWER_A_7DBM_BOOST FIELD32(0x00000040)
1556#define RF4_TXPOWER_A FIELD32(0x00000780)
1557#define RF4_FREQ_OFFSET FIELD32(0x001f8000)
1558#define RF4_HT40 FIELD32(0x00200000)
1559
1560/*
1561 * EEPROM content.
1562 * The wordsize of the EEPROM is 16 bits.
1563 */
1564
1565/*
1566 * EEPROM Version
1567 */
1568#define EEPROM_VERSION 0x0001
1569#define EEPROM_VERSION_FAE FIELD16(0x00ff)
1570#define EEPROM_VERSION_VERSION FIELD16(0xff00)
1571
1572/*
1573 * HW MAC address.
1574 */
1575#define EEPROM_MAC_ADDR_0 0x0002
1576#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
1577#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
1578#define EEPROM_MAC_ADDR_1 0x0003
1579#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
1580#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
1581#define EEPROM_MAC_ADDR_2 0x0004
1582#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
1583#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
1584
1585/*
1586 * EEPROM ANTENNA config
1587 * RXPATH: 1: 1R, 2: 2R, 3: 3R
1588 * TXPATH: 1: 1T, 2: 2T
1589 */
1590#define EEPROM_ANTENNA 0x001a
1591#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f)
1592#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0)
1593#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00)
1594
1595/*
1596 * EEPROM NIC config
1597 * CARDBUS_ACCEL: 0 - enable, 1 - disable
1598 */
1599#define EEPROM_NIC 0x001b
1600#define EEPROM_NIC_HW_RADIO FIELD16(0x0001)
1601#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002)
1602#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004)
1603#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008)
1604#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010)
1605#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020)
1606#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040)
1607#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
1608#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
1609#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
1610
1611/*
1612 * EEPROM frequency
1613 */
1614#define EEPROM_FREQ 0x001d
1615#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
1616#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
1617#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
1618
1619/*
1620 * EEPROM LED
1621 * POLARITY_RDY_G: Polarity RDY_G setting.
1622 * POLARITY_RDY_A: Polarity RDY_A setting.
1623 * POLARITY_ACT: Polarity ACT setting.
1624 * POLARITY_GPIO_0: Polarity GPIO0 setting.
1625 * POLARITY_GPIO_1: Polarity GPIO1 setting.
1626 * POLARITY_GPIO_2: Polarity GPIO2 setting.
1627 * POLARITY_GPIO_3: Polarity GPIO3 setting.
1628 * POLARITY_GPIO_4: Polarity GPIO4 setting.
1629 * LED_MODE: Led mode.
1630 */
1631#define EEPROM_LED1 0x001e
1632#define EEPROM_LED2 0x001f
1633#define EEPROM_LED3 0x0020
1634#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
1635#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
1636#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
1637#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008)
1638#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010)
1639#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020)
1640#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040)
1641#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080)
1642#define EEPROM_LED_LED_MODE FIELD16(0x1f00)
1643
1644/*
1645 * EEPROM LNA
1646 */
1647#define EEPROM_LNA 0x0022
1648#define EEPROM_LNA_BG FIELD16(0x00ff)
1649#define EEPROM_LNA_A0 FIELD16(0xff00)
1650
1651/*
1652 * EEPROM RSSI BG offset
1653 */
1654#define EEPROM_RSSI_BG 0x0023
1655#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
1656#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)
1657
1658/*
1659 * EEPROM RSSI BG2 offset
1660 */
1661#define EEPROM_RSSI_BG2 0x0024
1662#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
1663#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
1664
1665/*
1666 * EEPROM RSSI A offset
1667 */
1668#define EEPROM_RSSI_A 0x0025
1669#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
1670#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)
1671
1672/*
1673 * EEPROM RSSI A2 offset
1674 */
1675#define EEPROM_RSSI_A2 0x0026
1676#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
1677#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1678
1679/*
1680 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
1681 * This is delta in 40MHZ.
1682 * VALUE: Tx Power dalta value (MAX=4)
1683 * TYPE: 1: Plus the delta value, 0: minus the delta value
1684 * TXPOWER: Enable:
1685 */
1686#define EEPROM_TXPOWER_DELTA 0x0028
1687#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
1688#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
1689#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)
1690
1691/*
1692 * EEPROM TXPOWER 802.11BG
1693 */
1694#define EEPROM_TXPOWER_BG1 0x0029
1695#define EEPROM_TXPOWER_BG2 0x0030
1696#define EEPROM_TXPOWER_BG_SIZE 7
1697#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
1698#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
1699
1700/*
1701 * EEPROM TXPOWER 802.11A
1702 */
1703#define EEPROM_TXPOWER_A1 0x003c
1704#define EEPROM_TXPOWER_A2 0x0053
1705#define EEPROM_TXPOWER_A_SIZE 6
1706#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
1707#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
1708
1709/*
1710 * EEPROM TXpower byrate: 20MHZ power
1711 */
1712#define EEPROM_TXPOWER_BYRATE 0x006f
1713
1714/*
1715 * EEPROM BBP.
1716 */
1717#define EEPROM_BBP_START 0x0078
1718#define EEPROM_BBP_SIZE 16
1719#define EEPROM_BBP_VALUE FIELD16(0x00ff)
1720#define EEPROM_BBP_REG_ID FIELD16(0xff00)
1721
1722/*
1723 * MCU mailbox commands.
1724 */
1725#define MCU_SLEEP 0x30
1726#define MCU_WAKEUP 0x31
1727#define MCU_RADIO_OFF 0x35
1728#define MCU_CURRENT 0x36
1729#define MCU_LED 0x50
1730#define MCU_LED_STRENGTH 0x51
1731#define MCU_LED_1 0x52
1732#define MCU_LED_2 0x53
1733#define MCU_LED_3 0x54
1734#define MCU_RADAR 0x60
1735#define MCU_BOOT_SIGNAL 0x72
1736#define MCU_BBP_SIGNAL 0x80
1737#define MCU_POWER_SAVE 0x83
1738
/*
 * MCU mailbox tokens
 * NOTE(review): "WAKUP" is a historical typo of "WAKEUP"; the name is
 * kept unchanged so existing users of this header do not break.
 */
#define TOKEN_WAKUP 3
1743
1744/*
1745 * DMA descriptor defines. 78 * DMA descriptor defines.
1746 */ 79 */
1747#define TXD_DESC_SIZE ( 4 * sizeof(__le32) )
1748#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
1749#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) ) 81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
1750#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
1751#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
1752
1753/*
1754 * TX descriptor format for TX, PRIO and Beacon Ring.
1755 */
1756
1757/*
1758 * Word0
1759 */
1760#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)
1761
1762/*
1763 * Word1
1764 */
1765#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
1766#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
1767#define TXD_W1_BURST FIELD32(0x00008000)
1768#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
1769#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
1770#define TXD_W1_DMA_DONE FIELD32(0x80000000)
1771
1772/*
1773 * Word2
1774 */
1775#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)
1776
1777/*
1778 * Word3
1779 * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
1780 * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
1781 * 0:MGMT, 1:HCCA 2:EDCA
1782 */
1783#define TXD_W3_WIV FIELD32(0x01000000)
1784#define TXD_W3_QSEL FIELD32(0x06000000)
1785#define TXD_W3_TCO FIELD32(0x20000000)
1786#define TXD_W3_UCO FIELD32(0x40000000)
1787#define TXD_W3_ICO FIELD32(0x80000000)
1788 82
1789/* 83/*
1790 * TX Info structure 84 * TX Info structure
@@ -1807,52 +101,6 @@ struct mac_iveiv_entry {
1807#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000) 101#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
1808 102
1809/* 103/*
1810 * TX WI structure
1811 */
1812
1813/*
1814 * Word0
1815 * FRAG: 1 To inform TKIP engine this is a fragment.
1816 * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
1817 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
1818 * BW: Channel bandwidth 20MHz or 40 MHz
1819 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
1820 */
1821#define TXWI_W0_FRAG FIELD32(0x00000001)
1822#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
1823#define TXWI_W0_CF_ACK FIELD32(0x00000004)
1824#define TXWI_W0_TS FIELD32(0x00000008)
1825#define TXWI_W0_AMPDU FIELD32(0x00000010)
1826#define TXWI_W0_MPDU_DENSITY FIELD32(0x000000e0)
1827#define TXWI_W0_TX_OP FIELD32(0x00000300)
1828#define TXWI_W0_MCS FIELD32(0x007f0000)
1829#define TXWI_W0_BW FIELD32(0x00800000)
1830#define TXWI_W0_SHORT_GI FIELD32(0x01000000)
1831#define TXWI_W0_STBC FIELD32(0x06000000)
1832#define TXWI_W0_IFS FIELD32(0x08000000)
1833#define TXWI_W0_PHYMODE FIELD32(0xc0000000)
1834
1835/*
1836 * Word1
1837 */
1838#define TXWI_W1_ACK FIELD32(0x00000001)
1839#define TXWI_W1_NSEQ FIELD32(0x00000002)
1840#define TXWI_W1_BW_WIN_SIZE FIELD32(0x000000fc)
1841#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00)
1842#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
1843#define TXWI_W1_PACKETID FIELD32(0xf0000000)
1844
1845/*
1846 * Word2
1847 */
1848#define TXWI_W2_IV FIELD32(0xffffffff)
1849
1850/*
1851 * Word3
1852 */
1853#define TXWI_W3_EIV FIELD32(0xffffffff)
1854
1855/*
1856 * RX descriptor format for RX Ring. 104 * RX descriptor format for RX Ring.
1857 */ 105 */
1858 106
@@ -1867,85 +115,25 @@ struct mac_iveiv_entry {
1867 * AMSDU: rx with 802.3 header, not 802.11 header. 115 * AMSDU: rx with 802.3 header, not 802.11 header.
1868 */ 116 */
1869 117
1870#define RXD_W0_BA FIELD32(0x00000001) 118#define RXINFO_W0_BA FIELD32(0x00000001)
1871#define RXD_W0_DATA FIELD32(0x00000002) 119#define RXINFO_W0_DATA FIELD32(0x00000002)
1872#define RXD_W0_NULLDATA FIELD32(0x00000004) 120#define RXINFO_W0_NULLDATA FIELD32(0x00000004)
1873#define RXD_W0_FRAG FIELD32(0x00000008) 121#define RXINFO_W0_FRAG FIELD32(0x00000008)
1874#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010) 122#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010)
1875#define RXD_W0_MULTICAST FIELD32(0x00000020) 123#define RXINFO_W0_MULTICAST FIELD32(0x00000020)
1876#define RXD_W0_BROADCAST FIELD32(0x00000040) 124#define RXINFO_W0_BROADCAST FIELD32(0x00000040)
1877#define RXD_W0_MY_BSS FIELD32(0x00000080) 125#define RXINFO_W0_MY_BSS FIELD32(0x00000080)
1878#define RXD_W0_CRC_ERROR FIELD32(0x00000100) 126#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100)
1879#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600) 127#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600)
1880#define RXD_W0_AMSDU FIELD32(0x00000800) 128#define RXINFO_W0_AMSDU FIELD32(0x00000800)
1881#define RXD_W0_HTC FIELD32(0x00001000) 129#define RXINFO_W0_HTC FIELD32(0x00001000)
1882#define RXD_W0_RSSI FIELD32(0x00002000) 130#define RXINFO_W0_RSSI FIELD32(0x00002000)
1883#define RXD_W0_L2PAD FIELD32(0x00004000) 131#define RXINFO_W0_L2PAD FIELD32(0x00004000)
1884#define RXD_W0_AMPDU FIELD32(0x00008000) 132#define RXINFO_W0_AMPDU FIELD32(0x00008000)
1885#define RXD_W0_DECRYPTED FIELD32(0x00010000) 133#define RXINFO_W0_DECRYPTED FIELD32(0x00010000)
1886#define RXD_W0_PLCP_RSSI FIELD32(0x00020000) 134#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000)
1887#define RXD_W0_CIPHER_ALG FIELD32(0x00040000) 135#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000)
1888#define RXD_W0_LAST_AMSDU FIELD32(0x00080000) 136#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000)
1889#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000) 137#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000)
1890
1891/*
1892 * RX WI structure
1893 */
1894
1895/*
1896 * Word0
1897 */
1898#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
1899#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
1900#define RXWI_W0_BSSID FIELD32(0x00001c00)
1901#define RXWI_W0_UDF FIELD32(0x0000e000)
1902#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
1903#define RXWI_W0_TID FIELD32(0xf0000000)
1904
1905/*
1906 * Word1
1907 */
1908#define RXWI_W1_FRAG FIELD32(0x0000000f)
1909#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
1910#define RXWI_W1_MCS FIELD32(0x007f0000)
1911#define RXWI_W1_BW FIELD32(0x00800000)
1912#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
1913#define RXWI_W1_STBC FIELD32(0x06000000)
1914#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
1915
1916/*
1917 * Word2
1918 */
1919#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
1920#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
1921#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
1922
1923/*
1924 * Word3
1925 */
1926#define RXWI_W3_SNR0 FIELD32(0x000000ff)
1927#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
1928
1929/*
1930 * Macros for converting txpower from EEPROM to mac80211 value
1931 * and from mac80211 value to register value.
1932 */
1933#define MIN_G_TXPOWER 0
1934#define MIN_A_TXPOWER -7
1935#define MAX_G_TXPOWER 31
1936#define MAX_A_TXPOWER 15
1937#define DEFAULT_TXPOWER 5
1938
1939#define TXPOWER_G_FROM_DEV(__txpower) \
1940 ((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1941
1942#define TXPOWER_G_TO_DEV(__txpower) \
1943 clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
1944
1945#define TXPOWER_A_FROM_DEV(__txpower) \
1946 ((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
1947
1948#define TXPOWER_A_TO_DEV(__txpower) \
1949 clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
1950 138
1951#endif /* RT2800USB_H */ 139#endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 27bc6b7fbfde..1cbb7ac2f32f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -144,6 +145,11 @@ struct avg_val {
144 int avg_weight; 145 int avg_weight;
145}; 146};
146 147
148enum rt2x00_chip_intf {
149 RT2X00_CHIP_INTF_PCI,
150 RT2X00_CHIP_INTF_USB,
151};
152
147/* 153/*
148 * Chipset identification 154 * Chipset identification
149 * The chipset on the device is composed of a RT and RF chip. 155 * The chipset on the device is composed of a RT and RF chip.
@@ -158,10 +164,20 @@ struct rt2x00_chip {
158#define RT2561 0x0302 164#define RT2561 0x0302
159#define RT2661 0x0401 165#define RT2661 0x0401
160#define RT2571 0x1300 166#define RT2571 0x1300
167#define RT2860 0x0601 /* 2.4GHz PCI/CB */
168#define RT2860D 0x0681 /* 2.4GHz, 5GHz PCI/CB */
169#define RT2890 0x0701 /* 2.4GHz PCIe */
170#define RT2890D 0x0781 /* 2.4GHz, 5GHz PCIe */
171#define RT2880 0x2880 /* WSOC */
172#define RT3052 0x3052 /* WSOC */
173#define RT3090 0x3090 /* 2.4GHz PCIe */
161#define RT2870 0x1600 174#define RT2870 0x1600
175#define RT3070 0x1800
162 176
163 u16 rf; 177 u16 rf;
164 u32 rev; 178 u32 rev;
179
180 enum rt2x00_chip_intf intf;
165}; 181};
166 182
167/* 183/*
@@ -299,13 +315,6 @@ struct link {
299 struct avg_val avg_rssi; 315 struct avg_val avg_rssi;
300 316
301 /* 317 /*
302 * Currently precalculated percentages of successful
303 * TX and RX frames.
304 */
305 int rx_percentage;
306 int tx_percentage;
307
308 /*
309 * Work structure for scheduling periodic link tuning. 318 * Work structure for scheduling periodic link tuning.
310 */ 319 */
311 struct delayed_work work; 320 struct delayed_work work;
@@ -835,9 +844,23 @@ struct rt2x00_dev {
835 * Firmware image. 844 * Firmware image.
836 */ 845 */
837 const struct firmware *fw; 846 const struct firmware *fw;
847
848 /*
849 * Driver specific data.
850 */
851 void *priv;
838}; 852};
839 853
840/* 854/*
855 * Register defines.
856 * Some registers require multiple attempts before success,
857 * in those cases REGISTER_BUSY_COUNT attempts should be
858 * taken with a REGISTER_BUSY_DELAY interval.
859 */
860#define REGISTER_BUSY_COUNT 5
861#define REGISTER_BUSY_DELAY 100
862
863/*
841 * Generic RF access. 864 * Generic RF access.
842 * The RF is being accessed by word index. 865 * The RF is being accessed by word index.
843 */ 866 */
@@ -883,10 +906,6 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
883static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev, 906static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
884 const u16 rt, const u16 rf, const u32 rev) 907 const u16 rt, const u16 rf, const u32 rev)
885{ 908{
886 INFO(rt2x00dev,
887 "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n",
888 rt, rf, rev);
889
890 rt2x00dev->chip.rt = rt; 909 rt2x00dev->chip.rt = rt;
891 rt2x00dev->chip.rf = rf; 910 rt2x00dev->chip.rf = rf;
892 rt2x00dev->chip.rev = rev; 911 rt2x00dev->chip.rev = rev;
@@ -904,6 +923,13 @@ static inline void rt2x00_set_chip_rf(struct rt2x00_dev *rt2x00dev,
904 rt2x00_set_chip(rt2x00dev, rt2x00dev->chip.rt, rf, rev); 923 rt2x00_set_chip(rt2x00dev, rt2x00dev->chip.rt, rf, rev);
905} 924}
906 925
926static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
927{
928 INFO(rt2x00dev,
929 "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n",
930 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
931}
932
907static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip) 933static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip)
908{ 934{
909 return (chipset->rt == chip); 935 return (chipset->rt == chip);
@@ -925,6 +951,28 @@ static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
925 return ((chipset->rev & mask) == rev); 951 return ((chipset->rev & mask) == rev);
926} 952}
927 953
954static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
955 enum rt2x00_chip_intf intf)
956{
957 rt2x00dev->chip.intf = intf;
958}
959
960static inline bool rt2x00_intf(const struct rt2x00_chip *chipset,
961 enum rt2x00_chip_intf intf)
962{
963 return (chipset->intf == intf);
964}
965
966static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
967{
968 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI);
969}
970
971static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
972{
973 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB);
974}
975
928/** 976/**
929 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes. 977 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
930 * @rt2x00dev: Pointer to &struct rt2x00_dev. 978 * @rt2x00dev: Pointer to &struct rt2x00_dev.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 40a201e2e151..098315a271ca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index de36837dcf86..d291c7862e10 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 68bc9bb1dbf9..7d323a763b54 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index 035cbc98c593..fa11409cb5c6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 73bbec58341e..4a4b7e42fe6e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -205,6 +205,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
205 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb); 205 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb);
206 unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 206 unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
207 u8 rate_idx, rate_flags, retry_rates; 207 u8 rate_idx, rate_flags, retry_rates;
208 u8 skbdesc_flags = skbdesc->flags;
208 unsigned int i; 209 unsigned int i;
209 bool success; 210 bool success;
210 211
@@ -287,12 +288,12 @@ void rt2x00lib_txdone(struct queue_entry *entry,
287 } 288 }
288 289
289 /* 290 /*
290 * Only send the status report to mac80211 when TX status was 291 * Only send the status report to mac80211 when it's a frame
291 * requested by it. If this was a extra frame coming through 292 * that originated in mac80211. If this was a extra frame coming
292 * a mac80211 library call (RTS/CTS) then we should not send the 293 * through a mac80211 library call (RTS/CTS) then we should not
293 * status report back. 294 * send the status report back.
294 */ 295 */
295 if (tx_info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) 296 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211))
296 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb); 297 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb);
297 else 298 else
298 dev_kfree_skb_irq(entry->skb); 299 dev_kfree_skb_irq(entry->skb);
@@ -430,7 +431,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
430 431
431 rx_status->mactime = rxdesc.timestamp; 432 rx_status->mactime = rxdesc.timestamp;
432 rx_status->rate_idx = rate_idx; 433 rx_status->rate_idx = rate_idx;
433 rx_status->qual = rt2x00link_calculate_signal(rt2x00dev, rxdesc.rssi);
434 rx_status->signal = rxdesc.rssi; 434 rx_status->signal = rxdesc.rssi;
435 rx_status->noise = rxdesc.noise; 435 rx_status->noise = rxdesc.noise;
436 rx_status->flag = rxdesc.flags; 436 rx_status->flag = rxdesc.flags;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index fdedb5122928..727019a748e7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index d2deea2f2679..34beb00c4347 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index e3cec839e540..1056c92143a8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index 49671fed91d7..ca585e34d00e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
index 1046977e6a12..3b46f0c3332a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.h
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -33,8 +33,6 @@ enum led_type {
33 LED_TYPE_QUALITY, 33 LED_TYPE_QUALITY,
34}; 34};
35 35
36#ifdef CONFIG_RT2X00_LIB_LEDS
37
38struct rt2x00_led { 36struct rt2x00_led {
39 struct rt2x00_dev *rt2x00dev; 37 struct rt2x00_dev *rt2x00dev;
40 struct led_classdev led_dev; 38 struct led_classdev led_dev;
@@ -45,6 +43,4 @@ struct rt2x00_led {
45#define LED_REGISTERED ( 1 << 1 ) 43#define LED_REGISTERED ( 1 << 1 )
46}; 44};
47 45
48#endif /* CONFIG_RT2X00_LIB_LEDS */
49
50#endif /* RT2X00LEDS_H */ 46#endif /* RT2X00LEDS_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 567f029a8cda..be2e37fb4071 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -161,8 +162,10 @@ void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length);
161 * rt2x00queue_write_tx_frame - Write TX frame to hardware 162 * rt2x00queue_write_tx_frame - Write TX frame to hardware
162 * @queue: Queue over which the frame should be send 163 * @queue: Queue over which the frame should be send
163 * @skb: The skb to send 164 * @skb: The skb to send
165 * @local: frame is not from mac80211
164 */ 166 */
165int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb); 167int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
168 bool local);
166 169
167/** 170/**
168 * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware 171 * rt2x00queue_update_beacon - Send new beacon from mac80211 to hardware
@@ -223,19 +226,6 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
223 struct rxdone_entry_desc *rxdesc); 226 struct rxdone_entry_desc *rxdesc);
224 227
225/** 228/**
226 * rt2x00link_calculate_signal - Calculate signal quality
227 * @rt2x00dev: Pointer to &struct rt2x00_dev.
228 * @rssi: RX Frame RSSI
229 *
230 * Calculate the signal quality of a frame based on the rssi
231 * measured during the receiving of the frame and the global
232 * link quality statistics measured since the start of the
233 * link tuning. The result is a value between 0 and 100 which
234 * is an indication of the signal quality.
235 */
236int rt2x00link_calculate_signal(struct rt2x00_dev *rt2x00dev, int rssi);
237
238/**
239 * rt2x00link_start_tuner - Start periodic link tuner work 229 * rt2x00link_start_tuner - Start periodic link tuner work
240 * @rt2x00dev: Pointer to &struct rt2x00_dev. 230 * @rt2x00dev: Pointer to &struct rt2x00_dev.
241 * 231 *
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index c708d0be9155..0efbf5a6c254 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -36,24 +36,6 @@
36#define DEFAULT_RSSI -128 36#define DEFAULT_RSSI -128
37 37
38/* 38/*
39 * When no TX/RX percentage could be calculated due to lack of
40 * frames on the air, we fallback to a percentage of 50%.
41 * This will assure we will get at least get some decent value
42 * when the link tuner starts.
43 * The value will be dropped and overwritten with the correct (measured)
44 * value anyway during the first run of the link tuner.
45 */
46#define DEFAULT_PERCENTAGE 50
47
48/*
49 * Small helper macro for percentage calculation
50 * This is a very simple macro with the only catch that it will
51 * produce a default value in case no total value was provided.
52 */
53#define PERCENTAGE(__value, __total) \
54 ( (__total) ? (((__value) * 100) / (__total)) : (DEFAULT_PERCENTAGE) )
55
56/*
57 * Helper struct and macro to work with moving/walking averages. 39 * Helper struct and macro to work with moving/walking averages.
58 * When adding a value to the average value the following calculation 40 * When adding a value to the average value the following calculation
59 * is needed: 41 * is needed:
@@ -91,27 +73,6 @@
91 __new; \ 73 __new; \
92}) 74})
93 75
94/*
95 * For calculating the Signal quality we have determined
96 * the total number of success and failed RX and TX frames.
97 * With the addition of the average RSSI value we can determine
98 * the link quality using the following algorithm:
99 *
100 * rssi_percentage = (avg_rssi * 100) / rssi_offset
101 * rx_percentage = (rx_success * 100) / rx_total
102 * tx_percentage = (tx_success * 100) / tx_total
103 * avg_signal = ((WEIGHT_RSSI * avg_rssi) +
104 * (WEIGHT_TX * tx_percentage) +
105 * (WEIGHT_RX * rx_percentage)) / 100
106 *
107 * This value should then be checked to not be greater then 100.
108 * This means the values of WEIGHT_RSSI, WEIGHT_RX, WEIGHT_TX must
109 * sum up to 100 as well.
110 */
111#define WEIGHT_RSSI 20
112#define WEIGHT_RX 40
113#define WEIGHT_TX 40
114
115static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev) 76static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev)
116{ 77{
117 struct link_ant *ant = &rt2x00dev->link.ant; 78 struct link_ant *ant = &rt2x00dev->link.ant;
@@ -304,46 +265,6 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
304 ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi); 265 ant->rssi_ant = MOVING_AVERAGE(ant->rssi_ant, rxdesc->rssi);
305} 266}
306 267
307static void rt2x00link_precalculate_signal(struct rt2x00_dev *rt2x00dev)
308{
309 struct link *link = &rt2x00dev->link;
310 struct link_qual *qual = &rt2x00dev->link.qual;
311
312 link->rx_percentage =
313 PERCENTAGE(qual->rx_success, qual->rx_failed + qual->rx_success);
314 link->tx_percentage =
315 PERCENTAGE(qual->tx_success, qual->tx_failed + qual->tx_success);
316}
317
318int rt2x00link_calculate_signal(struct rt2x00_dev *rt2x00dev, int rssi)
319{
320 struct link *link = &rt2x00dev->link;
321 int rssi_percentage = 0;
322 int signal;
323
324 /*
325 * We need a positive value for the RSSI.
326 */
327 if (rssi < 0)
328 rssi += rt2x00dev->rssi_offset;
329
330 /*
331 * Calculate the different percentages,
332 * which will be used for the signal.
333 */
334 rssi_percentage = PERCENTAGE(rssi, rt2x00dev->rssi_offset);
335
336 /*
337 * Add the individual percentages and use the WEIGHT
338 * defines to calculate the current link signal.
339 */
340 signal = ((WEIGHT_RSSI * rssi_percentage) +
341 (WEIGHT_TX * link->tx_percentage) +
342 (WEIGHT_RX * link->rx_percentage)) / 100;
343
344 return max_t(int, signal, 100);
345}
346
347void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) 268void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
348{ 269{
349 struct link *link = &rt2x00dev->link; 270 struct link *link = &rt2x00dev->link;
@@ -357,9 +278,6 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
357 if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count) 278 if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)
358 return; 279 return;
359 280
360 link->rx_percentage = DEFAULT_PERCENTAGE;
361 link->tx_percentage = DEFAULT_PERCENTAGE;
362
363 rt2x00link_reset_tuner(rt2x00dev, false); 281 rt2x00link_reset_tuner(rt2x00dev, false);
364 282
365 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 283 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
@@ -448,12 +366,6 @@ static void rt2x00link_tuner(struct work_struct *work)
448 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count); 366 rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count);
449 367
450 /* 368 /*
451 * Precalculate a portion of the link signal which is
452 * in based on the tx/rx success/failure counters.
453 */
454 rt2x00link_precalculate_signal(rt2x00dev);
455
456 /*
457 * Send a signal to the led to update the led signal strength. 369 * Send a signal to the led to update the led signal strength.
458 */ 370 */
459 rt2x00leds_led_quality(rt2x00dev, qual->rssi); 371 rt2x00leds_led_quality(rt2x00dev, qual->rssi);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 929b85f34f38..9c90ceb0ffcc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -66,7 +66,6 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
66 rts_info = IEEE80211_SKB_CB(skb); 66 rts_info = IEEE80211_SKB_CB(skb);
67 rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; 67 rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
68 rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT; 68 rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;
69 rts_info->flags &= ~IEEE80211_TX_CTL_REQ_TX_STATUS;
70 69
71 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) 70 if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
72 rts_info->flags |= IEEE80211_TX_CTL_NO_ACK; 71 rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
@@ -91,7 +90,7 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
91 frag_skb->data, data_length, tx_info, 90 frag_skb->data, data_length, tx_info,
92 (struct ieee80211_rts *)(skb->data)); 91 (struct ieee80211_rts *)(skb->data));
93 92
94 retval = rt2x00queue_write_tx_frame(queue, skb); 93 retval = rt2x00queue_write_tx_frame(queue, skb, true);
95 if (retval) { 94 if (retval) {
96 dev_kfree_skb_any(skb); 95 dev_kfree_skb_any(skb);
97 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 96 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
@@ -153,7 +152,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
153 goto exit_fail; 152 goto exit_fail;
154 } 153 }
155 154
156 if (rt2x00queue_write_tx_frame(queue, skb)) 155 if (rt2x00queue_write_tx_frame(queue, skb, false))
157 goto exit_fail; 156 goto exit_fail;
158 157
159 if (rt2x00queue_threshold(queue)) 158 if (rt2x00queue_threshold(queue))
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index cdd5154bd4c0..0feb4d0e4668 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -310,6 +310,8 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
310 rt2x00dev->irq = pci_dev->irq; 310 rt2x00dev->irq = pci_dev->irq;
311 rt2x00dev->name = pci_name(pci_dev); 311 rt2x00dev->name = pci_name(pci_dev);
312 312
313 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
314
313 /* 315 /*
314 * Determine RT chipset by reading PCI header. 316 * Determine RT chipset by reading PCI header.
315 */ 317 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 15a12487e04b..d4f9449ab0a4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -35,15 +35,6 @@
35#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) 35#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
36 36
37/* 37/*
38 * Register defines.
39 * Some registers require multiple attempts before success,
40 * in those cases REGISTER_BUSY_COUNT attempts should be
41 * taken with a REGISTER_BUSY_DELAY interval.
42 */
43#define REGISTER_BUSY_COUNT 5
44#define REGISTER_BUSY_DELAY 100
45
46/*
47 * Register access. 38 * Register access.
48 */ 39 */
49static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, 40static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
@@ -53,10 +44,9 @@ static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
53 *value = readl(rt2x00dev->csr.base + offset); 44 *value = readl(rt2x00dev->csr.base + offset);
54} 45}
55 46
56static inline void 47static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
57rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, 48 const unsigned int offset,
58 const unsigned int offset, 49 void *value, const u32 length)
59 void *value, const u16 length)
60{ 50{
61 memcpy_fromio(value, rt2x00dev->csr.base + offset, length); 51 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
62} 52}
@@ -68,10 +58,10 @@ static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
68 writel(value, rt2x00dev->csr.base + offset); 58 writel(value, rt2x00dev->csr.base + offset);
69} 59}
70 60
71static inline void 61static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
72rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, 62 const unsigned int offset,
73 const unsigned int offset, 63 const void *value,
74 const void *value, const u16 length) 64 const u32 length)
75{ 65{
76 memcpy_toio(rt2x00dev->csr.base + offset, value, length); 66 memcpy_toio(rt2x00dev->csr.base + offset, value, length);
77} 67}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 577029efe320..eaedee8c05c8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -1,5 +1,6 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Gertjan van Wingerde <gwingerde@gmail.com>
3 <http://rt2x00.serialmonkey.com> 4 <http://rt2x00.serialmonkey.com>
4 5
5 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
@@ -453,7 +454,8 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
453 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid); 454 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
454} 455}
455 456
456int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb) 457int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
458 bool local)
457{ 459{
458 struct ieee80211_tx_info *tx_info; 460 struct ieee80211_tx_info *tx_info;
459 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX); 461 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
@@ -494,6 +496,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
494 skbdesc->tx_rate_idx = rate_idx; 496 skbdesc->tx_rate_idx = rate_idx;
495 skbdesc->tx_rate_flags = rate_flags; 497 skbdesc->tx_rate_flags = rate_flags;
496 498
499 if (local)
500 skbdesc->flags |= SKBDESC_NOT_MAC80211;
501
497 /* 502 /*
498 * When hardware encryption is supported, and this frame 503 * When hardware encryption is supported, and this frame
499 * is to be encrypted, we should strip the IV/EIV data from 504 * is to be encrypted, we should strip the IV/EIV data from
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index a5591fb2b191..70775e5ba1ac 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -94,12 +94,15 @@ enum data_queue_qid {
94 * mac80211 but was stripped for processing by the driver. 94 * mac80211 but was stripped for processing by the driver.
95 * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment, 95 * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
96 * the padded bytes are located between header and payload. 96 * the padded bytes are located between header and payload.
97 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
98 * don't try to pass it back.
97 */ 99 */
98enum skb_frame_desc_flags { 100enum skb_frame_desc_flags {
99 SKBDESC_DMA_MAPPED_RX = 1 << 0, 101 SKBDESC_DMA_MAPPED_RX = 1 << 0,
100 SKBDESC_DMA_MAPPED_TX = 1 << 1, 102 SKBDESC_DMA_MAPPED_TX = 1 << 1,
101 SKBDESC_IV_STRIPPED = 1 << 2, 103 SKBDESC_IV_STRIPPED = 1 << 2,
102 SKBDESC_L2_PADDED = 1 << 3 104 SKBDESC_L2_PADDED = 1 << 3,
105 SKBDESC_NOT_MAC80211 = 1 << 4,
103}; 106};
104 107
105/** 108/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 983e52e127a7..603bfc0adaa3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
new file mode 100644
index 000000000000..19e684f8ffa1
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -0,0 +1,165 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 Copyright (C) 2004 - 2009 Felix Fietkau <nbd@openwrt.org>
4 <http://rt2x00.serialmonkey.com>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the
18 Free Software Foundation, Inc.,
19 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22/*
23 Module: rt2x00soc
24 Abstract: rt2x00 generic soc device routines.
25 */
26
27#include <linux/bug.h>
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/platform_device.h>
31
32#include "rt2x00.h"
33#include "rt2x00soc.h"
34
35static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
36{
37 kfree(rt2x00dev->rf);
38 rt2x00dev->rf = NULL;
39
40 kfree(rt2x00dev->eeprom);
41 rt2x00dev->eeprom = NULL;
42}
43
44static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
45{
46 struct platform_device *pdev = to_platform_device(rt2x00dev->dev);
47 struct resource *res;
48
49 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
50 if (!res)
51 return -ENODEV;
52
53 rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start);
54 if (!rt2x00dev->csr.base)
55 goto exit;
56
57 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
58 if (!rt2x00dev->eeprom)
59 goto exit;
60
61 rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
62 if (!rt2x00dev->rf)
63 goto exit;
64
65 return 0;
66
67exit:
68 ERROR_PROBE("Failed to allocate registers.\n");
69 rt2x00soc_free_reg(rt2x00dev);
70
71 return -ENOMEM;
72}
73
74int rt2x00soc_probe(struct platform_device *pdev,
75 const unsigned short chipset,
76 const struct rt2x00_ops *ops)
77{
78 struct ieee80211_hw *hw;
79 struct rt2x00_dev *rt2x00dev;
80 int retval;
81
82 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
83 if (!hw) {
84 ERROR_PROBE("Failed to allocate hardware.\n");
85 return -ENOMEM;
86 }
87
88 platform_set_drvdata(pdev, hw);
89
90 rt2x00dev = hw->priv;
91 rt2x00dev->dev = &pdev->dev;
92 rt2x00dev->ops = ops;
93 rt2x00dev->hw = hw;
94 rt2x00dev->irq = platform_get_irq(pdev, 0);
95 rt2x00dev->name = pdev->dev.driver->name;
96
97 /*
98 * SoC devices mimic PCI behavior.
99 */
100 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
101
102 rt2x00_set_chip_rt(rt2x00dev, chipset);
103
104 retval = rt2x00soc_alloc_reg(rt2x00dev);
105 if (retval)
106 goto exit_free_device;
107
108 retval = rt2x00lib_probe_dev(rt2x00dev);
109 if (retval)
110 goto exit_free_reg;
111
112 return 0;
113
114exit_free_reg:
115 rt2x00soc_free_reg(rt2x00dev);
116
117exit_free_device:
118 ieee80211_free_hw(hw);
119
120 return retval;
121}
122
123int rt2x00soc_remove(struct platform_device *pdev)
124{
125 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
126 struct rt2x00_dev *rt2x00dev = hw->priv;
127
128 /*
129 * Free all allocated data.
130 */
131 rt2x00lib_remove_dev(rt2x00dev);
132 rt2x00soc_free_reg(rt2x00dev);
133 ieee80211_free_hw(hw);
134
135 return 0;
136}
137EXPORT_SYMBOL_GPL(rt2x00soc_remove);
138
139#ifdef CONFIG_PM
140int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state)
141{
142 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
143 struct rt2x00_dev *rt2x00dev = hw->priv;
144
145 return rt2x00lib_suspend(rt2x00dev, state);
146}
147EXPORT_SYMBOL_GPL(rt2x00soc_suspend);
148
149int rt2x00soc_resume(struct platform_device *pdev)
150{
151 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
152 struct rt2x00_dev *rt2x00dev = hw->priv;
153
154 return rt2x00lib_resume(rt2x00dev);
155}
156EXPORT_SYMBOL_GPL(rt2x00soc_resume);
157#endif /* CONFIG_PM */
158
159/*
160 * rt2x00soc module information.
161 */
162MODULE_AUTHOR(DRV_PROJECT);
163MODULE_VERSION(DRV_VERSION);
164MODULE_DESCRIPTION("rt2x00 soc library");
165MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
new file mode 100644
index 000000000000..8a3416624af5
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -0,0 +1,52 @@
1/*
2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00soc
23 Abstract: Data structures for the rt2x00soc module.
24 */
25
26#ifndef RT2X00SOC_H
27#define RT2X00SOC_H
28
29#define KSEG1ADDR(__ptr) __ptr
30
31#define __rt2x00soc_probe(__chipset, __ops) \
32static int __rt2x00soc_probe(struct platform_device *pdev) \
33{ \
34 return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
35}
36
37/*
38 * SoC driver handlers.
39 */
40int rt2x00soc_probe(struct platform_device *pdev,
41 const unsigned short chipset,
42 const struct rt2x00_ops *ops);
43int rt2x00soc_remove(struct platform_device *pdev);
44#ifdef CONFIG_PM
45int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
46int rt2x00soc_resume(struct platform_device *pdev);
47#else
48#define rt2x00soc_suspend NULL
49#define rt2x00soc_resume NULL
50#endif /* CONFIG_PM */
51
52#endif /* RT2X00SOC_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f02b48a90593..0a751e73aa0f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
160 160
161int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, 161int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
162 const unsigned int offset, 162 const unsigned int offset,
163 struct rt2x00_field32 field, 163 const struct rt2x00_field32 field,
164 u32 *reg) 164 u32 *reg)
165{ 165{
166 unsigned int i; 166 unsigned int i;
@@ -653,6 +653,8 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
653 rt2x00dev->ops = ops; 653 rt2x00dev->ops = ops;
654 rt2x00dev->hw = hw; 654 rt2x00dev->hw = hw;
655 655
656 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
657
656 retval = rt2x00usb_alloc_reg(rt2x00dev); 658 retval = rt2x00usb_alloc_reg(rt2x00dev);
657 if (retval) 659 if (retval)
658 goto exit_free_device; 660 goto exit_free_device;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index bd2d59c85f1b..3da6841b5d42 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -26,6 +26,8 @@
26#ifndef RT2X00USB_H 26#ifndef RT2X00USB_H
27#define RT2X00USB_H 27#define RT2X00USB_H
28 28
29#include <linux/usb.h>
30
29#define to_usb_device_intf(d) \ 31#define to_usb_device_intf(d) \
30({ \ 32({ \
31 struct usb_interface *intf = to_usb_interface(d); \ 33 struct usb_interface *intf = to_usb_interface(d); \
@@ -39,17 +41,11 @@
39#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) 41#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops)
40 42
41/* 43/*
42 * Register defines.
43 * Some registers require multiple attempts before success,
44 * in those cases REGISTER_BUSY_COUNT attempts should be
45 * taken with a REGISTER_BUSY_DELAY interval.
46 * For USB vendor requests we need to pass a timeout 44 * For USB vendor requests we need to pass a timeout
47 * time in ms, for this we use the REGISTER_TIMEOUT, 45 * time in ms, for this we use the REGISTER_TIMEOUT,
48 * however when loading firmware a higher value is 46 * however when loading firmware a higher value is
49 * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE. 47 * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE.
50 */ 48 */
51#define REGISTER_BUSY_COUNT 5
52#define REGISTER_BUSY_DELAY 100
53#define REGISTER_TIMEOUT 500 49#define REGISTER_TIMEOUT 500
54#define REGISTER_TIMEOUT_FIRMWARE 1000 50#define REGISTER_TIMEOUT_FIRMWARE 1000
55 51
@@ -232,7 +228,7 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
232} 228}
233 229
234/** 230/**
235 * rt2x00usb_regbusy_read - Read 32bit register word 231 * rt2x00usb_register_read - Read 32bit register word
236 * @rt2x00dev: Device pointer, see &struct rt2x00_dev. 232 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
237 * @offset: Register offset 233 * @offset: Register offset
238 * @value: Pointer to where register contents should be stored 234 * @value: Pointer to where register contents should be stored
@@ -340,12 +336,13 @@ static inline void rt2x00usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
340 * through rt2x00usb_vendor_request_buff(). 336 * through rt2x00usb_vendor_request_buff().
341 */ 337 */
342static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, 338static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
343 const unsigned int offset, 339 const unsigned int offset,
344 void *value, const u32 length) 340 const void *value,
341 const u32 length)
345{ 342{
346 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, 343 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
347 USB_VENDOR_REQUEST_OUT, offset, 344 USB_VENDOR_REQUEST_OUT, offset,
348 value, length, 345 (void *)value, length,
349 REGISTER_TIMEOUT32(length)); 346 REGISTER_TIMEOUT32(length));
350} 347}
351 348
@@ -364,7 +361,7 @@ static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
364 */ 361 */
365int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, 362int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
366 const unsigned int offset, 363 const unsigned int offset,
367 struct rt2x00_field32 field, 364 const struct rt2x00_field32 field,
368 u32 *reg); 365 u32 *reg);
369 366
370/* 367/*
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index b20e3eac9d67..bf04605896c7 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -51,7 +51,7 @@ MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51 * These indirect registers work with busy bits, 51 * These indirect registers work with busy bits,
52 * and we will try maximal REGISTER_BUSY_COUNT times to access 52 * and we will try maximal REGISTER_BUSY_COUNT times to access
53 * the register while taking a REGISTER_BUSY_DELAY us delay 53 * the register while taking a REGISTER_BUSY_DELAY us delay
54 * between each attampt. When the busy bit is still set at that time, 54 * between each attempt. When the busy bit is still set at that time,
55 * the access attempt is considered to have failed, 55 * the access attempt is considered to have failed,
56 * and we will print an error. 56 * and we will print an error.
57 */ 57 */
@@ -386,7 +386,7 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
386 * The driver does not support the IV/EIV generation 386 * The driver does not support the IV/EIV generation
387 * in hardware. However it doesn't support the IV/EIV 387 * in hardware. However it doesn't support the IV/EIV
388 * inside the ieee80211 frame either, but requires it 388 * inside the ieee80211 frame either, but requires it
389 * to be provided seperately for the descriptor. 389 * to be provided separately for the descriptor.
390 * rt2x00lib will cut the IV/EIV data out of all frames 390 * rt2x00lib will cut the IV/EIV data out of all frames
391 * given to us by mac80211, but we must tell mac80211 391 * given to us by mac80211, but we must tell mac80211
392 * to generate the IV/EIV data. 392 * to generate the IV/EIV data.
@@ -397,7 +397,7 @@ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev,
397 /* 397 /*
398 * SEC_CSR0 contains only single-bit fields to indicate 398 * SEC_CSR0 contains only single-bit fields to indicate
399 * a particular key is valid. Because using the FIELD32() 399 * a particular key is valid. Because using the FIELD32()
400 * defines directly will cause a lot of overhead we use 400 * defines directly will cause a lot of overhead, we use
401 * a calculation to determine the correct bit directly. 401 * a calculation to determine the correct bit directly.
402 */ 402 */
403 mask = 1 << key->hw_key_idx; 403 mask = 1 << key->hw_key_idx;
@@ -425,11 +425,11 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
425 /* 425 /*
426 * rt2x00lib can't determine the correct free 426 * rt2x00lib can't determine the correct free
427 * key_idx for pairwise keys. We have 2 registers 427 * key_idx for pairwise keys. We have 2 registers
428 * with key valid bits. The goal is simple, read 428 * with key valid bits. The goal is simple: read
429 * the first register, if that is full move to 429 * the first register. If that is full, move to
430 * the next register. 430 * the next register.
431 * When both registers are full, we drop the key, 431 * When both registers are full, we drop the key.
432 * otherwise we use the first invalid entry. 432 * Otherwise, we use the first invalid entry.
433 */ 433 */
434 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg); 434 rt2x00pci_register_read(rt2x00dev, SEC_CSR2, &reg);
435 if (reg && reg == ~0) { 435 if (reg && reg == ~0) {
@@ -464,8 +464,8 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
464 &addr_entry, sizeof(addr_entry)); 464 &addr_entry, sizeof(addr_entry));
465 465
466 /* 466 /*
467 * Enable pairwise lookup table for given BSS idx, 467 * Enable pairwise lookup table for given BSS idx.
468 * without this received frames will not be decrypted 468 * Without this, received frames will not be decrypted
469 * by the hardware. 469 * by the hardware.
470 */ 470 */
471 rt2x00pci_register_read(rt2x00dev, SEC_CSR4, &reg); 471 rt2x00pci_register_read(rt2x00dev, SEC_CSR4, &reg);
@@ -487,7 +487,7 @@ static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
487 /* 487 /*
488 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate 488 * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate
489 * a particular key is valid. Because using the FIELD32() 489 * a particular key is valid. Because using the FIELD32()
490 * defines directly will cause a lot of overhead we use 490 * defines directly will cause a lot of overhead, we use
491 * a calculation to determine the correct bit directly. 491 * a calculation to determine the correct bit directly.
492 */ 492 */
493 if (key->hw_key_idx < 32) { 493 if (key->hw_key_idx < 32) {
@@ -556,7 +556,7 @@ static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
556 if (flags & CONFIG_UPDATE_TYPE) { 556 if (flags & CONFIG_UPDATE_TYPE) {
557 /* 557 /*
558 * Clear current synchronisation setup. 558 * Clear current synchronisation setup.
559 * For the Beacon base registers we only need to clear 559 * For the Beacon base registers, we only need to clear
560 * the first byte since that byte contains the VALID and OWNER 560 * the first byte since that byte contains the VALID and OWNER
561 * bits which (when set to 0) will invalidate the entire beacon. 561 * bits which (when set to 0) will invalidate the entire beacon.
562 */ 562 */
@@ -1168,8 +1168,8 @@ static int rt61pci_check_firmware(struct rt2x00_dev *rt2x00dev,
1168 return FW_BAD_LENGTH; 1168 return FW_BAD_LENGTH;
1169 1169
1170 /* 1170 /*
1171 * The last 2 bytes in the firmware array are the crc checksum itself, 1171 * The last 2 bytes in the firmware array are the crc checksum itself.
1172 * this means that we should never pass those 2 bytes to the crc 1172 * This means that we should never pass those 2 bytes to the crc
1173 * algorithm. 1173 * algorithm.
1174 */ 1174 */
1175 fw_crc = (data[len - 2] << 8 | data[len - 1]); 1175 fw_crc = (data[len - 2] << 8 | data[len - 1]);
@@ -1986,7 +1986,7 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1986 1986
1987 /* 1987 /*
1988 * Hardware has stripped IV/EIV data from 802.11 frame during 1988 * Hardware has stripped IV/EIV data from 802.11 frame during
1989 * decryption. It has provided the data seperately but rt2x00lib 1989 * decryption. It has provided the data separately but rt2x00lib
1990 * should decide if it should be reinserted. 1990 * should decide if it should be reinserted.
1991 */ 1991 */
1992 rxdesc->flags |= RX_FLAG_IV_STRIPPED; 1992 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
@@ -2042,7 +2042,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2042 * During each loop we will compare the freshly read 2042 * During each loop we will compare the freshly read
2043 * STA_CSR4 register value with the value read from 2043 * STA_CSR4 register value with the value read from
2044 * the previous loop. If the 2 values are equal then 2044 * the previous loop. If the 2 values are equal then
2045 * we should stop processing because the chance it 2045 * we should stop processing because the chance is
2046 * quite big that the device has been unplugged and 2046 * quite big that the device has been unplugged and
2047 * we risk going into an endless loop. 2047 * we risk going into an endless loop.
2048 */ 2048 */
@@ -2300,6 +2300,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2300 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 2300 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
2301 rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg); 2301 rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
2302 rt2x00_set_chip_rf(rt2x00dev, value, reg); 2302 rt2x00_set_chip_rf(rt2x00dev, value, reg);
2303 rt2x00_print_chip(rt2x00dev);
2303 2304
2304 if (!rt2x00_rf(&rt2x00dev->chip, RF5225) && 2305 if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
2305 !rt2x00_rf(&rt2x00dev->chip, RF5325) && 2306 !rt2x00_rf(&rt2x00dev->chip, RF5325) &&
@@ -2330,7 +2331,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2330 __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags); 2331 __set_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags);
2331 2332
2332 /* 2333 /*
2333 * Detect if this device has an hardware controlled radio. 2334 * Detect if this device has a hardware controlled radio.
2334 */ 2335 */
2335 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 2336 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
2336 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 2337 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
@@ -2355,7 +2356,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2355 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); 2356 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
2356 2357
2357 /* 2358 /*
2358 * When working with a RF2529 chip without double antenna 2359 * When working with a RF2529 chip without double antenna,
2359 * the antenna settings should be gathered from the NIC 2360 * the antenna settings should be gathered from the NIC
2360 * eeprom word. 2361 * eeprom word.
2361 */ 2362 */
@@ -2668,7 +2669,7 @@ static int rt61pci_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2668 2669
2669 /* 2670 /*
2670 * We only need to perform additional register initialization 2671 * We only need to perform additional register initialization
2671 * for WMM queues/ 2672 * for WMM queues.
2672 */ 2673 */
2673 if (queue_idx >= 4) 2674 if (queue_idx >= 4)
2674 return 0; 2675 return 0;
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 93eb699165cc..6f33f7f5668c 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 14e7bb210075..5bbcf6626f7d 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -1825,6 +1825,7 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1825 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 1825 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
1826 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg); 1826 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
1827 rt2x00_set_chip(rt2x00dev, RT2571, value, reg); 1827 rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
1828 rt2x00_print_chip(rt2x00dev);
1828 1829
1829 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) || 1830 if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
1830 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) { 1831 rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 81fe0be51c42..e783a099a8f1 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 16429c49139c..a1a3dd15c664 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -548,7 +548,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
548 rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma); 548 rtl818x_iowrite32(priv, &priv->map->TNPDA, priv->tx_ring[1].dma);
549 rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma); 549 rtl818x_iowrite32(priv, &priv->map->TLPDA, priv->tx_ring[0].dma);
550 550
551 ret = request_irq(priv->pdev->irq, &rtl8180_interrupt, 551 ret = request_irq(priv->pdev->irq, rtl8180_interrupt,
552 IRQF_SHARED, KBUILD_MODNAME, dev); 552 IRQF_SHARED, KBUILD_MODNAME, dev);
553 if (ret) { 553 if (ret) {
554 printk(KERN_ERR "%s: failed to register IRQ handler\n", 554 printk(KERN_ERR "%s: failed to register IRQ handler\n",
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index bf9175a8c1f4..abb4907cf296 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -119,7 +119,6 @@ struct rtl8187_priv {
119 } hw_rev; 119 } hw_rev;
120 struct sk_buff_head rx_queue; 120 struct sk_buff_head rx_queue;
121 u8 signal; 121 u8 signal;
122 u8 quality;
123 u8 noise; 122 u8 noise;
124 u8 slot_time; 123 u8 slot_time;
125 u8 aifsn[4]; 124 u8 aifsn[4];
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 2017ccc00145..76973b8c7099 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -320,7 +320,6 @@ static void rtl8187_rx_cb(struct urb *urb)
320 struct ieee80211_rx_status rx_status = { 0 }; 320 struct ieee80211_rx_status rx_status = { 0 };
321 int rate, signal; 321 int rate, signal;
322 u32 flags; 322 u32 flags;
323 u32 quality;
324 unsigned long f; 323 unsigned long f;
325 324
326 spin_lock_irqsave(&priv->rx_queue.lock, f); 325 spin_lock_irqsave(&priv->rx_queue.lock, f);
@@ -338,10 +337,9 @@ static void rtl8187_rx_cb(struct urb *urb)
338 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); 337 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
339 flags = le32_to_cpu(hdr->flags); 338 flags = le32_to_cpu(hdr->flags);
340 /* As with the RTL8187B below, the AGC is used to calculate 339 /* As with the RTL8187B below, the AGC is used to calculate
341 * signal strength and quality. In this case, the scaling 340 * signal strength. In this case, the scaling
342 * constants are derived from the output of p54usb. 341 * constants are derived from the output of p54usb.
343 */ 342 */
344 quality = 130 - ((41 * hdr->agc) >> 6);
345 signal = -4 - ((27 * hdr->agc) >> 6); 343 signal = -4 - ((27 * hdr->agc) >> 6);
346 rx_status.antenna = (hdr->signal >> 7) & 1; 344 rx_status.antenna = (hdr->signal >> 7) & 1;
347 rx_status.mactime = le64_to_cpu(hdr->mac_time); 345 rx_status.mactime = le64_to_cpu(hdr->mac_time);
@@ -354,23 +352,18 @@ static void rtl8187_rx_cb(struct urb *urb)
354 * In testing, none of these quantities show qualitative 352 * In testing, none of these quantities show qualitative
355 * agreement with AP signal strength, except for the AGC, 353 * agreement with AP signal strength, except for the AGC,
356 * which is inversely proportional to the strength of the 354 * which is inversely proportional to the strength of the
357 * signal. In the following, the quality and signal strength 355 * signal. In the following, the signal strength
358 * are derived from the AGC. The arbitrary scaling constants 356 * is derived from the AGC. The arbitrary scaling constants
359 * are chosen to make the results close to the values obtained 357 * are chosen to make the results close to the values obtained
360 * for a BCM4312 using b43 as the driver. The noise is ignored 358 * for a BCM4312 using b43 as the driver. The noise is ignored
361 * for now. 359 * for now.
362 */ 360 */
363 flags = le32_to_cpu(hdr->flags); 361 flags = le32_to_cpu(hdr->flags);
364 quality = 170 - hdr->agc;
365 signal = 14 - hdr->agc / 2; 362 signal = 14 - hdr->agc / 2;
366 rx_status.antenna = (hdr->rssi >> 7) & 1; 363 rx_status.antenna = (hdr->rssi >> 7) & 1;
367 rx_status.mactime = le64_to_cpu(hdr->mac_time); 364 rx_status.mactime = le64_to_cpu(hdr->mac_time);
368 } 365 }
369 366
370 if (quality > 100)
371 quality = 100;
372 rx_status.qual = quality;
373 priv->quality = quality;
374 rx_status.signal = signal; 367 rx_status.signal = signal;
375 priv->signal = signal; 368 priv->signal = signal;
376 rate = (flags >> 20) & 0xF; 369 rate = (flags >> 20) & 0xF;
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 88060e117541..785e0244e305 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,6 +1,6 @@
1menuconfig WL12XX 1menuconfig WL12XX
2 tristate "TI wl12xx driver support" 2 tristate "TI wl12xx driver support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable TI wl12xx driver support. The drivers make 5 This will enable TI wl12xx driver support. The drivers make
6 use of the mac80211 stack. 6 use of the mac80211 stack.
@@ -42,6 +42,7 @@ config WL1251_SDIO
42config WL1271 42config WL1271
43 tristate "TI wl1271 support" 43 tristate "TI wl1271 support"
44 depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS 44 depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS
45 depends on INET
45 select FW_LOADER 46 select FW_LOADER
46 select CRC7 47 select CRC7
47 ---help--- 48 ---help---
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 998e4b6252bd..054533f7a124 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -269,6 +269,7 @@ struct wl1251 {
269 269
270 void (*set_power)(bool enable); 270 void (*set_power)(bool enable);
271 int irq; 271 int irq;
272 bool use_eeprom;
272 273
273 enum wl1251_state state; 274 enum wl1251_state state;
274 struct mutex mutex; 275 struct mutex mutex;
@@ -354,6 +355,8 @@ struct wl1251 {
354 /* is firmware in elp mode */ 355 /* is firmware in elp mode */
355 bool elp; 356 bool elp;
356 357
358 struct delayed_work elp_work;
359
357 /* we can be in psm, but not in elp, we have to differentiate */ 360 /* we can be in psm, but not in elp, we have to differentiate */
358 bool psm; 361 bool psm;
359 362
@@ -374,6 +377,8 @@ struct wl1251 {
374 u8 buffer_busyword[WL1251_BUSY_WORD_LEN]; 377 u8 buffer_busyword[WL1251_BUSY_WORD_LEN];
375 struct wl1251_rx_descriptor *rx_descriptor; 378 struct wl1251_rx_descriptor *rx_descriptor;
376 379
380 struct ieee80211_vif *vif;
381
377 u32 chip_id; 382 u32 chip_id;
378 char fw_ver[21]; 383 char fw_ver[21];
379}; 384};
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index 10b26c4532c9..acfa086dbfc5 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -494,7 +494,7 @@ out:
494 return ret; 494 return ret;
495} 495}
496 496
497int wl1251_acx_beacon_filter_opt(struct wl1251 *wl) 497int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter)
498{ 498{
499 struct acx_beacon_filter_option *beacon_filter; 499 struct acx_beacon_filter_option *beacon_filter;
500 int ret; 500 int ret;
@@ -507,7 +507,7 @@ int wl1251_acx_beacon_filter_opt(struct wl1251 *wl)
507 goto out; 507 goto out;
508 } 508 }
509 509
510 beacon_filter->enable = 0; 510 beacon_filter->enable = enable_filter;
511 beacon_filter->max_num_beacons = 0; 511 beacon_filter->max_num_beacons = 0;
512 512
513 ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_OPT, 513 ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
@@ -525,6 +525,7 @@ out:
525int wl1251_acx_beacon_filter_table(struct wl1251 *wl) 525int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
526{ 526{
527 struct acx_beacon_filter_ie_table *ie_table; 527 struct acx_beacon_filter_ie_table *ie_table;
528 int idx = 0;
528 int ret; 529 int ret;
529 530
530 wl1251_debug(DEBUG_ACX, "acx beacon filter table"); 531 wl1251_debug(DEBUG_ACX, "acx beacon filter table");
@@ -535,8 +536,10 @@ int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
535 goto out; 536 goto out;
536 } 537 }
537 538
538 ie_table->num_ie = 0; 539 /* configure default beacon pass-through rules */
539 memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE); 540 ie_table->num_ie = 1;
541 ie_table->table[idx++] = BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN;
542 ie_table->table[idx++] = BEACON_RULE_PASS_ON_APPEARANCE;
540 543
541 ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, 544 ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
542 ie_table, sizeof(*ie_table)); 545 ie_table, sizeof(*ie_table));
@@ -550,6 +553,35 @@ out:
550 return ret; 553 return ret;
551} 554}
552 555
556int wl1251_acx_conn_monit_params(struct wl1251 *wl)
557{
558 struct acx_conn_monit_params *acx;
559 int ret;
560
561 wl1251_debug(DEBUG_ACX, "acx connection monitor parameters");
562
563 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
564 if (!acx) {
565 ret = -ENOMEM;
566 goto out;
567 }
568
569 acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD;
570 acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT;
571
572 ret = wl1251_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
573 acx, sizeof(*acx));
574 if (ret < 0) {
575 wl1251_warning("failed to set connection monitor "
576 "parameters: %d", ret);
577 goto out;
578 }
579
580out:
581 kfree(acx);
582 return ret;
583}
584
553int wl1251_acx_sg_enable(struct wl1251 *wl) 585int wl1251_acx_sg_enable(struct wl1251 *wl)
554{ 586{
555 struct acx_bt_wlan_coex *pta; 587 struct acx_bt_wlan_coex *pta;
@@ -916,3 +948,31 @@ out:
916 kfree(mem_conf); 948 kfree(mem_conf);
917 return ret; 949 return ret;
918} 950}
951
952int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim)
953{
954 struct wl1251_acx_wr_tbtt_and_dtim *acx;
955 int ret;
956
957 wl1251_debug(DEBUG_ACX, "acx tbtt and dtim");
958
959 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
960 if (!acx) {
961 ret = -ENOMEM;
962 goto out;
963 }
964
965 acx->tbtt = tbtt;
966 acx->dtim = dtim;
967
968 ret = wl1251_cmd_configure(wl, ACX_WR_TBTT_AND_DTIM,
969 acx, sizeof(*acx));
970 if (ret < 0) {
971 wl1251_warning("failed to set tbtt and dtim: %d", ret);
972 goto out;
973 }
974
975out:
976 kfree(acx);
977 return ret;
978}
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index cafb91459504..652371432cd8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -450,6 +450,11 @@ struct acx_beacon_filter_option {
450 (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \ 450 (BEACON_FILTER_TABLE_MAX_VENDOR_SPECIFIC_IE_NUM * \
451 BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE)) 451 BEACON_FILTER_TABLE_EXTRA_VENDOR_SPECIFIC_IE_SIZE))
452 452
453#define BEACON_RULE_PASS_ON_CHANGE BIT(0)
454#define BEACON_RULE_PASS_ON_APPEARANCE BIT(1)
455
456#define BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN (37)
457
453struct acx_beacon_filter_ie_table { 458struct acx_beacon_filter_ie_table {
454 struct acx_header header; 459 struct acx_header header;
455 460
@@ -458,6 +463,16 @@ struct acx_beacon_filter_ie_table {
458 u8 pad[3]; 463 u8 pad[3];
459} __attribute__ ((packed)); 464} __attribute__ ((packed));
460 465
466#define SYNCH_FAIL_DEFAULT_THRESHOLD 10 /* number of beacons */
467#define NO_BEACON_DEFAULT_TIMEOUT (500) /* in microseconds */
468
469struct acx_conn_monit_params {
470 struct acx_header header;
471
472 u32 synch_fail_thold; /* number of beacons missed */
473 u32 bss_lose_timeout; /* number of TU's from synch fail */
474};
475
461enum { 476enum {
462 SG_ENABLE = 0, 477 SG_ENABLE = 0,
463 SG_DISABLE, 478 SG_DISABLE,
@@ -1134,6 +1149,23 @@ struct wl1251_acx_mem_map {
1134 u32 num_rx_mem_blocks; 1149 u32 num_rx_mem_blocks;
1135} __attribute__ ((packed)); 1150} __attribute__ ((packed));
1136 1151
1152
1153struct wl1251_acx_wr_tbtt_and_dtim {
1154
1155 struct acx_header header;
1156
1157 /* Time in TUs between two consecutive beacons */
1158 u16 tbtt;
1159
1160 /*
1161 * DTIM period
1162 * For BSS: Number of TBTTs in a DTIM period (range: 1-10)
1163 * For IBSS: value shall be set to 1
1164 */
1165 u8 dtim;
1166 u8 padding;
1167} __attribute__ ((packed));
1168
1137/************************************************************************* 1169/*************************************************************************
1138 1170
1139 Host Interrupt Register (WiLink -> Host) 1171 Host Interrupt Register (WiLink -> Host)
@@ -1273,8 +1305,9 @@ int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time);
1273int wl1251_acx_group_address_tbl(struct wl1251 *wl); 1305int wl1251_acx_group_address_tbl(struct wl1251 *wl);
1274int wl1251_acx_service_period_timeout(struct wl1251 *wl); 1306int wl1251_acx_service_period_timeout(struct wl1251 *wl);
1275int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold); 1307int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold);
1276int wl1251_acx_beacon_filter_opt(struct wl1251 *wl); 1308int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter);
1277int wl1251_acx_beacon_filter_table(struct wl1251 *wl); 1309int wl1251_acx_beacon_filter_table(struct wl1251 *wl);
1310int wl1251_acx_conn_monit_params(struct wl1251 *wl);
1278int wl1251_acx_sg_enable(struct wl1251 *wl); 1311int wl1251_acx_sg_enable(struct wl1251 *wl);
1279int wl1251_acx_sg_cfg(struct wl1251 *wl); 1312int wl1251_acx_sg_cfg(struct wl1251 *wl);
1280int wl1251_acx_cca_threshold(struct wl1251 *wl); 1313int wl1251_acx_cca_threshold(struct wl1251 *wl);
@@ -1288,5 +1321,6 @@ int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats);
1288int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime); 1321int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
1289int wl1251_acx_rate_policies(struct wl1251 *wl); 1322int wl1251_acx_rate_policies(struct wl1251 *wl);
1290int wl1251_acx_mem_cfg(struct wl1251 *wl); 1323int wl1251_acx_mem_cfg(struct wl1251 *wl);
1324int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
1291 1325
1292#endif /* __WL1251_ACX_H__ */ 1326#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index 452d748e42c6..2e733e7bdfd4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -296,8 +296,12 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
296 WL1251_ACX_INTR_INIT_COMPLETE; 296 WL1251_ACX_INTR_INIT_COMPLETE;
297 wl1251_boot_target_enable_interrupts(wl); 297 wl1251_boot_target_enable_interrupts(wl);
298 298
299 /* unmask all mbox events */ 299 wl->event_mask = SCAN_COMPLETE_EVENT_ID | BSS_LOSE_EVENT_ID |
300 wl->event_mask = 0xffffffff; 300 SYNCHRONIZATION_TIMEOUT_EVENT_ID |
301 ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
302 ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
303 REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
304 BT_PTA_PREDICTION_EVENT_ID;
301 305
302 ret = wl1251_event_unmask(wl); 306 ret = wl1251_event_unmask(wl);
303 if (ret < 0) { 307 if (ret < 0) {
@@ -314,8 +318,8 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
314static int wl1251_boot_upload_firmware(struct wl1251 *wl) 318static int wl1251_boot_upload_firmware(struct wl1251 *wl)
315{ 319{
316 int addr, chunk_num, partition_limit; 320 int addr, chunk_num, partition_limit;
317 size_t fw_data_len; 321 size_t fw_data_len, len;
318 u8 *p; 322 u8 *p, *buf;
319 323
320 /* whal_FwCtrl_LoadFwImageSm() */ 324 /* whal_FwCtrl_LoadFwImageSm() */
321 325
@@ -334,6 +338,12 @@ static int wl1251_boot_upload_firmware(struct wl1251 *wl)
334 return -EIO; 338 return -EIO;
335 } 339 }
336 340
341 buf = kmalloc(CHUNK_SIZE, GFP_KERNEL);
342 if (!buf) {
343 wl1251_error("allocation for firmware upload chunk failed");
344 return -ENOMEM;
345 }
346
337 wl1251_set_partition(wl, WL1251_PART_DOWN_MEM_START, 347 wl1251_set_partition(wl, WL1251_PART_DOWN_MEM_START,
338 WL1251_PART_DOWN_MEM_SIZE, 348 WL1251_PART_DOWN_MEM_SIZE,
339 WL1251_PART_DOWN_REG_START, 349 WL1251_PART_DOWN_REG_START,
@@ -364,7 +374,11 @@ static int wl1251_boot_upload_firmware(struct wl1251 *wl)
364 p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE; 374 p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
365 wl1251_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", 375 wl1251_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
366 p, addr); 376 p, addr);
367 wl1251_mem_write(wl, addr, p, CHUNK_SIZE); 377
378 /* need to copy the chunk for dma */
379 len = CHUNK_SIZE;
380 memcpy(buf, p, len);
381 wl1251_mem_write(wl, addr, buf, len);
368 382
369 chunk_num++; 383 chunk_num++;
370 } 384 }
@@ -372,9 +386,16 @@ static int wl1251_boot_upload_firmware(struct wl1251 *wl)
372 /* 10.4 upload the last chunk */ 386 /* 10.4 upload the last chunk */
373 addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE; 387 addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE;
374 p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE; 388 p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
389
390 /* need to copy the chunk for dma */
391 len = fw_data_len % CHUNK_SIZE;
392 memcpy(buf, p, len);
393
375 wl1251_debug(DEBUG_BOOT, "uploading fw last chunk (%zu B) 0x%p to 0x%x", 394 wl1251_debug(DEBUG_BOOT, "uploading fw last chunk (%zu B) 0x%p to 0x%x",
376 fw_data_len % CHUNK_SIZE, p, addr); 395 len, p, addr);
377 wl1251_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE); 396 wl1251_mem_write(wl, addr, buf, len);
397
398 kfree(buf);
378 399
379 return 0; 400 return 0;
380} 401}
@@ -473,13 +494,19 @@ int wl1251_boot(struct wl1251 *wl)
473 goto out; 494 goto out;
474 495
475 /* 2. start processing NVS file */ 496 /* 2. start processing NVS file */
476 ret = wl1251_boot_upload_nvs(wl); 497 if (wl->use_eeprom) {
477 if (ret < 0) 498 wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
478 goto out; 499 msleep(4000);
479 500 wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
480 /* write firmware's last address (ie. it's length) to 501 } else {
481 * ACX_EEPROMLESS_IND_REG */ 502 ret = wl1251_boot_upload_nvs(wl);
482 wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len); 503 if (ret < 0)
504 goto out;
505
506 /* write firmware's last address (ie. it's length) to
507 * ACX_EEPROMLESS_IND_REG */
508 wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len);
509 }
483 510
484 /* 6. read the EEPROM parameters */ 511 /* 6. read the EEPROM parameters */
485 tmp = wl1251_reg_read32(wl, SCR_PAD2); 512 tmp = wl1251_reg_read32(wl, SCR_PAD2);
diff --git a/drivers/net/wireless/wl12xx/wl1251_event.c b/drivers/net/wireless/wl12xx/wl1251_event.c
index 00076c4a8a21..020d764f9c13 100644
--- a/drivers/net/wireless/wl12xx/wl1251_event.c
+++ b/drivers/net/wireless/wl12xx/wl1251_event.c
@@ -79,6 +79,21 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
79 } 79 }
80 } 80 }
81 81
82 if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID && wl->psm) {
83 wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
84
85 /* indicate to the stack, that beacons have been lost */
86 ieee80211_beacon_loss(wl->vif);
87 }
88
89 if (vector & REGAINED_BSS_EVENT_ID) {
90 if (wl->psm_requested) {
91 ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
92 if (ret < 0)
93 return ret;
94 }
95 }
96
82 return 0; 97 return 0;
83} 98}
84 99
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index b2ee4f468fc4..5cb573383eeb 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -147,7 +147,8 @@ int wl1251_hw_init_beacon_filter(struct wl1251 *wl)
147{ 147{
148 int ret; 148 int ret;
149 149
150 ret = wl1251_acx_beacon_filter_opt(wl); 150 /* disable beacon filtering at this stage */
151 ret = wl1251_acx_beacon_filter_opt(wl, false);
151 if (ret < 0) 152 if (ret < 0)
152 return ret; 153 return ret;
153 154
@@ -364,6 +365,11 @@ int wl1251_hw_init(struct wl1251 *wl)
364 if (ret < 0) 365 if (ret < 0)
365 goto out_free_data_path; 366 goto out_free_data_path;
366 367
368 /* Initialize connection monitoring thresholds */
369 ret = wl1251_acx_conn_monit_params(wl);
370 if (ret < 0)
371 goto out_free_data_path;
372
367 /* Beacon filtering */ 373 /* Beacon filtering */
368 ret = wl1251_hw_init_beacon_filter(wl); 374 ret = wl1251_hw_init_beacon_filter(wl);
369 if (ret < 0) 375 if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1103256ad989..ff4be7bf5d36 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -28,6 +28,7 @@
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/etherdevice.h> 30#include <linux/etherdevice.h>
31#include <linux/vmalloc.h>
31 32
32#include "wl1251.h" 33#include "wl1251.h"
33#include "wl12xx_80211.h" 34#include "wl12xx_80211.h"
@@ -83,7 +84,7 @@ static int wl1251_fetch_firmware(struct wl1251 *wl)
83 } 84 }
84 85
85 wl->fw_len = fw->size; 86 wl->fw_len = fw->size;
86 wl->fw = kmalloc(wl->fw_len, GFP_KERNEL); 87 wl->fw = vmalloc(wl->fw_len);
87 88
88 if (!wl->fw) { 89 if (!wl->fw) {
89 wl1251_error("could not allocate memory for the firmware"); 90 wl1251_error("could not allocate memory for the firmware");
@@ -183,8 +184,11 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
183 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)", 184 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
184 wl->chip_id); 185 wl->chip_id);
185 break; 186 break;
186 case CHIP_ID_1251_PG10:
187 case CHIP_ID_1251_PG11: 187 case CHIP_ID_1251_PG11:
188 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
189 wl->chip_id);
190 break;
191 case CHIP_ID_1251_PG10:
188 default: 192 default:
189 wl1251_error("unsupported chip id: 0x%x", wl->chip_id); 193 wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
190 ret = -ENODEV; 194 ret = -ENODEV;
@@ -208,9 +212,10 @@ out:
208 return ret; 212 return ret;
209} 213}
210 214
215#define WL1251_IRQ_LOOP_COUNT 10
211static void wl1251_irq_work(struct work_struct *work) 216static void wl1251_irq_work(struct work_struct *work)
212{ 217{
213 u32 intr; 218 u32 intr, ctr = WL1251_IRQ_LOOP_COUNT;
214 struct wl1251 *wl = 219 struct wl1251 *wl =
215 container_of(work, struct wl1251, irq_work); 220 container_of(work, struct wl1251, irq_work);
216 int ret; 221 int ret;
@@ -231,78 +236,86 @@ static void wl1251_irq_work(struct work_struct *work)
231 intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); 236 intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
232 wl1251_debug(DEBUG_IRQ, "intr: 0x%x", intr); 237 wl1251_debug(DEBUG_IRQ, "intr: 0x%x", intr);
233 238
234 if (wl->data_path) { 239 do {
235 wl->rx_counter = 240 if (wl->data_path) {
236 wl1251_mem_read32(wl, wl->data_path->rx_control_addr); 241 wl->rx_counter = wl1251_mem_read32(
237 242 wl, wl->data_path->rx_control_addr);
238 /* We handle a frmware bug here */ 243
239 switch ((wl->rx_counter - wl->rx_handled) & 0xf) { 244 /* We handle a frmware bug here */
240 case 0: 245 switch ((wl->rx_counter - wl->rx_handled) & 0xf) {
241 wl1251_debug(DEBUG_IRQ, "RX: FW and host in sync"); 246 case 0:
242 intr &= ~WL1251_ACX_INTR_RX0_DATA; 247 wl1251_debug(DEBUG_IRQ,
243 intr &= ~WL1251_ACX_INTR_RX1_DATA; 248 "RX: FW and host in sync");
244 break; 249 intr &= ~WL1251_ACX_INTR_RX0_DATA;
245 case 1: 250 intr &= ~WL1251_ACX_INTR_RX1_DATA;
246 wl1251_debug(DEBUG_IRQ, "RX: FW +1"); 251 break;
247 intr |= WL1251_ACX_INTR_RX0_DATA; 252 case 1:
248 intr &= ~WL1251_ACX_INTR_RX1_DATA; 253 wl1251_debug(DEBUG_IRQ, "RX: FW +1");
249 break; 254 intr |= WL1251_ACX_INTR_RX0_DATA;
250 case 2: 255 intr &= ~WL1251_ACX_INTR_RX1_DATA;
251 wl1251_debug(DEBUG_IRQ, "RX: FW +2"); 256 break;
252 intr |= WL1251_ACX_INTR_RX0_DATA; 257 case 2:
253 intr |= WL1251_ACX_INTR_RX1_DATA; 258 wl1251_debug(DEBUG_IRQ, "RX: FW +2");
254 break; 259 intr |= WL1251_ACX_INTR_RX0_DATA;
255 default: 260 intr |= WL1251_ACX_INTR_RX1_DATA;
256 wl1251_warning("RX: FW and host out of sync: %d", 261 break;
257 wl->rx_counter - wl->rx_handled); 262 default:
258 break; 263 wl1251_warning(
259 } 264 "RX: FW and host out of sync: %d",
260 265 wl->rx_counter - wl->rx_handled);
261 wl->rx_handled = wl->rx_counter; 266 break;
267 }
262 268
269 wl->rx_handled = wl->rx_counter;
263 270
264 wl1251_debug(DEBUG_IRQ, "RX counter: %d", wl->rx_counter); 271 wl1251_debug(DEBUG_IRQ, "RX counter: %d",
265 } 272 wl->rx_counter);
273 }
266 274
267 intr &= wl->intr_mask; 275 intr &= wl->intr_mask;
268 276
269 if (intr == 0) { 277 if (intr == 0) {
270 wl1251_debug(DEBUG_IRQ, "INTR is 0"); 278 wl1251_debug(DEBUG_IRQ, "INTR is 0");
271 wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 279 goto out_sleep;
272 ~(wl->intr_mask)); 280 }
273 281
274 goto out_sleep; 282 if (intr & WL1251_ACX_INTR_RX0_DATA) {
275 } 283 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA");
284 wl1251_rx(wl);
285 }
276 286
277 if (intr & WL1251_ACX_INTR_RX0_DATA) { 287 if (intr & WL1251_ACX_INTR_RX1_DATA) {
278 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA"); 288 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA");
279 wl1251_rx(wl); 289 wl1251_rx(wl);
280 } 290 }
281 291
282 if (intr & WL1251_ACX_INTR_RX1_DATA) { 292 if (intr & WL1251_ACX_INTR_TX_RESULT) {
283 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA"); 293 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
284 wl1251_rx(wl); 294 wl1251_tx_complete(wl);
285 } 295 }
286 296
287 if (intr & WL1251_ACX_INTR_TX_RESULT) { 297 if (intr & (WL1251_ACX_INTR_EVENT_A |
288 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT"); 298 WL1251_ACX_INTR_EVENT_B)) {
289 wl1251_tx_complete(wl); 299 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)",
290 } 300 intr);
301 if (intr & WL1251_ACX_INTR_EVENT_A)
302 wl1251_event_handle(wl, 0);
303 else
304 wl1251_event_handle(wl, 1);
305 }
291 306
292 if (intr & (WL1251_ACX_INTR_EVENT_A | WL1251_ACX_INTR_EVENT_B)) { 307 if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
293 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT (0x%x)", intr); 308 wl1251_debug(DEBUG_IRQ,
294 if (intr & WL1251_ACX_INTR_EVENT_A) 309 "WL1251_ACX_INTR_INIT_COMPLETE");
295 wl1251_event_handle(wl, 0);
296 else
297 wl1251_event_handle(wl, 1);
298 }
299 310
300 if (intr & WL1251_ACX_INTR_INIT_COMPLETE) 311 if (--ctr == 0)
301 wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_INIT_COMPLETE"); 312 break;
302 313
303 wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask)); 314 intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
315 } while (intr);
304 316
305out_sleep: 317out_sleep:
318 wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
306 wl1251_ps_elp_sleep(wl); 319 wl1251_ps_elp_sleep(wl);
307 320
308out: 321out:
@@ -506,6 +519,12 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
506 conf->type, conf->mac_addr); 519 conf->type, conf->mac_addr);
507 520
508 mutex_lock(&wl->mutex); 521 mutex_lock(&wl->mutex);
522 if (wl->vif) {
523 ret = -EBUSY;
524 goto out;
525 }
526
527 wl->vif = conf->vif;
509 528
510 switch (conf->type) { 529 switch (conf->type) {
511 case NL80211_IFTYPE_STATION: 530 case NL80211_IFTYPE_STATION:
@@ -535,7 +554,12 @@ out:
535static void wl1251_op_remove_interface(struct ieee80211_hw *hw, 554static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
536 struct ieee80211_if_init_conf *conf) 555 struct ieee80211_if_init_conf *conf)
537{ 556{
557 struct wl1251 *wl = hw->priv;
558
559 mutex_lock(&wl->mutex);
538 wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface"); 560 wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
561 wl->vif = NULL;
562 mutex_unlock(&wl->mutex);
539} 563}
540 564
541static int wl1251_build_null_data(struct wl1251 *wl) 565static int wl1251_build_null_data(struct wl1251 *wl)
@@ -552,7 +576,8 @@ static int wl1251_build_null_data(struct wl1251 *wl)
552 576
553 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 577 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
554 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 578 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
555 IEEE80211_STYPE_NULLFUNC); 579 IEEE80211_STYPE_NULLFUNC |
580 IEEE80211_FCTL_TODS);
556 581
557 return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template, 582 return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
558 sizeof(template)); 583 sizeof(template));
@@ -565,7 +590,10 @@ static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
565 590
566 memcpy(template.bssid, wl->bssid, ETH_ALEN); 591 memcpy(template.bssid, wl->bssid, ETH_ALEN);
567 memcpy(template.ta, wl->mac_addr, ETH_ALEN); 592 memcpy(template.ta, wl->mac_addr, ETH_ALEN);
568 template.aid = aid; 593
594 /* aid in PS-Poll has its two MSBs each set to 1 */
595 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
596
569 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 597 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
570 598
571 return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template, 599 return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template,
@@ -1087,8 +1115,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
1087 wl->beacon_int = bss_conf->beacon_int; 1115 wl->beacon_int = bss_conf->beacon_int;
1088 wl->dtim_period = bss_conf->dtim_period; 1116 wl->dtim_period = bss_conf->dtim_period;
1089 1117
1090 /* FIXME: call join */ 1118 ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
1091 1119 wl->dtim_period);
1092 wl->aid = bss_conf->aid; 1120 wl->aid = bss_conf->aid;
1093 1121
1094 ret = wl1251_build_ps_poll(wl, wl->aid); 1122 ret = wl1251_build_ps_poll(wl, wl->aid);
@@ -1308,7 +1336,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1308 wl->hw->channel_change_time = 10000; 1336 wl->hw->channel_change_time = 10000;
1309 1337
1310 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1338 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1311 IEEE80211_HW_NOISE_DBM; 1339 IEEE80211_HW_NOISE_DBM |
1340 IEEE80211_HW_SUPPORTS_PS |
1341 IEEE80211_HW_BEACON_FILTER;
1312 1342
1313 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1343 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1314 wl->hw->wiphy->max_scan_ssids = 1; 1344 wl->hw->wiphy->max_scan_ssids = 1;
@@ -1351,6 +1381,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1351 skb_queue_head_init(&wl->tx_queue); 1381 skb_queue_head_init(&wl->tx_queue);
1352 1382
1353 INIT_WORK(&wl->filter_work, wl1251_filter_work); 1383 INIT_WORK(&wl->filter_work, wl1251_filter_work);
1384 INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
1354 wl->channel = WL1251_DEFAULT_CHANNEL; 1385 wl->channel = WL1251_DEFAULT_CHANNEL;
1355 wl->scanning = false; 1386 wl->scanning = false;
1356 wl->default_key = 0; 1387 wl->default_key = 0;
@@ -1368,6 +1399,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
1368 wl->power_level = WL1251_DEFAULT_POWER_LEVEL; 1399 wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
1369 wl->beacon_int = WL1251_DEFAULT_BEACON_INT; 1400 wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
1370 wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD; 1401 wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
1402 wl->vif = NULL;
1371 1403
1372 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) 1404 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
1373 wl->tx_frames[i] = NULL; 1405 wl->tx_frames[i] = NULL;
@@ -1409,7 +1441,7 @@ int wl1251_free_hw(struct wl1251 *wl)
1409 1441
1410 kfree(wl->target_mem_map); 1442 kfree(wl->target_mem_map);
1411 kfree(wl->data_path); 1443 kfree(wl->data_path);
1412 kfree(wl->fw); 1444 vfree(wl->fw);
1413 wl->fw = NULL; 1445 wl->fw = NULL;
1414 kfree(wl->nvs); 1446 kfree(wl->nvs);
1415 wl->nvs = NULL; 1447 wl->nvs = NULL;
@@ -1426,4 +1458,5 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
1426MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); 1458MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
1427MODULE_LICENSE("GPL"); 1459MODULE_LICENSE("GPL");
1428MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 1460MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
1429MODULE_ALIAS("spi:wl12xx"); 1461MODULE_ALIAS("spi:wl1251");
1462MODULE_FIRMWARE(WL1251_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1251_netlink.h b/drivers/net/wireless/wl12xx/wl1251_netlink.h
deleted file mode 100644
index ee36695e134e..000000000000
--- a/drivers/net/wireless/wl12xx/wl1251_netlink.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * This file is part of wl1251
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __WL1251_NETLINK_H__
25#define __WL1251_NETLINK_H__
26
27int wl1251_nl_register(void);
28void wl1251_nl_unregister(void);
29
30#endif /* __WL1251_NETLINK_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index c53e28727ed4..9931b197ff77 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -28,17 +28,41 @@
28 28
29#define WL1251_WAKEUP_TIMEOUT 2000 29#define WL1251_WAKEUP_TIMEOUT 2000
30 30
31/* Routines to toggle sleep mode while in ELP */ 31void wl1251_elp_work(struct work_struct *work)
32void wl1251_ps_elp_sleep(struct wl1251 *wl)
33{ 32{
33 struct delayed_work *dwork;
34 struct wl1251 *wl;
35
36 dwork = container_of(work, struct delayed_work, work);
37 wl = container_of(dwork, struct wl1251, elp_work);
38
39 wl1251_debug(DEBUG_PSM, "elp work");
40
41 mutex_lock(&wl->mutex);
42
34 if (wl->elp || !wl->psm) 43 if (wl->elp || !wl->psm)
35 return; 44 goto out;
36 45
37 wl1251_debug(DEBUG_PSM, "chip to elp"); 46 wl1251_debug(DEBUG_PSM, "chip to elp");
38
39 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 47 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
40
41 wl->elp = true; 48 wl->elp = true;
49
50out:
51 mutex_unlock(&wl->mutex);
52}
53
54#define ELP_ENTRY_DELAY 5
55
56/* Routines to toggle sleep mode while in ELP */
57void wl1251_ps_elp_sleep(struct wl1251 *wl)
58{
59 unsigned long delay;
60
61 if (wl->psm) {
62 cancel_delayed_work(&wl->elp_work);
63 delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
64 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
65 }
42} 66}
43 67
44int wl1251_ps_elp_wakeup(struct wl1251 *wl) 68int wl1251_ps_elp_wakeup(struct wl1251 *wl)
@@ -119,6 +143,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
119 case STATION_POWER_SAVE_MODE: 143 case STATION_POWER_SAVE_MODE:
120 wl1251_debug(DEBUG_PSM, "entering psm"); 144 wl1251_debug(DEBUG_PSM, "entering psm");
121 145
146 /* enable beacon filtering */
147 ret = wl1251_acx_beacon_filter_opt(wl, true);
148 if (ret < 0)
149 return ret;
150
122 ret = wl1251_acx_wake_up_conditions(wl, 151 ret = wl1251_acx_wake_up_conditions(wl,
123 WAKE_UP_EVENT_DTIM_BITMAP, 152 WAKE_UP_EVENT_DTIM_BITMAP,
124 wl->listen_int); 153 wl->listen_int);
@@ -142,6 +171,11 @@ int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode)
142 if (ret < 0) 171 if (ret < 0)
143 return ret; 172 return ret;
144 173
174 /* disable beacon filtering */
175 ret = wl1251_acx_beacon_filter_opt(wl, false);
176 if (ret < 0)
177 return ret;
178
145 ret = wl1251_acx_wake_up_conditions(wl, 179 ret = wl1251_acx_wake_up_conditions(wl,
146 WAKE_UP_EVENT_DTIM_BITMAP, 180 WAKE_UP_EVENT_DTIM_BITMAP,
147 wl->listen_int); 181 wl->listen_int);
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.h b/drivers/net/wireless/wl12xx/wl1251_ps.h
index db036fe12f25..c688ac57aee4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.h
@@ -31,6 +31,7 @@
31int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode); 31int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_cmd_ps_mode mode);
32void wl1251_ps_elp_sleep(struct wl1251 *wl); 32void wl1251_ps_elp_sleep(struct wl1251 *wl);
33int wl1251_ps_elp_wakeup(struct wl1251 *wl); 33int wl1251_ps_elp_wakeup(struct wl1251 *wl);
34void wl1251_elp_work(struct work_struct *work);
34 35
35 36
36#endif /* __WL1251_PS_H__ */ 37#endif /* __WL1251_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index 06e1bd94a739..0ca3b4326056 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -370,6 +370,7 @@ enum wl12xx_acx_int_reg {
370 EEPROM location specified in the EE_ADDR register. 370 EEPROM location specified in the EE_ADDR register.
371 The Wlan hardware hardware clears this bit automatically. 371 The Wlan hardware hardware clears this bit automatically.
372*===============================================*/ 372*===============================================*/
373#define EE_CTL (REGISTERS_BASE + 0x2000)
373#define ACX_EE_CTL_REG EE_CTL 374#define ACX_EE_CTL_REG EE_CTL
374#define EE_WRITE 0x00000001ul 375#define EE_WRITE 0x00000001ul
375#define EE_READ 0x00000002ul 376#define EE_READ 0x00000002ul
@@ -380,6 +381,7 @@ enum wl12xx_acx_int_reg {
380 This register specifies the address 381 This register specifies the address
381 within the EEPROM from/to which to read/write data. 382 within the EEPROM from/to which to read/write data.
382 ===============================================*/ 383 ===============================================*/
384#define EE_ADDR (REGISTERS_BASE + 0x2008)
383#define ACX_EE_ADDR_REG EE_ADDR 385#define ACX_EE_ADDR_REG EE_ADDR
384 386
385/*=============================================== 387/*===============================================
@@ -389,8 +391,12 @@ enum wl12xx_acx_int_reg {
389 data from the EEPROM or the write data 391 data from the EEPROM or the write data
390 to be written to the EEPROM. 392 to be written to the EEPROM.
391 ===============================================*/ 393 ===============================================*/
394#define EE_DATA (REGISTERS_BASE + 0x2004)
392#define ACX_EE_DATA_REG EE_DATA 395#define ACX_EE_DATA_REG EE_DATA
393 396
397#define EEPROM_ACCESS_TO 10000 /* timeout counter */
398#define START_EEPROM_MGR 0x00000001
399
394/*=============================================== 400/*===============================================
395 EEPROM Base Address - 32bit RW 401 EEPROM Base Address - 32bit RW
396 ------------------------------------------ 402 ------------------------------------------
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 17c54b59ef86..f84cc89cbffc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -72,10 +72,6 @@ static void wl1251_rx_status(struct wl1251 *wl,
72 } 72 }
73 73
74 status->signal = desc->rssi; 74 status->signal = desc->rssi;
75 status->qual = (desc->rssi - WL1251_RX_MIN_RSSI) * 100 /
76 (WL1251_RX_MAX_RSSI - WL1251_RX_MIN_RSSI);
77 status->qual = min(status->qual, 100);
78 status->qual = max(status->qual, 0);
79 75
80 /* 76 /*
81 * FIXME: guessing that snr needs to be divided by two, otherwise 77 * FIXME: guessing that snr needs to be divided by two, otherwise
@@ -153,7 +149,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
153 beacon ? "beacon" : ""); 149 beacon ? "beacon" : "");
154 150
155 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 151 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
156 ieee80211_rx(wl->hw, skb); 152 ieee80211_rx_ni(wl->hw, skb);
157} 153}
158 154
159static void wl1251_rx_ack(struct wl1251 *wl) 155static void wl1251_rx_ack(struct wl1251 *wl)
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 14eff2b3d4c6..9cc8c323830f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -270,6 +270,8 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
270 return -ENODEV; 270 return -ENODEV;
271 } 271 }
272 272
273 wl->use_eeprom = pdata->use_eeprom;
274
273 ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); 275 ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
274 if (ret < 0) { 276 if (ret < 0) {
275 wl1251_error("request_irq() failed: %d", ret); 277 wl1251_error("request_irq() failed: %d", ret);
@@ -307,7 +309,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
307 309
308static struct spi_driver wl1251_spi_driver = { 310static struct spi_driver wl1251_spi_driver = {
309 .driver = { 311 .driver = {
310 .name = "wl12xx", 312 .name = "wl1251",
311 .bus = &spi_bus_type, 313 .bus = &spi_bus_type,
312 .owner = THIS_MODULE, 314 .owner = THIS_MODULE,
313 }, 315 },
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 55818f94017b..94359b1a861f 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -32,6 +32,8 @@
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "wl1271_conf.h"
36
35#define DRIVER_NAME "wl1271" 37#define DRIVER_NAME "wl1271"
36#define DRIVER_PREFIX DRIVER_NAME ": " 38#define DRIVER_PREFIX DRIVER_NAME ": "
37 39
@@ -97,21 +99,42 @@ enum {
97 } while (0) 99 } while (0)
98 100
99#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ 101#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
100 CFG_BSSID_FILTER_EN) 102 CFG_BSSID_FILTER_EN | \
103 CFG_MC_FILTER_EN)
101 104
102#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ 105#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
103 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ 106 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
104 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
105 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
106 109
110#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
111
107#define WL1271_FW_NAME "wl1271-fw.bin" 112#define WL1271_FW_NAME "wl1271-fw.bin"
108#define WL1271_NVS_NAME "wl1271-nvs.bin" 113#define WL1271_NVS_NAME "wl1271-nvs.bin"
109 114
110#define WL1271_BUSY_WORD_LEN 8 115/*
116 * Enable/disable 802.11a support for WL1273
117 */
118#undef WL1271_80211A_ENABLED
119
120/*
121 * FIXME: for the wl1271, a busy word count of 1 here will result in a more
122 * optimal SPI interface. There is some SPI bug however, causing RXS time outs
123 * with this mode occasionally on boot, so lets have three for now. A value of
124 * three should make sure, that the chipset will always be ready, though this
125 * will impact throughput and latencies slightly.
126 */
127#define WL1271_BUSY_WORD_CNT 3
128#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
111 129
112#define WL1271_ELP_HW_STATE_ASLEEP 0 130#define WL1271_ELP_HW_STATE_ASLEEP 0
113#define WL1271_ELP_HW_STATE_IRQ 1 131#define WL1271_ELP_HW_STATE_IRQ 1
114 132
133#define WL1271_DEFAULT_BEACON_INT 100
134#define WL1271_DEFAULT_DTIM_PERIOD 1
135
136#define ACX_TX_DESCRIPTORS 32
137
115enum wl1271_state { 138enum wl1271_state {
116 WL1271_STATE_OFF, 139 WL1271_STATE_OFF,
117 WL1271_STATE_ON, 140 WL1271_STATE_ON,
@@ -134,6 +157,8 @@ struct wl1271_partition {
134struct wl1271_partition_set { 157struct wl1271_partition_set {
135 struct wl1271_partition mem; 158 struct wl1271_partition mem;
136 struct wl1271_partition reg; 159 struct wl1271_partition reg;
160 struct wl1271_partition mem2;
161 struct wl1271_partition mem3;
137}; 162};
138 163
139struct wl1271; 164struct wl1271;
@@ -258,15 +283,15 @@ struct wl1271_debugfs {
258 283
259/* FW status registers */ 284/* FW status registers */
260struct wl1271_fw_status { 285struct wl1271_fw_status {
261 u32 intr; 286 __le32 intr;
262 u8 fw_rx_counter; 287 u8 fw_rx_counter;
263 u8 drv_rx_counter; 288 u8 drv_rx_counter;
264 u8 reserved; 289 u8 reserved;
265 u8 tx_results_counter; 290 u8 tx_results_counter;
266 u32 rx_pkt_descs[NUM_RX_PKT_DESC]; 291 __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
267 u32 tx_released_blks[NUM_TX_QUEUES]; 292 __le32 tx_released_blks[NUM_TX_QUEUES];
268 u32 fw_localtime; 293 __le32 fw_localtime;
269 u32 padding[2]; 294 __le32 padding[2];
270} __attribute__ ((packed)); 295} __attribute__ ((packed));
271 296
272struct wl1271_rx_mem_pool_addr { 297struct wl1271_rx_mem_pool_addr {
@@ -274,6 +299,15 @@ struct wl1271_rx_mem_pool_addr {
274 u32 addr_extra; 299 u32 addr_extra;
275}; 300};
276 301
302struct wl1271_scan {
303 u8 state;
304 u8 ssid[IW_ESSID_MAX_SIZE+1];
305 size_t ssid_len;
306 u8 active;
307 u8 high_prio;
308 u8 probe_requests;
309};
310
277struct wl1271 { 311struct wl1271 {
278 struct ieee80211_hw *hw; 312 struct ieee80211_hw *hw;
279 bool mac80211_registered; 313 bool mac80211_registered;
@@ -288,10 +322,7 @@ struct wl1271 {
288 enum wl1271_state state; 322 enum wl1271_state state;
289 struct mutex mutex; 323 struct mutex mutex;
290 324
291 int physical_mem_addr; 325 struct wl1271_partition_set part;
292 int physical_reg_addr;
293 int virtual_mem_addr;
294 int virtual_reg_addr;
295 326
296 struct wl1271_chip chip; 327 struct wl1271_chip chip;
297 328
@@ -308,7 +339,6 @@ struct wl1271 {
308 u8 bss_type; 339 u8 bss_type;
309 u8 ssid[IW_ESSID_MAX_SIZE + 1]; 340 u8 ssid[IW_ESSID_MAX_SIZE + 1];
310 u8 ssid_len; 341 u8 ssid_len;
311 u8 listen_int;
312 int channel; 342 int channel;
313 343
314 struct wl1271_acx_mem_map *target_mem_map; 344 struct wl1271_acx_mem_map *target_mem_map;
@@ -332,10 +362,14 @@ struct wl1271 {
332 bool tx_queue_stopped; 362 bool tx_queue_stopped;
333 363
334 struct work_struct tx_work; 364 struct work_struct tx_work;
335 struct work_struct filter_work;
336 365
337 /* Pending TX frames */ 366 /* Pending TX frames */
338 struct sk_buff *tx_frames[16]; 367 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
368
369 /* Security sequence number counters */
370 u8 tx_security_last_seq;
371 u16 tx_security_seq_16;
372 u32 tx_security_seq_32;
339 373
340 /* FW Rx counter */ 374 /* FW Rx counter */
341 u32 rx_counter; 375 u32 rx_counter;
@@ -354,10 +388,17 @@ struct wl1271 {
354 388
355 /* Are we currently scanning */ 389 /* Are we currently scanning */
356 bool scanning; 390 bool scanning;
391 struct wl1271_scan scan;
357 392
358 /* Our association ID */ 393 /* Our association ID */
359 u16 aid; 394 u16 aid;
360 395
396 /* currently configured rate set */
397 u32 basic_rate_set;
398
399 /* The current band */
400 enum ieee80211_band band;
401
361 /* Default key (for WEP) */ 402 /* Default key (for WEP) */
362 u32 default_key; 403 u32 default_key;
363 404
@@ -368,6 +409,7 @@ struct wl1271 {
368 bool elp; 409 bool elp;
369 410
370 struct completion *elp_compl; 411 struct completion *elp_compl;
412 struct delayed_work elp_work;
371 413
372 /* we can be in psm, but not in elp, we have to differentiate */ 414 /* we can be in psm, but not in elp, we have to differentiate */
373 bool psm; 415 bool psm;
@@ -375,6 +417,9 @@ struct wl1271 {
375 /* PSM mode requested */ 417 /* PSM mode requested */
376 bool psm_requested; 418 bool psm_requested;
377 419
420 /* retry counter for PSM entries */
421 u8 psm_entry_retry;
422
378 /* in dBm */ 423 /* in dBm */
379 int power_level; 424 int power_level;
380 425
@@ -383,11 +428,20 @@ struct wl1271 {
383 428
384 u32 buffer_32; 429 u32 buffer_32;
385 u32 buffer_cmd; 430 u32 buffer_cmd;
386 u8 buffer_busyword[WL1271_BUSY_WORD_LEN]; 431 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
387 struct wl1271_rx_descriptor *rx_descriptor;
388 432
389 struct wl1271_fw_status *fw_status; 433 struct wl1271_fw_status *fw_status;
390 struct wl1271_tx_hw_res_if *tx_res_if; 434 struct wl1271_tx_hw_res_if *tx_res_if;
435
436 struct ieee80211_vif *vif;
437
438 /* Used for a workaround to send disconnect before rejoining */
439 bool joined;
440
441 /* Current chipset configuration */
442 struct conf_drv_settings conf;
443
444 struct list_head list;
391}; 445};
392 446
393int wl1271_plt_start(struct wl1271 *wl); 447int wl1271_plt_start(struct wl1271 *wl);
@@ -404,4 +458,13 @@ int wl1271_plt_stop(struct wl1271 *wl);
404/* WL1271 needs a 200ms sleep after power on */ 458/* WL1271 needs a 200ms sleep after power on */
405#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ 459#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
406 460
461static inline bool wl1271_11a_enabled(void)
462{
463#ifdef WL1271_80211A_ENABLED
464 return true;
465#else
466 return false;
467#endif
468}
469
407#endif 470#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index f622a4092615..5cc89bbdac7a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -34,8 +34,7 @@
34#include "wl1271_spi.h" 34#include "wl1271_spi.h"
35#include "wl1271_ps.h" 35#include "wl1271_ps.h"
36 36
37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
38 u8 listen_interval)
39{ 38{
40 struct acx_wake_up_condition *wake_up; 39 struct acx_wake_up_condition *wake_up;
41 int ret; 40 int ret;
@@ -48,8 +47,8 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
48 goto out; 47 goto out;
49 } 48 }
50 49
51 wake_up->wake_up_event = wake_up_event; 50 wake_up->wake_up_event = wl->conf.conn.wake_up_event;
52 wake_up->listen_interval = listen_interval; 51 wake_up->listen_interval = wl->conf.conn.listen_interval;
53 52
54 ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, 53 ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
55 wake_up, sizeof(*wake_up)); 54 wake_up, sizeof(*wake_up));
@@ -137,7 +136,12 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
137 goto out; 136 goto out;
138 } 137 }
139 138
140 acx->current_tx_power = power * 10; 139 /*
140 * FIXME: This is a workaround needed while we don't the correct
141 * calibration, to avoid distortions
142 */
143 /* acx->current_tx_power = power * 10; */
144 acx->current_tx_power = 120;
141 145
142 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); 146 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
143 if (ret < 0) { 147 if (ret < 0) {
@@ -193,7 +197,7 @@ int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map,
193 return 0; 197 return 0;
194} 198}
195 199
196int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time) 200int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl)
197{ 201{
198 struct acx_rx_msdu_lifetime *acx; 202 struct acx_rx_msdu_lifetime *acx;
199 int ret; 203 int ret;
@@ -206,7 +210,7 @@ int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time)
206 goto out; 210 goto out;
207 } 211 }
208 212
209 acx->lifetime = life_time; 213 acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time);
210 ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, 214 ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
211 acx, sizeof(*acx)); 215 acx, sizeof(*acx));
212 if (ret < 0) { 216 if (ret < 0) {
@@ -232,8 +236,8 @@ int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter)
232 goto out; 236 goto out;
233 } 237 }
234 238
235 rx_config->config_options = config; 239 rx_config->config_options = cpu_to_le32(config);
236 rx_config->filter_options = filter; 240 rx_config->filter_options = cpu_to_le32(filter);
237 241
238 ret = wl1271_cmd_configure(wl, ACX_RX_CFG, 242 ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
239 rx_config, sizeof(*rx_config)); 243 rx_config, sizeof(*rx_config));
@@ -260,7 +264,7 @@ int wl1271_acx_pd_threshold(struct wl1271 *wl)
260 goto out; 264 goto out;
261 } 265 }
262 266
263 /* FIXME: threshold value not set */ 267 pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
264 268
265 ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); 269 ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
266 if (ret < 0) { 270 if (ret < 0) {
@@ -300,7 +304,8 @@ out:
300 return ret; 304 return ret;
301} 305}
302 306
303int wl1271_acx_group_address_tbl(struct wl1271 *wl) 307int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
308 void *mc_list, u32 mc_list_len)
304{ 309{
305 struct acx_dot11_grp_addr_tbl *acx; 310 struct acx_dot11_grp_addr_tbl *acx;
306 int ret; 311 int ret;
@@ -314,9 +319,9 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl)
314 } 319 }
315 320
316 /* MAC filtering */ 321 /* MAC filtering */
317 acx->enabled = 0; 322 acx->enabled = enable;
318 acx->num_groups = 0; 323 acx->num_groups = mc_list_len;
319 memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN); 324 memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
320 325
321 ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, 326 ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
322 acx, sizeof(*acx)); 327 acx, sizeof(*acx));
@@ -343,8 +348,8 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
343 348
344 wl1271_debug(DEBUG_ACX, "acx service period timeout"); 349 wl1271_debug(DEBUG_ACX, "acx service period timeout");
345 350
346 rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF; 351 rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
347 rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF; 352 rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
348 353
349 ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, 354 ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT,
350 rx_timeout, sizeof(*rx_timeout)); 355 rx_timeout, sizeof(*rx_timeout));
@@ -372,7 +377,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold)
372 goto out; 377 goto out;
373 } 378 }
374 379
375 rts->threshold = rts_threshold; 380 rts->threshold = cpu_to_le16(rts_threshold);
376 381
377 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); 382 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
378 if (ret < 0) { 383 if (ret < 0) {
@@ -385,20 +390,29 @@ out:
385 return ret; 390 return ret;
386} 391}
387 392
388int wl1271_acx_beacon_filter_opt(struct wl1271 *wl) 393int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
389{ 394{
390 struct acx_beacon_filter_option *beacon_filter; 395 struct acx_beacon_filter_option *beacon_filter = NULL;
391 int ret; 396 int ret = 0;
392 397
393 wl1271_debug(DEBUG_ACX, "acx beacon filter opt"); 398 wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
394 399
400 if (enable_filter &&
401 wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
402 goto out;
403
395 beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); 404 beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
396 if (!beacon_filter) { 405 if (!beacon_filter) {
397 ret = -ENOMEM; 406 ret = -ENOMEM;
398 goto out; 407 goto out;
399 } 408 }
400 409
401 beacon_filter->enable = 0; 410 beacon_filter->enable = enable_filter;
411
412 /*
413 * When set to zero, and the filter is enabled, beacons
414 * without the unicast TIM bit set are dropped.
415 */
402 beacon_filter->max_num_beacons = 0; 416 beacon_filter->max_num_beacons = 0;
403 417
404 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT, 418 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
@@ -416,7 +430,9 @@ out:
416int wl1271_acx_beacon_filter_table(struct wl1271 *wl) 430int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
417{ 431{
418 struct acx_beacon_filter_ie_table *ie_table; 432 struct acx_beacon_filter_ie_table *ie_table;
433 int i, idx = 0;
419 int ret; 434 int ret;
435 bool vendor_spec = false;
420 436
421 wl1271_debug(DEBUG_ACX, "acx beacon filter table"); 437 wl1271_debug(DEBUG_ACX, "acx beacon filter table");
422 438
@@ -426,8 +442,32 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
426 goto out; 442 goto out;
427 } 443 }
428 444
445 /* configure default beacon pass-through rules */
429 ie_table->num_ie = 0; 446 ie_table->num_ie = 0;
430 memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE); 447 for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
448 struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
449 ie_table->table[idx++] = r->ie;
450 ie_table->table[idx++] = r->rule;
451
452 if (r->ie == WLAN_EID_VENDOR_SPECIFIC) {
453 /* only one vendor specific ie allowed */
454 if (vendor_spec)
455 continue;
456
457 /* for vendor specific rules configure the
458 additional fields */
459 memcpy(&(ie_table->table[idx]), r->oui,
460 CONF_BCN_IE_OUI_LEN);
461 idx += CONF_BCN_IE_OUI_LEN;
462 ie_table->table[idx++] = r->type;
463 memcpy(&(ie_table->table[idx]), r->version,
464 CONF_BCN_IE_VER_LEN);
465 idx += CONF_BCN_IE_VER_LEN;
466 vendor_spec = true;
467 }
468
469 ie_table->num_ie++;
470 }
431 471
432 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, 472 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
433 ie_table, sizeof(*ie_table)); 473 ie_table, sizeof(*ie_table));
@@ -441,6 +481,36 @@ out:
441 return ret; 481 return ret;
442} 482}
443 483
484int wl1271_acx_conn_monit_params(struct wl1271 *wl)
485{
486 struct acx_conn_monit_params *acx;
487 int ret;
488
489 wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
490
491 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
492 if (!acx) {
493 ret = -ENOMEM;
494 goto out;
495 }
496
497 acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
498 acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
499
500 ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
501 acx, sizeof(*acx));
502 if (ret < 0) {
503 wl1271_warning("failed to set connection monitor "
504 "parameters: %d", ret);
505 goto out;
506 }
507
508out:
509 kfree(acx);
510 return ret;
511}
512
513
444int wl1271_acx_sg_enable(struct wl1271 *wl) 514int wl1271_acx_sg_enable(struct wl1271 *wl)
445{ 515{
446 struct acx_bt_wlan_coex *pta; 516 struct acx_bt_wlan_coex *pta;
@@ -470,6 +540,7 @@ out:
470int wl1271_acx_sg_cfg(struct wl1271 *wl) 540int wl1271_acx_sg_cfg(struct wl1271 *wl)
471{ 541{
472 struct acx_bt_wlan_coex_param *param; 542 struct acx_bt_wlan_coex_param *param;
543 struct conf_sg_settings *c = &wl->conf.sg;
473 int ret; 544 int ret;
474 545
475 wl1271_debug(DEBUG_ACX, "acx sg cfg"); 546 wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -481,34 +552,19 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
481 } 552 }
482 553
483 /* BT-WLAN coext parameters */ 554 /* BT-WLAN coext parameters */
484 param->min_rate = RATE_INDEX_24MBPS; 555 param->per_threshold = cpu_to_le32(c->per_threshold);
485 param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF; 556 param->max_scan_compensation_time =
486 param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF; 557 cpu_to_le32(c->max_scan_compensation_time);
487 param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF; 558 param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
488 param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF; 559 param->load_ratio = c->load_ratio;
489 param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF; 560 param->auto_ps_mode = c->auto_ps_mode;
490 param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF; 561 param->probe_req_compensation = c->probe_req_compensation;
491 param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF; 562 param->scan_window_compensation = c->scan_window_compensation;
492 param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF; 563 param->antenna_config = c->antenna_config;
493 param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF; 564 param->beacon_miss_threshold = c->beacon_miss_threshold;
494 param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF; 565 param->rate_adaptation_threshold =
495 param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF; 566 cpu_to_le32(c->rate_adaptation_threshold);
496 param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF; 567 param->rate_adaptation_snr = c->rate_adaptation_snr;
497 param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF;
498 param->antenna_type = PTA_ANTENNA_TYPE_DEF;
499 param->signal_type = PTA_SIGNALING_TYPE_DEF;
500 param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF;
501 param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF;
502 param->max_cts = PTA_MAX_NUM_CTS_DEF;
503 param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF;
504 param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF;
505 param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF;
506 param->wlan_elp_hp = PTA_ELP_HP_DEF;
507 param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF;
508 param->ack_mode_dual_ant = PTA_ACK_MODE_DEF;
509 param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF;
510 param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF;
511 param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF;
512 568
513 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 569 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
514 if (ret < 0) { 570 if (ret < 0) {
@@ -534,8 +590,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
534 goto out; 590 goto out;
535 } 591 }
536 592
537 detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D; 593 detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold);
538 detection->tx_energy_detection = 0; 594 detection->tx_energy_detection = wl->conf.tx.tx_energy_detection;
539 595
540 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, 596 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
541 detection, sizeof(*detection)); 597 detection, sizeof(*detection));
@@ -562,10 +618,10 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
562 goto out; 618 goto out;
563 } 619 }
564 620
565 bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE; 621 bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
566 bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE; 622 bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
567 bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE; 623 bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
568 bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF; 624 bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold;
569 625
570 ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); 626 ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb));
571 if (ret < 0) { 627 if (ret < 0) {
@@ -591,7 +647,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
591 goto out; 647 goto out;
592 } 648 }
593 649
594 acx_aid->aid = aid; 650 acx_aid->aid = cpu_to_le16(aid);
595 651
596 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); 652 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
597 if (ret < 0) { 653 if (ret < 0) {
@@ -618,9 +674,8 @@ int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask)
618 } 674 }
619 675
620 /* high event mask is unused */ 676 /* high event mask is unused */
621 mask->high_event_mask = 0xffffffff; 677 mask->high_event_mask = cpu_to_le32(0xffffffff);
622 678 mask->event_mask = cpu_to_le32(event_mask);
623 mask->event_mask = event_mask;
624 679
625 ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK, 680 ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
626 mask, sizeof(*mask)); 681 mask, sizeof(*mask));
@@ -703,9 +758,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
703 return 0; 758 return 0;
704} 759}
705 760
706int wl1271_acx_rate_policies(struct wl1271 *wl) 761int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
707{ 762{
708 struct acx_rate_policy *acx; 763 struct acx_rate_policy *acx;
764 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
709 int ret = 0; 765 int ret = 0;
710 766
711 wl1271_debug(DEBUG_ACX, "acx rate policies"); 767 wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -718,11 +774,11 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
718 } 774 }
719 775
720 /* configure one default (one-size-fits-all) rate class */ 776 /* configure one default (one-size-fits-all) rate class */
721 acx->rate_class_cnt = 1; 777 acx->rate_class_cnt = cpu_to_le32(1);
722 acx->rate_class[0].enabled_rates = ACX_RATE_MASK_ALL; 778 acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
723 acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT; 779 acx->rate_class[0].short_retry_limit = c->short_retry_limit;
724 acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT; 780 acx->rate_class[0].long_retry_limit = c->long_retry_limit;
725 acx->rate_class[0].aflags = 0; 781 acx->rate_class[0].aflags = c->aflags;
726 782
727 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 783 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
728 if (ret < 0) { 784 if (ret < 0) {
@@ -749,22 +805,14 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl)
749 goto out; 805 goto out;
750 } 806 }
751 807
752 /* 808 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
753 * FIXME: Configure each AC with appropriate values (most suitable 809 struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]);
754 * values will probably be different for each AC. 810 acx->ac = c->ac;
755 */ 811 acx->cw_min = c->cw_min;
756 for (i = 0; i < WL1271_ACX_AC_COUNT; i++) { 812 acx->cw_max = cpu_to_le16(c->cw_max);
757 acx->ac = i; 813 acx->aifsn = c->aifsn;
758
759 /*
760 * FIXME: The following default values originate from
761 * the TI reference driver. What do they mean?
762 */
763 acx->cw_min = 15;
764 acx->cw_max = 63;
765 acx->aifsn = 3;
766 acx->reserved = 0; 814 acx->reserved = 0;
767 acx->tx_op_limit = 0; 815 acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
768 816
769 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); 817 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
770 if (ret < 0) { 818 if (ret < 0) {
@@ -793,12 +841,15 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl)
793 goto out; 841 goto out;
794 } 842 }
795 843
796 /* FIXME: configure each TID with a different AC reference */ 844 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
797 for (i = 0; i < WL1271_ACX_TID_COUNT; i++) { 845 struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]);
798 acx->queue_id = i; 846 acx->queue_id = c->queue_id;
799 acx->tsid = WL1271_ACX_AC_BE; 847 acx->channel_type = c->channel_type;
800 acx->ps_scheme = WL1271_ACX_PS_SCHEME_LEGACY; 848 acx->tsid = c->tsid;
801 acx->ack_policy = WL1271_ACX_ACK_POLICY_LEGACY; 849 acx->ps_scheme = c->ps_scheme;
850 acx->ack_policy = c->ack_policy;
851 acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
852 acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
802 853
803 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); 854 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
804 if (ret < 0) { 855 if (ret < 0) {
@@ -826,7 +877,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl)
826 goto out; 877 goto out;
827 } 878 }
828 879
829 acx->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 880 acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold);
830 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); 881 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
831 if (ret < 0) { 882 if (ret < 0) {
832 wl1271_warning("Setting of frag threshold failed: %d", ret); 883 wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -852,8 +903,8 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl)
852 goto out; 903 goto out;
853 } 904 }
854 905
855 acx->tx_compl_timeout = WL1271_ACX_TX_COMPL_TIMEOUT; 906 acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout);
856 acx->tx_compl_threshold = WL1271_ACX_TX_COMPL_THRESHOLD; 907 acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold);
857 ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx)); 908 ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx));
858 if (ret < 0) { 909 if (ret < 0) {
859 wl1271_warning("Setting of tx options failed: %d", ret); 910 wl1271_warning("Setting of tx options failed: %d", ret);
@@ -879,11 +930,11 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
879 } 930 }
880 931
881 /* memory config */ 932 /* memory config */
882 mem_conf->num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS); 933 mem_conf->num_stations = DEFAULT_NUM_STATIONS;
883 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; 934 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
884 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; 935 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
885 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; 936 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
886 mem_conf->total_tx_descriptors = ACX_TX_DESCRIPTORS; 937 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
887 938
888 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 939 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
889 sizeof(*mem_conf)); 940 sizeof(*mem_conf));
@@ -906,7 +957,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
906 return ret; 957 return ret;
907 958
908 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), 959 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
909 GFP_KERNEL); 960 GFP_KERNEL);
910 if (!wl->target_mem_map) { 961 if (!wl->target_mem_map) {
911 wl1271_error("couldn't allocate target memory map"); 962 wl1271_error("couldn't allocate target memory map");
912 return -ENOMEM; 963 return -ENOMEM;
@@ -923,7 +974,8 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
923 } 974 }
924 975
925 /* initialize TX block book keeping */ 976 /* initialize TX block book keeping */
926 wl->tx_blocks_available = wl->target_mem_map->num_tx_mem_blocks; 977 wl->tx_blocks_available =
978 le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks);
927 wl1271_debug(DEBUG_TX, "available tx blocks: %d", 979 wl1271_debug(DEBUG_TX, "available tx blocks: %d",
928 wl->tx_blocks_available); 980 wl->tx_blocks_available);
929 981
@@ -943,10 +995,10 @@ int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
943 goto out; 995 goto out;
944 } 996 }
945 997
946 rx_conf->threshold = WL1271_RX_INTR_THRESHOLD_DEF; 998 rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold);
947 rx_conf->timeout = WL1271_RX_INTR_TIMEOUT_DEF; 999 rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout);
948 rx_conf->mblk_threshold = USHORT_MAX; /* Disabled */ 1000 rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold);
949 rx_conf->queue_type = RX_QUEUE_TYPE_RX_LOW_PRIORITY; 1001 rx_conf->queue_type = wl->conf.rx.queue_type;
950 1002
951 ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf, 1003 ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf,
952 sizeof(*rx_conf)); 1004 sizeof(*rx_conf));
@@ -959,3 +1011,124 @@ out:
959 kfree(rx_conf); 1011 kfree(rx_conf);
960 return ret; 1012 return ret;
961} 1013}
1014
1015int wl1271_acx_smart_reflex(struct wl1271 *wl)
1016{
1017 struct acx_smart_reflex_state *sr_state = NULL;
1018 struct acx_smart_reflex_config_params *sr_param = NULL;
1019 int i, ret;
1020
1021 wl1271_debug(DEBUG_ACX, "acx smart reflex");
1022
1023 sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
1024 if (!sr_param) {
1025 ret = -ENOMEM;
1026 goto out;
1027 }
1028
1029 for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
1030 struct conf_mart_reflex_err_table *e =
1031 &(wl->conf.init.sr_err_tbl[i]);
1032
1033 sr_param->error_table[i].len = e->len;
1034 sr_param->error_table[i].upper_limit = e->upper_limit;
1035 memcpy(sr_param->error_table[i].values, e->values, e->len);
1036 }
1037
1038 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
1039 sr_param, sizeof(*sr_param));
1040 if (ret < 0) {
1041 wl1271_warning("failed to set smart reflex params: %d", ret);
1042 goto out;
1043 }
1044
1045 sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
1046 if (!sr_state) {
1047 ret = -ENOMEM;
1048 goto out;
1049 }
1050
1051 /* enable smart reflex */
1052 sr_state->enable = wl->conf.init.sr_enable;
1053
1054 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
1055 sr_state, sizeof(*sr_state));
1056 if (ret < 0) {
1057 wl1271_warning("failed to set smart reflex params: %d", ret);
1058 goto out;
1059 }
1060
1061out:
1062 kfree(sr_state);
1063 kfree(sr_param);
1064 return ret;
1065
1066}
1067
1068int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1069{
1070 struct wl1271_acx_bet_enable *acx = NULL;
1071 int ret = 0;
1072
1073 wl1271_debug(DEBUG_ACX, "acx bet enable");
1074
1075 if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE)
1076 goto out;
1077
1078 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1079 if (!acx) {
1080 ret = -ENOMEM;
1081 goto out;
1082 }
1083
1084 acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
1085 acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
1086
1087 ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
1088 if (ret < 0) {
1089 wl1271_warning("acx bet enable failed: %d", ret);
1090 goto out;
1091 }
1092
1093out:
1094 kfree(acx);
1095 return ret;
1096}
1097
1098int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1099 u8 version)
1100{
1101 struct wl1271_acx_arp_filter *acx;
1102 int ret;
1103
1104 wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
1105
1106 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1107 if (!acx) {
1108 ret = -ENOMEM;
1109 goto out;
1110 }
1111
1112 acx->version = version;
1113 acx->enable = enable;
1114
1115 if (enable == true) {
1116 if (version == ACX_IPV4_VERSION)
1117 memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE);
1118 else if (version == ACX_IPV6_VERSION)
1119 memcpy(acx->address, address, sizeof(acx->address));
1120 else
1121 wl1271_error("Invalid IP version");
1122 }
1123
1124 ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
1125 acx, sizeof(*acx));
1126 if (ret < 0) {
1127 wl1271_warning("failed to set arp ip filter: %d", ret);
1128 goto out;
1129 }
1130
1131out:
1132 kfree(acx);
1133 return ret;
1134}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 9068daaf0ddf..2ce0a8128542 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -61,8 +61,9 @@
61 WL1271_ACX_INTR_HW_AVAILABLE | \ 61 WL1271_ACX_INTR_HW_AVAILABLE | \
62 WL1271_ACX_INTR_DATA) 62 WL1271_ACX_INTR_DATA)
63 63
64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \ 64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \
65 WL1271_ACX_INTR_EVENT_B | \ 65 WL1271_ACX_INTR_EVENT_B | \
66 WL1271_ACX_INTR_HW_AVAILABLE | \
66 WL1271_ACX_INTR_DATA) 67 WL1271_ACX_INTR_DATA)
67 68
68/* Target's information element */ 69/* Target's information element */
@@ -70,11 +71,11 @@ struct acx_header {
70 struct wl1271_cmd_header cmd; 71 struct wl1271_cmd_header cmd;
71 72
72 /* acx (or information element) header */ 73 /* acx (or information element) header */
73 u16 id; 74 __le16 id;
74 75
75 /* payload length (not including headers */ 76 /* payload length (not including headers */
76 u16 len; 77 __le16 len;
77}; 78} __attribute__ ((packed));
78 79
79struct acx_error_counter { 80struct acx_error_counter {
80 struct acx_header header; 81 struct acx_header header;
@@ -82,21 +83,21 @@ struct acx_error_counter {
82 /* The number of PLCP errors since the last time this */ 83 /* The number of PLCP errors since the last time this */
83 /* information element was interrogated. This field is */ 84 /* information element was interrogated. This field is */
84 /* automatically cleared when it is interrogated.*/ 85 /* automatically cleared when it is interrogated.*/
85 u32 PLCP_error; 86 __le32 PLCP_error;
86 87
87 /* The number of FCS errors since the last time this */ 88 /* The number of FCS errors since the last time this */
88 /* information element was interrogated. This field is */ 89 /* information element was interrogated. This field is */
89 /* automatically cleared when it is interrogated.*/ 90 /* automatically cleared when it is interrogated.*/
90 u32 FCS_error; 91 __le32 FCS_error;
91 92
92 /* The number of MPDUs without PLCP header errors received*/ 93 /* The number of MPDUs without PLCP header errors received*/
93 /* since the last time this information element was interrogated. */ 94 /* since the last time this information element was interrogated. */
94 /* This field is automatically cleared when it is interrogated.*/ 95 /* This field is automatically cleared when it is interrogated.*/
95 u32 valid_frame; 96 __le32 valid_frame;
96 97
97 /* the number of missed sequence numbers in the squentially */ 98 /* the number of missed sequence numbers in the squentially */
98 /* values of frames seq numbers */ 99 /* values of frames seq numbers */
99 u32 seq_num_miss; 100 __le32 seq_num_miss;
100} __attribute__ ((packed)); 101} __attribute__ ((packed));
101 102
102struct acx_revision { 103struct acx_revision {
@@ -125,7 +126,7 @@ struct acx_revision {
125 * (1 = first spin, 2 = second spin, and so on). 126 * (1 = first spin, 2 = second spin, and so on).
126 * bits 24 - 31: Chip ID - The WiLink chip ID. 127 * bits 24 - 31: Chip ID - The WiLink chip ID.
127 */ 128 */
128 u32 hw_version; 129 __le32 hw_version;
129} __attribute__ ((packed)); 130} __attribute__ ((packed));
130 131
131enum wl1271_psm_mode { 132enum wl1271_psm_mode {
@@ -170,7 +171,6 @@ enum {
170#define DP_RX_PACKET_RING_CHUNK_NUM 2 171#define DP_RX_PACKET_RING_CHUNK_NUM 2
171#define DP_TX_PACKET_RING_CHUNK_NUM 2 172#define DP_TX_PACKET_RING_CHUNK_NUM 2
172#define DP_TX_COMPLETE_TIME_OUT 20 173#define DP_TX_COMPLETE_TIME_OUT 20
173#define FW_TX_CMPLT_BLOCK_SIZE 16
174 174
175#define TX_MSDU_LIFETIME_MIN 0 175#define TX_MSDU_LIFETIME_MIN 0
176#define TX_MSDU_LIFETIME_MAX 3000 176#define TX_MSDU_LIFETIME_MAX 3000
@@ -186,7 +186,7 @@ struct acx_rx_msdu_lifetime {
186 * The maximum amount of time, in TU, before the 186 * The maximum amount of time, in TU, before the
187 * firmware discards the MSDU. 187 * firmware discards the MSDU.
188 */ 188 */
189 u32 lifetime; 189 __le32 lifetime;
190} __attribute__ ((packed)); 190} __attribute__ ((packed));
191 191
192/* 192/*
@@ -273,14 +273,14 @@ struct acx_rx_msdu_lifetime {
273struct acx_rx_config { 273struct acx_rx_config {
274 struct acx_header header; 274 struct acx_header header;
275 275
276 u32 config_options; 276 __le32 config_options;
277 u32 filter_options; 277 __le32 filter_options;
278} __attribute__ ((packed)); 278} __attribute__ ((packed));
279 279
280struct acx_packet_detection { 280struct acx_packet_detection {
281 struct acx_header header; 281 struct acx_header header;
282 282
283 u32 threshold; 283 __le32 threshold;
284} __attribute__ ((packed)); 284} __attribute__ ((packed));
285 285
286 286
@@ -302,8 +302,8 @@ struct acx_slot {
302} __attribute__ ((packed)); 302} __attribute__ ((packed));
303 303
304 304
305#define ADDRESS_GROUP_MAX (8) 305#define ACX_MC_ADDRESS_GROUP_MAX (8)
306#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX) 306#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
307 307
308struct acx_dot11_grp_addr_tbl { 308struct acx_dot11_grp_addr_tbl {
309 struct acx_header header; 309 struct acx_header header;
@@ -314,40 +314,17 @@ struct acx_dot11_grp_addr_tbl {
314 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 314 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
315} __attribute__ ((packed)); 315} __attribute__ ((packed));
316 316
317
318#define RX_TIMEOUT_PS_POLL_MIN 0
319#define RX_TIMEOUT_PS_POLL_MAX (200000)
320#define RX_TIMEOUT_PS_POLL_DEF (15)
321#define RX_TIMEOUT_UPSD_MIN 0
322#define RX_TIMEOUT_UPSD_MAX (200000)
323#define RX_TIMEOUT_UPSD_DEF (15)
324
325struct acx_rx_timeout { 317struct acx_rx_timeout {
326 struct acx_header header; 318 struct acx_header header;
327 319
328 /* 320 __le16 ps_poll_timeout;
329 * The longest time the STA will wait to receive 321 __le16 upsd_timeout;
330 * traffic from the AP after a PS-poll has been
331 * transmitted.
332 */
333 u16 ps_poll_timeout;
334
335 /*
336 * The longest time the STA will wait to receive
337 * traffic from the AP after a frame has been sent
338 * from an UPSD enabled queue.
339 */
340 u16 upsd_timeout;
341} __attribute__ ((packed)); 322} __attribute__ ((packed));
342 323
343#define RTS_THRESHOLD_MIN 0
344#define RTS_THRESHOLD_MAX 4096
345#define RTS_THRESHOLD_DEF 2347
346
347struct acx_rts_threshold { 324struct acx_rts_threshold {
348 struct acx_header header; 325 struct acx_header header;
349 326
350 u16 threshold; 327 __le16 threshold;
351 u8 pad[2]; 328 u8 pad[2];
352} __attribute__ ((packed)); 329} __attribute__ ((packed));
353 330
@@ -408,6 +385,13 @@ struct acx_beacon_filter_ie_table {
408 u8 pad[3]; 385 u8 pad[3];
409} __attribute__ ((packed)); 386} __attribute__ ((packed));
410 387
388struct acx_conn_monit_params {
389 struct acx_header header;
390
391 __le32 synch_fail_thold; /* number of beacons missed */
392 __le32 bss_lose_timeout; /* number of TU's from synch fail */
393} __attribute__ ((packed));
394
411enum { 395enum {
412 SG_ENABLE = 0, 396 SG_ENABLE = 0,
413 SG_DISABLE, 397 SG_DISABLE,
@@ -431,6 +415,25 @@ struct acx_bt_wlan_coex {
431 u8 pad[3]; 415 u8 pad[3];
432} __attribute__ ((packed)); 416} __attribute__ ((packed));
433 417
418struct acx_smart_reflex_state {
419 struct acx_header header;
420
421 u8 enable;
422 u8 padding[3];
423} __attribute__ ((packed));
424
425struct smart_reflex_err_table {
426 u8 len;
427 s8 upper_limit;
428 s8 values[14];
429} __attribute__ ((packed));
430
431struct acx_smart_reflex_config_params {
432 struct acx_header header;
433
434 struct smart_reflex_err_table error_table[3];
435} __attribute__ ((packed));
436
434#define PTA_ANTENNA_TYPE_DEF (0) 437#define PTA_ANTENNA_TYPE_DEF (0)
435#define PTA_BT_HP_MAXTIME_DEF (2000) 438#define PTA_BT_HP_MAXTIME_DEF (2000)
436#define PTA_WLAN_HP_MAX_TIME_DEF (5000) 439#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
@@ -463,150 +466,34 @@ struct acx_bt_wlan_coex {
463struct acx_bt_wlan_coex_param { 466struct acx_bt_wlan_coex_param {
464 struct acx_header header; 467 struct acx_header header;
465 468
466 /* 469 __le32 per_threshold;
467 * The minimum rate of a received WLAN packet in the STA, 470 __le32 max_scan_compensation_time;
468 * during protective mode, of which a new BT-HP request 471 __le16 nfs_sample_interval;
469 * during this Rx will always be respected and gain the antenna. 472 u8 load_ratio;
470 */ 473 u8 auto_ps_mode;
471 u32 min_rate; 474 u8 probe_req_compensation;
472 475 u8 scan_window_compensation;
473 /* Max time the BT HP will be respected. */ 476 u8 antenna_config;
474 u16 bt_hp_max_time; 477 u8 beacon_miss_threshold;
475 478 __le32 rate_adaptation_threshold;
476 /* Max time the WLAN HP will be respected. */ 479 s8 rate_adaptation_snr;
477 u16 wlan_hp_max_time; 480 u8 padding[3];
478
479 /*
480 * The time between the last BT activity
481 * and the moment when the sense mode returns
482 * to SENSE_INACTIVE.
483 */
484 u16 sense_disable_timer;
485
486 /* Time before the next BT HP instance */
487 u16 rx_time_bt_hp;
488 u16 tx_time_bt_hp;
489
490 /* range: 10-20000 default: 1500 */
491 u16 rx_time_bt_hp_fast;
492 u16 tx_time_bt_hp_fast;
493
494 /* range: 2000-65535 default: 8700 */
495 u16 wlan_cycle_fast;
496
497 /* range: 0 - 15000 (Msec) default: 1000 */
498 u16 bt_anti_starvation_period;
499
500 /* range 400-10000(Usec) default: 3000 */
501 u16 next_bt_lp_packet;
502
503 /* Deafult: worst case for BT DH5 traffic */
504 u16 wake_up_beacon;
505
506 /* range: 0-50000(Usec) default: 1050 */
507 u16 hp_dm_max_guard_time;
508
509 /*
510 * This is to prevent both BT & WLAN antenna
511 * starvation.
512 * Range: 100-50000(Usec) default:2550
513 */
514 u16 next_wlan_packet;
515
516 /* 0 -> shared antenna */
517 u8 antenna_type;
518
519 /*
520 * 0 -> TI legacy
521 * 1 -> Palau
522 */
523 u8 signal_type;
524
525 /*
526 * BT AFH status
527 * 0 -> no AFH
528 * 1 -> from dedicated GPIO
529 * 2 -> AFH on (from host)
530 */
531 u8 afh_leverage_on;
532
533 /*
534 * The number of cycles during which no
535 * TX will be sent after 1 cycle of RX
536 * transaction in protective mode
537 */
538 u8 quiet_cycle_num;
539
540 /*
541 * The maximum number of CTSs that will
542 * be sent for receiving RX packet in
543 * protective mode
544 */
545 u8 max_cts;
546
547 /*
548 * The number of WLAN packets
549 * transferred in common mode before
550 * switching to BT.
551 */
552 u8 wlan_packets_num;
553
554 /*
555 * The number of BT packets
556 * transferred in common mode before
557 * switching to WLAN.
558 */
559 u8 bt_packets_num;
560
561 /* range: 1-255 default: 5 */
562 u8 missed_rx_avalanche;
563
564 /* range: 0-1 default: 1 */
565 u8 wlan_elp_hp;
566
567 /* range: 0 - 15 default: 4 */
568 u8 bt_anti_starvation_cycles;
569
570 u8 ack_mode_dual_ant;
571
572 /*
573 * Allow PA_SD assertion/de-assertion
574 * during enabled BT activity.
575 */
576 u8 pa_sd_enable;
577
578 /*
579 * Enable/Disable PTA in auto mode:
580 * Support Both Active & P.S modes
581 */
582 u8 pta_auto_mode_enable;
583
584 /* range: 0 - 20 default: 1 */
585 u8 bt_hp_respected_num;
586} __attribute__ ((packed)); 481} __attribute__ ((packed));
587 482
588#define CCA_THRSH_ENABLE_ENERGY_D 0x140A
589#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF
590
591struct acx_energy_detection { 483struct acx_energy_detection {
592 struct acx_header header; 484 struct acx_header header;
593 485
594 /* The RX Clear Channel Assessment threshold in the PHY */ 486 /* The RX Clear Channel Assessment threshold in the PHY */
595 u16 rx_cca_threshold; 487 __le16 rx_cca_threshold;
596 u8 tx_energy_detection; 488 u8 tx_energy_detection;
597 u8 pad; 489 u8 pad;
598} __attribute__ ((packed)); 490} __attribute__ ((packed));
599 491
600#define BCN_RX_TIMEOUT_DEF_VALUE 10000
601#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000
602#define RX_BROADCAST_IN_PS_DEF_VALUE 1
603#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4
604
605struct acx_beacon_broadcast { 492struct acx_beacon_broadcast {
606 struct acx_header header; 493 struct acx_header header;
607 494
608 u16 beacon_rx_timeout; 495 __le16 beacon_rx_timeout;
609 u16 broadcast_timeout; 496 __le16 broadcast_timeout;
610 497
611 /* Enables receiving of broadcast packets in PS mode */ 498 /* Enables receiving of broadcast packets in PS mode */
612 u8 rx_broadcast_in_ps; 499 u8 rx_broadcast_in_ps;
@@ -619,8 +506,8 @@ struct acx_beacon_broadcast {
619struct acx_event_mask { 506struct acx_event_mask {
620 struct acx_header header; 507 struct acx_header header;
621 508
622 u32 event_mask; 509 __le32 event_mask;
623 u32 high_event_mask; /* Unused */ 510 __le32 high_event_mask; /* Unused */
624} __attribute__ ((packed)); 511} __attribute__ ((packed));
625 512
626#define CFG_RX_FCS BIT(2) 513#define CFG_RX_FCS BIT(2)
@@ -657,11 +544,15 @@ struct acx_event_mask {
657#define SCAN_TRIGGERED BIT(2) 544#define SCAN_TRIGGERED BIT(2)
658#define SCAN_PRIORITY_HIGH BIT(3) 545#define SCAN_PRIORITY_HIGH BIT(3)
659 546
547/* When set, disable HW encryption */
548#define DF_ENCRYPTION_DISABLE 0x01
549#define DF_SNIFF_MODE_ENABLE 0x80
550
660struct acx_feature_config { 551struct acx_feature_config {
661 struct acx_header header; 552 struct acx_header header;
662 553
663 u32 options; 554 __le32 options;
664 u32 data_flow_options; 555 __le32 data_flow_options;
665} __attribute__ ((packed)); 556} __attribute__ ((packed));
666 557
667struct acx_current_tx_power { 558struct acx_current_tx_power {
@@ -671,14 +562,6 @@ struct acx_current_tx_power {
671 u8 padding[3]; 562 u8 padding[3];
672} __attribute__ ((packed)); 563} __attribute__ ((packed));
673 564
674enum acx_wake_up_event {
675 WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/
676 WAKE_UP_EVENT_DTIM_BITMAP = 0x02, /* Wake on every DTIM*/
677 WAKE_UP_EVENT_N_DTIM_BITMAP = 0x04, /* Wake on every Nth DTIM */
678 WAKE_UP_EVENT_N_BEACONS_BITMAP = 0x08, /* Wake on every Nth Beacon */
679 WAKE_UP_EVENT_BITS_MASK = 0x0F
680};
681
682struct acx_wake_up_condition { 565struct acx_wake_up_condition {
683 struct acx_header header; 566 struct acx_header header;
684 567
@@ -693,7 +576,7 @@ struct acx_aid {
693 /* 576 /*
694 * To be set when associated with an AP. 577 * To be set when associated with an AP.
695 */ 578 */
696 u16 aid; 579 __le16 aid;
697 u8 pad[2]; 580 u8 pad[2];
698} __attribute__ ((packed)); 581} __attribute__ ((packed));
699 582
@@ -725,152 +608,152 @@ struct acx_ctsprotect {
725} __attribute__ ((packed)); 608} __attribute__ ((packed));
726 609
727struct acx_tx_statistics { 610struct acx_tx_statistics {
728 u32 internal_desc_overflow; 611 __le32 internal_desc_overflow;
729} __attribute__ ((packed)); 612} __attribute__ ((packed));
730 613
731struct acx_rx_statistics { 614struct acx_rx_statistics {
732 u32 out_of_mem; 615 __le32 out_of_mem;
733 u32 hdr_overflow; 616 __le32 hdr_overflow;
734 u32 hw_stuck; 617 __le32 hw_stuck;
735 u32 dropped; 618 __le32 dropped;
736 u32 fcs_err; 619 __le32 fcs_err;
737 u32 xfr_hint_trig; 620 __le32 xfr_hint_trig;
738 u32 path_reset; 621 __le32 path_reset;
739 u32 reset_counter; 622 __le32 reset_counter;
740} __attribute__ ((packed)); 623} __attribute__ ((packed));
741 624
742struct acx_dma_statistics { 625struct acx_dma_statistics {
743 u32 rx_requested; 626 __le32 rx_requested;
744 u32 rx_errors; 627 __le32 rx_errors;
745 u32 tx_requested; 628 __le32 tx_requested;
746 u32 tx_errors; 629 __le32 tx_errors;
747} __attribute__ ((packed)); 630} __attribute__ ((packed));
748 631
749struct acx_isr_statistics { 632struct acx_isr_statistics {
750 /* host command complete */ 633 /* host command complete */
751 u32 cmd_cmplt; 634 __le32 cmd_cmplt;
752 635
753 /* fiqisr() */ 636 /* fiqisr() */
754 u32 fiqs; 637 __le32 fiqs;
755 638
756 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */ 639 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
757 u32 rx_headers; 640 __le32 rx_headers;
758 641
759 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */ 642 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
760 u32 rx_completes; 643 __le32 rx_completes;
761 644
762 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */ 645 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
763 u32 rx_mem_overflow; 646 __le32 rx_mem_overflow;
764 647
765 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */ 648 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
766 u32 rx_rdys; 649 __le32 rx_rdys;
767 650
768 /* irqisr() */ 651 /* irqisr() */
769 u32 irqs; 652 __le32 irqs;
770 653
771 /* (INT_STS_ND & INT_TRIG_TX_PROC) */ 654 /* (INT_STS_ND & INT_TRIG_TX_PROC) */
772 u32 tx_procs; 655 __le32 tx_procs;
773 656
774 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */ 657 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
775 u32 decrypt_done; 658 __le32 decrypt_done;
776 659
777 /* (INT_STS_ND & INT_TRIG_DMA0) */ 660 /* (INT_STS_ND & INT_TRIG_DMA0) */
778 u32 dma0_done; 661 __le32 dma0_done;
779 662
780 /* (INT_STS_ND & INT_TRIG_DMA1) */ 663 /* (INT_STS_ND & INT_TRIG_DMA1) */
781 u32 dma1_done; 664 __le32 dma1_done;
782 665
783 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */ 666 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
784 u32 tx_exch_complete; 667 __le32 tx_exch_complete;
785 668
786 /* (INT_STS_ND & INT_TRIG_COMMAND) */ 669 /* (INT_STS_ND & INT_TRIG_COMMAND) */
787 u32 commands; 670 __le32 commands;
788 671
789 /* (INT_STS_ND & INT_TRIG_RX_PROC) */ 672 /* (INT_STS_ND & INT_TRIG_RX_PROC) */
790 u32 rx_procs; 673 __le32 rx_procs;
791 674
792 /* (INT_STS_ND & INT_TRIG_PM_802) */ 675 /* (INT_STS_ND & INT_TRIG_PM_802) */
793 u32 hw_pm_mode_changes; 676 __le32 hw_pm_mode_changes;
794 677
795 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */ 678 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
796 u32 host_acknowledges; 679 __le32 host_acknowledges;
797 680
798 /* (INT_STS_ND & INT_TRIG_PM_PCI) */ 681 /* (INT_STS_ND & INT_TRIG_PM_PCI) */
799 u32 pci_pm; 682 __le32 pci_pm;
800 683
801 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */ 684 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
802 u32 wakeups; 685 __le32 wakeups;
803 686
804 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ 687 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
805 u32 low_rssi; 688 __le32 low_rssi;
806} __attribute__ ((packed)); 689} __attribute__ ((packed));
807 690
808struct acx_wep_statistics { 691struct acx_wep_statistics {
809 /* WEP address keys configured */ 692 /* WEP address keys configured */
810 u32 addr_key_count; 693 __le32 addr_key_count;
811 694
812 /* default keys configured */ 695 /* default keys configured */
813 u32 default_key_count; 696 __le32 default_key_count;
814 697
815 u32 reserved; 698 __le32 reserved;
816 699
817 /* number of times that WEP key not found on lookup */ 700 /* number of times that WEP key not found on lookup */
818 u32 key_not_found; 701 __le32 key_not_found;
819 702
820 /* number of times that WEP key decryption failed */ 703 /* number of times that WEP key decryption failed */
821 u32 decrypt_fail; 704 __le32 decrypt_fail;
822 705
823 /* WEP packets decrypted */ 706 /* WEP packets decrypted */
824 u32 packets; 707 __le32 packets;
825 708
826 /* WEP decrypt interrupts */ 709 /* WEP decrypt interrupts */
827 u32 interrupt; 710 __le32 interrupt;
828} __attribute__ ((packed)); 711} __attribute__ ((packed));
829 712
830#define ACX_MISSED_BEACONS_SPREAD 10 713#define ACX_MISSED_BEACONS_SPREAD 10
831 714
832struct acx_pwr_statistics { 715struct acx_pwr_statistics {
833 /* the amount of enters into power save mode (both PD & ELP) */ 716 /* the amount of enters into power save mode (both PD & ELP) */
834 u32 ps_enter; 717 __le32 ps_enter;
835 718
836 /* the amount of enters into ELP mode */ 719 /* the amount of enters into ELP mode */
837 u32 elp_enter; 720 __le32 elp_enter;
838 721
839 /* the amount of missing beacon interrupts to the host */ 722 /* the amount of missing beacon interrupts to the host */
840 u32 missing_bcns; 723 __le32 missing_bcns;
841 724
842 /* the amount of wake on host-access times */ 725 /* the amount of wake on host-access times */
843 u32 wake_on_host; 726 __le32 wake_on_host;
844 727
845 /* the amount of wake on timer-expire */ 728 /* the amount of wake on timer-expire */
846 u32 wake_on_timer_exp; 729 __le32 wake_on_timer_exp;
847 730
848 /* the number of packets that were transmitted with PS bit set */ 731 /* the number of packets that were transmitted with PS bit set */
849 u32 tx_with_ps; 732 __le32 tx_with_ps;
850 733
851 /* the number of packets that were transmitted with PS bit clear */ 734 /* the number of packets that were transmitted with PS bit clear */
852 u32 tx_without_ps; 735 __le32 tx_without_ps;
853 736
854 /* the number of received beacons */ 737 /* the number of received beacons */
855 u32 rcvd_beacons; 738 __le32 rcvd_beacons;
856 739
857 /* the number of entering into PowerOn (power save off) */ 740 /* the number of entering into PowerOn (power save off) */
858 u32 power_save_off; 741 __le32 power_save_off;
859 742
860 /* the number of entries into power save mode */ 743 /* the number of entries into power save mode */
861 u16 enable_ps; 744 __le16 enable_ps;
862 745
863 /* 746 /*
864 * the number of exits from power save, not including failed PS 747 * the number of exits from power save, not including failed PS
865 * transitions 748 * transitions
866 */ 749 */
867 u16 disable_ps; 750 __le16 disable_ps;
868 751
869 /* 752 /*
870 * the number of times the TSF counter was adjusted because 753 * the number of times the TSF counter was adjusted because
871 * of drift 754 * of drift
872 */ 755 */
873 u32 fix_tsf_ps; 756 __le32 fix_tsf_ps;
874 757
875 /* Gives statistics about the spread continuous missed beacons. 758 /* Gives statistics about the spread continuous missed beacons.
876 * The 16 LSB are dedicated for the PS mode. 759 * The 16 LSB are dedicated for the PS mode.
@@ -881,53 +764,53 @@ struct acx_pwr_statistics {
881 * ... 764 * ...
882 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons. 765 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
883 */ 766 */
884 u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD]; 767 __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
885 768
886 /* the number of beacons in awake mode */ 769 /* the number of beacons in awake mode */
887 u32 rcvd_awake_beacons; 770 __le32 rcvd_awake_beacons;
888} __attribute__ ((packed)); 771} __attribute__ ((packed));
889 772
890struct acx_mic_statistics { 773struct acx_mic_statistics {
891 u32 rx_pkts; 774 __le32 rx_pkts;
892 u32 calc_failure; 775 __le32 calc_failure;
893} __attribute__ ((packed)); 776} __attribute__ ((packed));
894 777
895struct acx_aes_statistics { 778struct acx_aes_statistics {
896 u32 encrypt_fail; 779 __le32 encrypt_fail;
897 u32 decrypt_fail; 780 __le32 decrypt_fail;
898 u32 encrypt_packets; 781 __le32 encrypt_packets;
899 u32 decrypt_packets; 782 __le32 decrypt_packets;
900 u32 encrypt_interrupt; 783 __le32 encrypt_interrupt;
901 u32 decrypt_interrupt; 784 __le32 decrypt_interrupt;
902} __attribute__ ((packed)); 785} __attribute__ ((packed));
903 786
904struct acx_event_statistics { 787struct acx_event_statistics {
905 u32 heart_beat; 788 __le32 heart_beat;
906 u32 calibration; 789 __le32 calibration;
907 u32 rx_mismatch; 790 __le32 rx_mismatch;
908 u32 rx_mem_empty; 791 __le32 rx_mem_empty;
909 u32 rx_pool; 792 __le32 rx_pool;
910 u32 oom_late; 793 __le32 oom_late;
911 u32 phy_transmit_error; 794 __le32 phy_transmit_error;
912 u32 tx_stuck; 795 __le32 tx_stuck;
913} __attribute__ ((packed)); 796} __attribute__ ((packed));
914 797
915struct acx_ps_statistics { 798struct acx_ps_statistics {
916 u32 pspoll_timeouts; 799 __le32 pspoll_timeouts;
917 u32 upsd_timeouts; 800 __le32 upsd_timeouts;
918 u32 upsd_max_sptime; 801 __le32 upsd_max_sptime;
919 u32 upsd_max_apturn; 802 __le32 upsd_max_apturn;
920 u32 pspoll_max_apturn; 803 __le32 pspoll_max_apturn;
921 u32 pspoll_utilization; 804 __le32 pspoll_utilization;
922 u32 upsd_utilization; 805 __le32 upsd_utilization;
923} __attribute__ ((packed)); 806} __attribute__ ((packed));
924 807
925struct acx_rxpipe_statistics { 808struct acx_rxpipe_statistics {
926 u32 rx_prep_beacon_drop; 809 __le32 rx_prep_beacon_drop;
927 u32 descr_host_int_trig_rx_data; 810 __le32 descr_host_int_trig_rx_data;
928 u32 beacon_buffer_thres_host_int_trig_rx_data; 811 __le32 beacon_buffer_thres_host_int_trig_rx_data;
929 u32 missed_beacon_host_int_trig_rx_data; 812 __le32 missed_beacon_host_int_trig_rx_data;
930 u32 tx_xfr_host_int_trig_rx_data; 813 __le32 tx_xfr_host_int_trig_rx_data;
931} __attribute__ ((packed)); 814} __attribute__ ((packed));
932 815
933struct acx_statistics { 816struct acx_statistics {
@@ -946,13 +829,8 @@ struct acx_statistics {
946 struct acx_rxpipe_statistics rxpipe; 829 struct acx_rxpipe_statistics rxpipe;
947} __attribute__ ((packed)); 830} __attribute__ ((packed));
948 831
949#define ACX_MAX_RATE_CLASSES 8
950#define ACX_RATE_MASK_UNSPECIFIED 0
951#define ACX_RATE_MASK_ALL 0x1eff
952#define ACX_RATE_RETRY_LIMIT 10
953
954struct acx_rate_class { 832struct acx_rate_class {
955 u32 enabled_rates; 833 __le32 enabled_rates;
956 u8 short_retry_limit; 834 u8 short_retry_limit;
957 u8 long_retry_limit; 835 u8 long_retry_limit;
958 u8 aflags; 836 u8 aflags;
@@ -962,47 +840,20 @@ struct acx_rate_class {
962struct acx_rate_policy { 840struct acx_rate_policy {
963 struct acx_header header; 841 struct acx_header header;
964 842
965 u32 rate_class_cnt; 843 __le32 rate_class_cnt;
966 struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; 844 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
967} __attribute__ ((packed)); 845} __attribute__ ((packed));
968 846
969#define WL1271_ACX_AC_COUNT 4
970
971struct acx_ac_cfg { 847struct acx_ac_cfg {
972 struct acx_header header; 848 struct acx_header header;
973 u8 ac; 849 u8 ac;
974 u8 cw_min; 850 u8 cw_min;
975 u16 cw_max; 851 __le16 cw_max;
976 u8 aifsn; 852 u8 aifsn;
977 u8 reserved; 853 u8 reserved;
978 u16 tx_op_limit; 854 __le16 tx_op_limit;
979} __attribute__ ((packed)); 855} __attribute__ ((packed));
980 856
981enum wl1271_acx_ac {
982 WL1271_ACX_AC_BE = 0,
983 WL1271_ACX_AC_BK = 1,
984 WL1271_ACX_AC_VI = 2,
985 WL1271_ACX_AC_VO = 3,
986 WL1271_ACX_AC_CTS2SELF = 4,
987 WL1271_ACX_AC_ANY_TID = 0x1F,
988 WL1271_ACX_AC_INVALID = 0xFF,
989};
990
991enum wl1271_acx_ps_scheme {
992 WL1271_ACX_PS_SCHEME_LEGACY = 0,
993 WL1271_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
994 WL1271_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
995 WL1271_ACX_PS_SCHEME_SAPSD = 3,
996};
997
998enum wl1271_acx_ack_policy {
999 WL1271_ACX_ACK_POLICY_LEGACY = 0,
1000 WL1271_ACX_ACK_POLICY_NO_ACK = 1,
1001 WL1271_ACX_ACK_POLICY_BLOCK = 2,
1002};
1003
1004#define WL1271_ACX_TID_COUNT 7
1005
1006struct acx_tid_config { 857struct acx_tid_config {
1007 struct acx_header header; 858 struct acx_header header;
1008 u8 queue_id; 859 u8 queue_id;
@@ -1011,22 +862,19 @@ struct acx_tid_config {
1011 u8 ps_scheme; 862 u8 ps_scheme;
1012 u8 ack_policy; 863 u8 ack_policy;
1013 u8 padding[3]; 864 u8 padding[3];
1014 u32 apsd_conf[2]; 865 __le32 apsd_conf[2];
1015} __attribute__ ((packed)); 866} __attribute__ ((packed));
1016 867
1017struct acx_frag_threshold { 868struct acx_frag_threshold {
1018 struct acx_header header; 869 struct acx_header header;
1019 u16 frag_threshold; 870 __le16 frag_threshold;
1020 u8 padding[2]; 871 u8 padding[2];
1021} __attribute__ ((packed)); 872} __attribute__ ((packed));
1022 873
1023#define WL1271_ACX_TX_COMPL_TIMEOUT 5
1024#define WL1271_ACX_TX_COMPL_THRESHOLD 5
1025
1026struct acx_tx_config_options { 874struct acx_tx_config_options {
1027 struct acx_header header; 875 struct acx_header header;
1028 u16 tx_compl_timeout; /* msec */ 876 __le16 tx_compl_timeout; /* msec */
1029 u16 tx_compl_threshold; /* number of packets */ 877 __le16 tx_compl_threshold; /* number of packets */
1030} __attribute__ ((packed)); 878} __attribute__ ((packed));
1031 879
1032#define ACX_RX_MEM_BLOCKS 64 880#define ACX_RX_MEM_BLOCKS 64
@@ -1041,79 +889,87 @@ struct wl1271_acx_config_memory {
1041 u8 tx_min_mem_block_num; 889 u8 tx_min_mem_block_num;
1042 u8 num_stations; 890 u8 num_stations;
1043 u8 num_ssid_profiles; 891 u8 num_ssid_profiles;
1044 u32 total_tx_descriptors; 892 __le32 total_tx_descriptors;
1045} __attribute__ ((packed)); 893} __attribute__ ((packed));
1046 894
1047struct wl1271_acx_mem_map { 895struct wl1271_acx_mem_map {
1048 struct acx_header header; 896 struct acx_header header;
1049 897
1050 void *code_start; 898 __le32 code_start;
1051 void *code_end; 899 __le32 code_end;
1052 900
1053 void *wep_defkey_start; 901 __le32 wep_defkey_start;
1054 void *wep_defkey_end; 902 __le32 wep_defkey_end;
1055 903
1056 void *sta_table_start; 904 __le32 sta_table_start;
1057 void *sta_table_end; 905 __le32 sta_table_end;
1058 906
1059 void *packet_template_start; 907 __le32 packet_template_start;
1060 void *packet_template_end; 908 __le32 packet_template_end;
1061 909
1062 /* Address of the TX result interface (control block) */ 910 /* Address of the TX result interface (control block) */
1063 u32 tx_result; 911 __le32 tx_result;
1064 u32 tx_result_queue_start; 912 __le32 tx_result_queue_start;
1065 913
1066 void *queue_memory_start; 914 __le32 queue_memory_start;
1067 void *queue_memory_end; 915 __le32 queue_memory_end;
1068 916
1069 u32 packet_memory_pool_start; 917 __le32 packet_memory_pool_start;
1070 u32 packet_memory_pool_end; 918 __le32 packet_memory_pool_end;
1071 919
1072 void *debug_buffer1_start; 920 __le32 debug_buffer1_start;
1073 void *debug_buffer1_end; 921 __le32 debug_buffer1_end;
1074 922
1075 void *debug_buffer2_start; 923 __le32 debug_buffer2_start;
1076 void *debug_buffer2_end; 924 __le32 debug_buffer2_end;
1077 925
1078 /* Number of blocks FW allocated for TX packets */ 926 /* Number of blocks FW allocated for TX packets */
1079 u32 num_tx_mem_blocks; 927 __le32 num_tx_mem_blocks;
1080 928
1081 /* Number of blocks FW allocated for RX packets */ 929 /* Number of blocks FW allocated for RX packets */
1082 u32 num_rx_mem_blocks; 930 __le32 num_rx_mem_blocks;
1083 931
1084 /* the following 4 fields are valid in SLAVE mode only */ 932 /* the following 4 fields are valid in SLAVE mode only */
1085 u8 *tx_cbuf; 933 u8 *tx_cbuf;
1086 u8 *rx_cbuf; 934 u8 *rx_cbuf;
1087 void *rx_ctrl; 935 __le32 rx_ctrl;
1088 void *tx_ctrl; 936 __le32 tx_ctrl;
1089} __attribute__ ((packed)); 937} __attribute__ ((packed));
1090 938
1091enum wl1271_acx_rx_queue_type {
1092 RX_QUEUE_TYPE_RX_LOW_PRIORITY, /* All except the high priority */
1093 RX_QUEUE_TYPE_RX_HIGH_PRIORITY, /* Management and voice packets */
1094 RX_QUEUE_TYPE_NUM,
1095 RX_QUEUE_TYPE_MAX = USHORT_MAX
1096};
1097
1098#define WL1271_RX_INTR_THRESHOLD_DEF 0 /* no pacing, send interrupt on
1099 * every event */
1100#define WL1271_RX_INTR_THRESHOLD_MIN 0
1101#define WL1271_RX_INTR_THRESHOLD_MAX 15
1102
1103#define WL1271_RX_INTR_TIMEOUT_DEF 5
1104#define WL1271_RX_INTR_TIMEOUT_MIN 1
1105#define WL1271_RX_INTR_TIMEOUT_MAX 100
1106
1107struct wl1271_acx_rx_config_opt { 939struct wl1271_acx_rx_config_opt {
1108 struct acx_header header; 940 struct acx_header header;
1109 941
1110 u16 mblk_threshold; 942 __le16 mblk_threshold;
1111 u16 threshold; 943 __le16 threshold;
1112 u16 timeout; 944 __le16 timeout;
1113 u8 queue_type; 945 u8 queue_type;
1114 u8 reserved; 946 u8 reserved;
1115} __attribute__ ((packed)); 947} __attribute__ ((packed));
1116 948
949
950struct wl1271_acx_bet_enable {
951 struct acx_header header;
952
953 u8 enable;
954 u8 max_consecutive;
955 u8 padding[2];
956} __attribute__ ((packed));
957
958#define ACX_IPV4_VERSION 4
959#define ACX_IPV6_VERSION 6
960#define ACX_IPV4_ADDR_SIZE 4
961struct wl1271_acx_arp_filter {
962 struct acx_header header;
963 u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
964 u8 enable; /* 1 to enable ARP filtering, 0 to disable */
965 u8 padding[2];
966 u8 address[16]; /* The configured device IP address - all ARP
967 requests directed to this IP address will pass
968 through. For IPv4, the first four bytes are
969 used. */
970} __attribute__((packed));
971
972
1117enum { 973enum {
1118 ACX_WAKE_UP_CONDITIONS = 0x0002, 974 ACX_WAKE_UP_CONDITIONS = 0x0002,
1119 ACX_MEM_CFG = 0x0003, 975 ACX_MEM_CFG = 0x0003,
@@ -1170,6 +1026,9 @@ enum {
1170 ACX_PEER_HT_CAP = 0x0057, 1026 ACX_PEER_HT_CAP = 0x0057,
1171 ACX_HT_BSS_OPERATION = 0x0058, 1027 ACX_HT_BSS_OPERATION = 0x0058,
1172 ACX_COEX_ACTIVITY = 0x0059, 1028 ACX_COEX_ACTIVITY = 0x0059,
1029 ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
1030 ACX_SET_SMART_REFLEX_STATE = 0x005B,
1031 ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
1173 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1032 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1174 DOT11_CUR_TX_PWR = 0x100D, 1033 DOT11_CUR_TX_PWR = 0x100D,
1175 DOT11_RX_DOT11_MODE = 0x1012, 1034 DOT11_RX_DOT11_MODE = 0x1012,
@@ -1182,23 +1041,24 @@ enum {
1182}; 1041};
1183 1042
1184 1043
1185int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, 1044int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
1186 u8 listen_interval);
1187int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); 1045int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
1188int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len); 1046int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len);
1189int wl1271_acx_tx_power(struct wl1271 *wl, int power); 1047int wl1271_acx_tx_power(struct wl1271 *wl, int power);
1190int wl1271_acx_feature_cfg(struct wl1271 *wl); 1048int wl1271_acx_feature_cfg(struct wl1271 *wl);
1191int wl1271_acx_mem_map(struct wl1271 *wl, 1049int wl1271_acx_mem_map(struct wl1271 *wl,
1192 struct acx_header *mem_map, size_t len); 1050 struct acx_header *mem_map, size_t len);
1193int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time); 1051int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
1194int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter); 1052int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
1195int wl1271_acx_pd_threshold(struct wl1271 *wl); 1053int wl1271_acx_pd_threshold(struct wl1271 *wl);
1196int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); 1054int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
1197int wl1271_acx_group_address_tbl(struct wl1271 *wl); 1055int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
1056 void *mc_list, u32 mc_list_len);
1198int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1057int wl1271_acx_service_period_timeout(struct wl1271 *wl);
1199int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); 1058int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1200int wl1271_acx_beacon_filter_opt(struct wl1271 *wl); 1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1201int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl);
1202int wl1271_acx_sg_enable(struct wl1271 *wl); 1062int wl1271_acx_sg_enable(struct wl1271 *wl);
1203int wl1271_acx_sg_cfg(struct wl1271 *wl); 1063int wl1271_acx_sg_cfg(struct wl1271 *wl);
1204int wl1271_acx_cca_threshold(struct wl1271 *wl); 1064int wl1271_acx_cca_threshold(struct wl1271 *wl);
@@ -1207,9 +1067,9 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
1207int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); 1067int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
1208int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); 1068int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1209int wl1271_acx_cts_protect(struct wl1271 *wl, 1069int wl1271_acx_cts_protect(struct wl1271 *wl,
1210 enum acx_ctsprotect_type ctsprotect); 1070 enum acx_ctsprotect_type ctsprotect);
1211int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1212int wl1271_acx_rate_policies(struct wl1271 *wl); 1072int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
1213int wl1271_acx_ac_cfg(struct wl1271 *wl); 1073int wl1271_acx_ac_cfg(struct wl1271 *wl);
1214int wl1271_acx_tid_cfg(struct wl1271 *wl); 1074int wl1271_acx_tid_cfg(struct wl1271 *wl);
1215int wl1271_acx_frag_threshold(struct wl1271 *wl); 1075int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1217,5 +1077,9 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl);
1217int wl1271_acx_mem_cfg(struct wl1271 *wl); 1077int wl1271_acx_mem_cfg(struct wl1271 *wl);
1218int wl1271_acx_init_mem_config(struct wl1271 *wl); 1078int wl1271_acx_init_mem_config(struct wl1271 *wl);
1219int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1079int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1080int wl1271_acx_smart_reflex(struct wl1271 *wl);
1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1083 u8 version);
1220 1084
1221#endif /* __WL1271_ACX_H__ */ 1085#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 8228ef474a7e..b7c96454cca3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -39,6 +39,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
39 .start = REGISTERS_BASE, 39 .start = REGISTERS_BASE,
40 .size = 0x00008800 40 .size = 0x00008800
41 }, 41 },
42 .mem2 = {
43 .start = 0x00000000,
44 .size = 0x00000000
45 },
46 .mem3 = {
47 .start = 0x00000000,
48 .size = 0x00000000
49 },
42 }, 50 },
43 51
44 [PART_WORK] = { 52 [PART_WORK] = {
@@ -48,7 +56,15 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
48 }, 56 },
49 .reg = { 57 .reg = {
50 .start = REGISTERS_BASE, 58 .start = REGISTERS_BASE,
51 .size = 0x0000b000 59 .size = 0x0000a000
60 },
61 .mem2 = {
62 .start = 0x003004f8,
63 .size = 0x00000004
64 },
65 .mem3 = {
66 .start = 0x00040404,
67 .size = 0x00000000
52 }, 68 },
53 }, 69 },
54 70
@@ -60,6 +76,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
60 .reg = { 76 .reg = {
61 .start = DRPW_BASE, 77 .start = DRPW_BASE,
62 .size = 0x00006000 78 .size = 0x00006000
79 },
80 .mem2 = {
81 .start = 0x00000000,
82 .size = 0x00000000
83 },
84 .mem3 = {
85 .start = 0x00000000,
86 .size = 0x00000000
63 } 87 }
64 } 88 }
65}; 89};
@@ -69,19 +93,19 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
69 u32 cpu_ctrl; 93 u32 cpu_ctrl;
70 94
71 /* 10.5.0 run the firmware (I) */ 95 /* 10.5.0 run the firmware (I) */
72 cpu_ctrl = wl1271_reg_read32(wl, ACX_REG_ECPU_CONTROL); 96 cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL);
73 97
74 /* 10.5.1 run the firmware (II) */ 98 /* 10.5.1 run the firmware (II) */
75 cpu_ctrl |= flag; 99 cpu_ctrl |= flag;
76 wl1271_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); 100 wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
77} 101}
78 102
79static void wl1271_boot_fw_version(struct wl1271 *wl) 103static void wl1271_boot_fw_version(struct wl1271 *wl)
80{ 104{
81 struct wl1271_static_data static_data; 105 struct wl1271_static_data static_data;
82 106
83 wl1271_spi_mem_read(wl, wl->cmd_box_addr, 107 wl1271_spi_read(wl, wl->cmd_box_addr,
84 &static_data, sizeof(static_data)); 108 &static_data, sizeof(static_data), false);
85 109
86 strncpy(wl->chip.fw_ver, static_data.fw_version, 110 strncpy(wl->chip.fw_ver, static_data.fw_version,
87 sizeof(wl->chip.fw_ver)); 111 sizeof(wl->chip.fw_ver));
@@ -93,8 +117,9 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
93static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, 117static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
94 size_t fw_data_len, u32 dest) 118 size_t fw_data_len, u32 dest)
95{ 119{
120 struct wl1271_partition_set partition;
96 int addr, chunk_num, partition_limit; 121 int addr, chunk_num, partition_limit;
97 u8 *p; 122 u8 *p, *chunk;
98 123
99 /* whal_FwCtrl_LoadFwImageSm() */ 124 /* whal_FwCtrl_LoadFwImageSm() */
100 125
@@ -103,16 +128,20 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
103 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d", 128 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
104 fw_data_len, CHUNK_SIZE); 129 fw_data_len, CHUNK_SIZE);
105 130
106
107 if ((fw_data_len % 4) != 0) { 131 if ((fw_data_len % 4) != 0) {
108 wl1271_error("firmware length not multiple of four"); 132 wl1271_error("firmware length not multiple of four");
109 return -EIO; 133 return -EIO;
110 } 134 }
111 135
112 wl1271_set_partition(wl, dest, 136 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
113 part_table[PART_DOWN].mem.size, 137 if (!chunk) {
114 part_table[PART_DOWN].reg.start, 138 wl1271_error("allocation for firmware upload chunk failed");
115 part_table[PART_DOWN].reg.size); 139 return -ENOMEM;
140 }
141
142 memcpy(&partition, &part_table[PART_DOWN], sizeof(partition));
143 partition.mem.start = dest;
144 wl1271_set_partition(wl, &partition);
116 145
117 /* 10.1 set partition limit and chunk num */ 146 /* 10.1 set partition limit and chunk num */
118 chunk_num = 0; 147 chunk_num = 0;
@@ -125,21 +154,17 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
125 addr = dest + chunk_num * CHUNK_SIZE; 154 addr = dest + chunk_num * CHUNK_SIZE;
126 partition_limit = chunk_num * CHUNK_SIZE + 155 partition_limit = chunk_num * CHUNK_SIZE +
127 part_table[PART_DOWN].mem.size; 156 part_table[PART_DOWN].mem.size;
128 157 partition.mem.start = addr;
129 /* FIXME: Over 80 chars! */ 158 wl1271_set_partition(wl, &partition);
130 wl1271_set_partition(wl,
131 addr,
132 part_table[PART_DOWN].mem.size,
133 part_table[PART_DOWN].reg.start,
134 part_table[PART_DOWN].reg.size);
135 } 159 }
136 160
137 /* 10.3 upload the chunk */ 161 /* 10.3 upload the chunk */
138 addr = dest + chunk_num * CHUNK_SIZE; 162 addr = dest + chunk_num * CHUNK_SIZE;
139 p = buf + chunk_num * CHUNK_SIZE; 163 p = buf + chunk_num * CHUNK_SIZE;
164 memcpy(chunk, p, CHUNK_SIZE);
140 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", 165 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
141 p, addr); 166 p, addr);
142 wl1271_spi_mem_write(wl, addr, p, CHUNK_SIZE); 167 wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false);
143 168
144 chunk_num++; 169 chunk_num++;
145 } 170 }
@@ -147,28 +172,31 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
147 /* 10.4 upload the last chunk */ 172 /* 10.4 upload the last chunk */
148 addr = dest + chunk_num * CHUNK_SIZE; 173 addr = dest + chunk_num * CHUNK_SIZE;
149 p = buf + chunk_num * CHUNK_SIZE; 174 p = buf + chunk_num * CHUNK_SIZE;
175 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
150 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", 176 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
151 fw_data_len % CHUNK_SIZE, p, addr); 177 fw_data_len % CHUNK_SIZE, p, addr);
152 wl1271_spi_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE); 178 wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
153 179
180 kfree(chunk);
154 return 0; 181 return 0;
155} 182}
156 183
157static int wl1271_boot_upload_firmware(struct wl1271 *wl) 184static int wl1271_boot_upload_firmware(struct wl1271 *wl)
158{ 185{
159 u32 chunks, addr, len; 186 u32 chunks, addr, len;
187 int ret = 0;
160 u8 *fw; 188 u8 *fw;
161 189
162 fw = wl->fw; 190 fw = wl->fw;
163 chunks = be32_to_cpup((u32 *) fw); 191 chunks = be32_to_cpup((__be32 *) fw);
164 fw += sizeof(u32); 192 fw += sizeof(u32);
165 193
166 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); 194 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
167 195
168 while (chunks--) { 196 while (chunks--) {
169 addr = be32_to_cpup((u32 *) fw); 197 addr = be32_to_cpup((__be32 *) fw);
170 fw += sizeof(u32); 198 fw += sizeof(u32);
171 len = be32_to_cpup((u32 *) fw); 199 len = be32_to_cpup((__be32 *) fw);
172 fw += sizeof(u32); 200 fw += sizeof(u32);
173 201
174 if (len > 300000) { 202 if (len > 300000) {
@@ -177,11 +205,13 @@ static int wl1271_boot_upload_firmware(struct wl1271 *wl)
177 } 205 }
178 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", 206 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
179 chunks, addr, len); 207 chunks, addr, len);
180 wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); 208 ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
209 if (ret != 0)
210 break;
181 fw += len; 211 fw += len;
182 } 212 }
183 213
184 return 0; 214 return ret;
185} 215}
186 216
187static int wl1271_boot_upload_nvs(struct wl1271 *wl) 217static int wl1271_boot_upload_nvs(struct wl1271 *wl)
@@ -235,7 +265,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
235 wl1271_debug(DEBUG_BOOT, 265 wl1271_debug(DEBUG_BOOT,
236 "nvs burst write 0x%x: 0x%x", 266 "nvs burst write 0x%x: 0x%x",
237 dest_addr, val); 267 dest_addr, val);
238 wl1271_reg_write32(wl, dest_addr, val); 268 wl1271_spi_write32(wl, dest_addr, val);
239 269
240 nvs_ptr += 4; 270 nvs_ptr += 4;
241 dest_addr += 4; 271 dest_addr += 4;
@@ -253,20 +283,18 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
253 /* FIXME: The driver sets the partition here, but this is not needed, 283 /* FIXME: The driver sets the partition here, but this is not needed,
254 since it sets to the same one as currently in use */ 284 since it sets to the same one as currently in use */
255 /* Now we must set the partition correctly */ 285 /* Now we must set the partition correctly */
256 wl1271_set_partition(wl, 286 wl1271_set_partition(wl, &part_table[PART_WORK]);
257 part_table[PART_WORK].mem.start,
258 part_table[PART_WORK].mem.size,
259 part_table[PART_WORK].reg.start,
260 part_table[PART_WORK].reg.size);
261 287
262 /* Copy the NVS tables to a new block to ensure alignment */ 288 /* Copy the NVS tables to a new block to ensure alignment */
263 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); 289 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
290 if (!nvs_aligned)
291 return -ENOMEM;
264 292
265 /* And finally we upload the NVS tables */ 293 /* And finally we upload the NVS tables */
266 /* FIXME: In wl1271, we upload everything at once. 294 /* FIXME: In wl1271, we upload everything at once.
267 No endianness handling needed here?! The ref driver doesn't do 295 No endianness handling needed here?! The ref driver doesn't do
268 anything about it at this point */ 296 anything about it at this point */
269 wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len); 297 wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
270 298
271 kfree(nvs_aligned); 299 kfree(nvs_aligned);
272 return 0; 300 return 0;
@@ -275,9 +303,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
275static void wl1271_boot_enable_interrupts(struct wl1271 *wl) 303static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
276{ 304{
277 enable_irq(wl->irq); 305 enable_irq(wl->irq);
278 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 306 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
279 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 307 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
280 wl1271_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL); 308 wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
281} 309}
282 310
283static int wl1271_boot_soft_reset(struct wl1271 *wl) 311static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -286,12 +314,13 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
286 u32 boot_data; 314 u32 boot_data;
287 315
288 /* perform soft reset */ 316 /* perform soft reset */
289 wl1271_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); 317 wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET,
318 ACX_SLV_SOFT_RESET_BIT);
290 319
291 /* SOFT_RESET is self clearing */ 320 /* SOFT_RESET is self clearing */
292 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); 321 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
293 while (1) { 322 while (1) {
294 boot_data = wl1271_reg_read32(wl, ACX_REG_SLV_SOFT_RESET); 323 boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET);
295 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); 324 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
296 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) 325 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
297 break; 326 break;
@@ -307,10 +336,10 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
307 } 336 }
308 337
309 /* disable Rx/Tx */ 338 /* disable Rx/Tx */
310 wl1271_reg_write32(wl, ENABLE, 0x0); 339 wl1271_spi_write32(wl, ENABLE, 0x0);
311 340
312 /* disable auto calibration on start*/ 341 /* disable auto calibration on start*/
313 wl1271_reg_write32(wl, SPARE_A2, 0xffff); 342 wl1271_spi_write32(wl, SPARE_A2, 0xffff);
314 343
315 return 0; 344 return 0;
316} 345}
@@ -322,7 +351,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
322 351
323 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 352 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
324 353
325 chip_id = wl1271_reg_read32(wl, CHIP_ID_B); 354 chip_id = wl1271_spi_read32(wl, CHIP_ID_B);
326 355
327 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); 356 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
328 357
@@ -335,7 +364,8 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
335 loop = 0; 364 loop = 0;
336 while (loop++ < INIT_LOOP) { 365 while (loop++ < INIT_LOOP) {
337 udelay(INIT_LOOP_DELAY); 366 udelay(INIT_LOOP_DELAY);
338 interrupt = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 367 interrupt = wl1271_spi_read32(wl,
368 ACX_REG_INTERRUPT_NO_CLEAR);
339 369
340 if (interrupt == 0xffffffff) { 370 if (interrupt == 0xffffffff) {
341 wl1271_error("error reading hardware complete " 371 wl1271_error("error reading hardware complete "
@@ -344,30 +374,26 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
344 } 374 }
345 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 375 /* check that ACX_INTR_INIT_COMPLETE is enabled */
346 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { 376 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
347 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, 377 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
348 WL1271_ACX_INTR_INIT_COMPLETE); 378 WL1271_ACX_INTR_INIT_COMPLETE);
349 break; 379 break;
350 } 380 }
351 } 381 }
352 382
353 if (loop >= INIT_LOOP) { 383 if (loop > INIT_LOOP) {
354 wl1271_error("timeout waiting for the hardware to " 384 wl1271_error("timeout waiting for the hardware to "
355 "complete initialization"); 385 "complete initialization");
356 return -EIO; 386 return -EIO;
357 } 387 }
358 388
359 /* get hardware config command mail box */ 389 /* get hardware config command mail box */
360 wl->cmd_box_addr = wl1271_reg_read32(wl, REG_COMMAND_MAILBOX_PTR); 390 wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR);
361 391
362 /* get hardware config event mail box */ 392 /* get hardware config event mail box */
363 wl->event_box_addr = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); 393 wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
364 394
365 /* set the working partition to its "running" mode offset */ 395 /* set the working partition to its "running" mode offset */
366 wl1271_set_partition(wl, 396 wl1271_set_partition(wl, &part_table[PART_WORK]);
367 part_table[PART_WORK].mem.start,
368 part_table[PART_WORK].mem.size,
369 part_table[PART_WORK].reg.start,
370 part_table[PART_WORK].reg.size);
371 397
372 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x", 398 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
373 wl->cmd_box_addr, wl->event_box_addr); 399 wl->cmd_box_addr, wl->event_box_addr);
@@ -379,11 +405,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
379 * ready to receive event from the command mailbox 405 * ready to receive event from the command mailbox
380 */ 406 */
381 407
382 /* enable gpio interrupts */ 408 /* unmask required mbox events */
383 wl1271_boot_enable_interrupts(wl); 409 wl->event_mask = BSS_LOSE_EVENT_ID |
384 410 SCAN_COMPLETE_EVENT_ID |
385 /* unmask all mbox events */ 411 PS_REPORT_EVENT_ID;
386 wl->event_mask = 0xffffffff;
387 412
388 ret = wl1271_event_unmask(wl); 413 ret = wl1271_event_unmask(wl);
389 if (ret < 0) { 414 if (ret < 0) {
@@ -399,34 +424,13 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
399 424
400static int wl1271_boot_write_irq_polarity(struct wl1271 *wl) 425static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
401{ 426{
402 u32 polarity, status, i; 427 u32 polarity;
403
404 wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
405 wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_READ);
406
407 /* Wait until the command is complete (ie. bit 18 is set) */
408 for (i = 0; i < OCP_CMD_LOOP; i++) {
409 polarity = wl1271_reg_read32(wl, OCP_DATA_READ);
410 if (polarity & OCP_READY_MASK)
411 break;
412 }
413 if (i == OCP_CMD_LOOP) {
414 wl1271_error("OCP command timeout!");
415 return -EIO;
416 }
417 428
418 status = polarity & OCP_STATUS_MASK; 429 polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
419 if (status != OCP_STATUS_OK) {
420 wl1271_error("OCP command failed (%d)", status);
421 return -EIO;
422 }
423 430
424 /* We use HIGH polarity, so unset the LOW bit */ 431 /* We use HIGH polarity, so unset the LOW bit */
425 polarity &= ~POLARITY_LOW; 432 polarity &= ~POLARITY_LOW;
426 433 wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
427 wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
428 wl1271_reg_write32(wl, OCP_DATA_WRITE, polarity);
429 wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_WRITE);
430 434
431 return 0; 435 return 0;
432} 436}
@@ -436,16 +440,32 @@ int wl1271_boot(struct wl1271 *wl)
436 int ret = 0; 440 int ret = 0;
437 u32 tmp, clk, pause; 441 u32 tmp, clk, pause;
438 442
439 if (REF_CLOCK == 0 || REF_CLOCK == 2) 443 if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
440 /* ref clk: 19.2/38.4 */ 444 /* ref clk: 19.2/38.4/38.4-XTAL */
441 clk = 0x3; 445 clk = 0x3;
442 else if (REF_CLOCK == 1 || REF_CLOCK == 3) 446 else if (REF_CLOCK == 1 || REF_CLOCK == 3)
443 /* ref clk: 26/52 */ 447 /* ref clk: 26/52 */
444 clk = 0x5; 448 clk = 0x5;
445 449
446 wl1271_reg_write32(wl, PLL_PARAMETERS, clk); 450 if (REF_CLOCK != 0) {
451 u16 val;
452 /* Set clock type */
453 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
454 val &= FREF_CLK_TYPE_BITS;
455 val |= CLK_REQ_PRCM;
456 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
457 } else {
458 u16 val;
459 /* Set clock polarity */
460 val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
461 val &= FREF_CLK_POLARITY_BITS;
462 val |= CLK_REQ_OUTN_SEL;
463 wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
464 }
465
466 wl1271_spi_write32(wl, PLL_PARAMETERS, clk);
447 467
448 pause = wl1271_reg_read32(wl, PLL_PARAMETERS); 468 pause = wl1271_spi_read32(wl, PLL_PARAMETERS);
449 469
450 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); 470 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
451 471
@@ -454,39 +474,31 @@ int wl1271_boot(struct wl1271 *wl)
454 * 0x3ff (magic number ). How does 474 * 0x3ff (magic number ). How does
455 * this work?! */ 475 * this work?! */
456 pause |= WU_COUNTER_PAUSE_VAL; 476 pause |= WU_COUNTER_PAUSE_VAL;
457 wl1271_reg_write32(wl, WU_COUNTER_PAUSE, pause); 477 wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause);
458 478
459 /* Continue the ELP wake up sequence */ 479 /* Continue the ELP wake up sequence */
460 wl1271_reg_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); 480 wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
461 udelay(500); 481 udelay(500);
462 482
463 wl1271_set_partition(wl, 483 wl1271_set_partition(wl, &part_table[PART_DRPW]);
464 part_table[PART_DRPW].mem.start,
465 part_table[PART_DRPW].mem.size,
466 part_table[PART_DRPW].reg.start,
467 part_table[PART_DRPW].reg.size);
468 484
469 /* Read-modify-write DRPW_SCRATCH_START register (see next state) 485 /* Read-modify-write DRPW_SCRATCH_START register (see next state)
470 to be used by DRPw FW. The RTRIM value will be added by the FW 486 to be used by DRPw FW. The RTRIM value will be added by the FW
471 before taking DRPw out of reset */ 487 before taking DRPw out of reset */
472 488
473 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START); 489 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
474 clk = wl1271_reg_read32(wl, DRPW_SCRATCH_START); 490 clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START);
475 491
476 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 492 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
477 493
478 /* 2 */ 494 /* 2 */
479 clk |= (REF_CLOCK << 1) << 4; 495 clk |= (REF_CLOCK << 1) << 4;
480 wl1271_reg_write32(wl, DRPW_SCRATCH_START, clk); 496 wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk);
481 497
482 wl1271_set_partition(wl, 498 wl1271_set_partition(wl, &part_table[PART_WORK]);
483 part_table[PART_WORK].mem.start,
484 part_table[PART_WORK].mem.size,
485 part_table[PART_WORK].reg.start,
486 part_table[PART_WORK].reg.size);
487 499
488 /* Disable interrupts */ 500 /* Disable interrupts */
489 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 501 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
490 502
491 ret = wl1271_boot_soft_reset(wl); 503 ret = wl1271_boot_soft_reset(wl);
492 if (ret < 0) 504 if (ret < 0)
@@ -501,21 +513,22 @@ int wl1271_boot(struct wl1271 *wl)
501 * ACX_EEPROMLESS_IND_REG */ 513 * ACX_EEPROMLESS_IND_REG */
502 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); 514 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
503 515
504 wl1271_reg_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG); 516 wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG,
517 ACX_EEPROMLESS_IND_REG);
505 518
506 tmp = wl1271_reg_read32(wl, CHIP_ID_B); 519 tmp = wl1271_spi_read32(wl, CHIP_ID_B);
507 520
508 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); 521 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
509 522
510 /* 6. read the EEPROM parameters */ 523 /* 6. read the EEPROM parameters */
511 tmp = wl1271_reg_read32(wl, SCR_PAD2); 524 tmp = wl1271_spi_read32(wl, SCR_PAD2);
512 525
513 ret = wl1271_boot_write_irq_polarity(wl); 526 ret = wl1271_boot_write_irq_polarity(wl);
514 if (ret < 0) 527 if (ret < 0)
515 goto out; 528 goto out;
516 529
517 /* FIXME: Need to check whether this is really what we want */ 530 /* FIXME: Need to check whether this is really what we want */
518 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 531 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
519 WL1271_ACX_ALL_EVENTS_VECTOR); 532 WL1271_ACX_ALL_EVENTS_VECTOR);
520 533
521 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly 534 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
@@ -530,6 +543,9 @@ int wl1271_boot(struct wl1271 *wl)
530 if (ret < 0) 543 if (ret < 0)
531 goto out; 544 goto out;
532 545
546 /* Enable firmware interrupts now */
547 wl1271_boot_enable_interrupts(wl);
548
533 /* set the wl1271 default filters */ 549 /* set the wl1271 default filters */
534 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 550 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
535 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 551 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index b0d8fb46a439..412443ee655a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -50,23 +50,17 @@ struct wl1271_static_data {
50#define WU_COUNTER_PAUSE_VAL 0x3FF 50#define WU_COUNTER_PAUSE_VAL 0x3FF
51#define WELP_ARM_COMMAND_VAL 0x4 51#define WELP_ARM_COMMAND_VAL 0x4
52 52
53#define OCP_CMD_LOOP 32 53#define OCP_REG_POLARITY 0x0064
54 54#define OCP_REG_CLK_TYPE 0x0448
55#define OCP_CMD_WRITE 0x1 55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_CMD_READ 0x2
57
58#define OCP_READY_MASK BIT(18)
59#define OCP_STATUS_MASK (BIT(16) | BIT(17))
60
61#define OCP_STATUS_NO_RESP 0x00000
62#define OCP_STATUS_OK 0x10000
63#define OCP_STATUS_REQ_FAILED 0x20000
64#define OCP_STATUS_RESP_ERROR 0x30000
65
66#define OCP_REG_POLARITY 0x30032
67 56
68#define CMD_MBOX_ADDRESS 0x407B4 57#define CMD_MBOX_ADDRESS 0x407B4
69 58
70#define POLARITY_LOW BIT(1) 59#define POLARITY_LOW BIT(1)
71 60
61#define FREF_CLK_TYPE_BITS 0xfffffe7f
62#define CLK_REQ_PRCM 0x100
63#define FREF_CLK_POLARITY_BITS 0xfffff8ff
64#define CLK_REQ_OUTN_SEL 0x700
65
72#endif 66#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 2a4351ff54dc..990eb01b4c71 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -42,26 +42,28 @@
42 * @buf: buffer containing the command, must work with dma 42 * @buf: buffer containing the command, must work with dma
43 * @len: length of the buffer 43 * @len: length of the buffer
44 */ 44 */
45int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len) 45int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
46 size_t res_len)
46{ 47{
47 struct wl1271_cmd_header *cmd; 48 struct wl1271_cmd_header *cmd;
48 unsigned long timeout; 49 unsigned long timeout;
49 u32 intr; 50 u32 intr;
50 int ret = 0; 51 int ret = 0;
52 u16 status;
51 53
52 cmd = buf; 54 cmd = buf;
53 cmd->id = id; 55 cmd->id = cpu_to_le16(id);
54 cmd->status = 0; 56 cmd->status = 0;
55 57
56 WARN_ON(len % 4 != 0); 58 WARN_ON(len % 4 != 0);
57 59
58 wl1271_spi_mem_write(wl, wl->cmd_box_addr, buf, len); 60 wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false);
59 61
60 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); 62 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
61 63
62 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 64 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
63 65
64 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 66 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
65 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 67 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
66 if (time_after(jiffies, timeout)) { 68 if (time_after(jiffies, timeout)) {
67 wl1271_error("command complete timeout"); 69 wl1271_error("command complete timeout");
@@ -71,17 +73,28 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len)
71 73
72 msleep(1); 74 msleep(1);
73 75
74 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 76 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
77 }
78
79 /* read back the status code of the command */
80 if (res_len == 0)
81 res_len = sizeof(struct wl1271_cmd_header);
82 wl1271_spi_read(wl, wl->cmd_box_addr, cmd, res_len, false);
83
84 status = le16_to_cpu(cmd->status);
85 if (status != CMD_STATUS_SUCCESS) {
86 wl1271_error("command execute failure %d", status);
87 ret = -EIO;
75 } 88 }
76 89
77 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, 90 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
78 WL1271_ACX_INTR_CMD_COMPLETE); 91 WL1271_ACX_INTR_CMD_COMPLETE);
79 92
80out: 93out:
81 return ret; 94 return ret;
82} 95}
83 96
84int wl1271_cmd_cal_channel_tune(struct wl1271 *wl) 97static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
85{ 98{
86 struct wl1271_cmd_cal_channel_tune *cmd; 99 struct wl1271_cmd_cal_channel_tune *cmd;
87 int ret = 0; 100 int ret = 0;
@@ -104,7 +117,7 @@ int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
104 return ret; 117 return ret;
105} 118}
106 119
107int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl) 120static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
108{ 121{
109 struct wl1271_cmd_cal_update_ref_point *cmd; 122 struct wl1271_cmd_cal_update_ref_point *cmd;
110 int ret = 0; 123 int ret = 0;
@@ -129,7 +142,7 @@ int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
129 return ret; 142 return ret;
130} 143}
131 144
132int wl1271_cmd_cal_p2g(struct wl1271 *wl) 145static int wl1271_cmd_cal_p2g(struct wl1271 *wl)
133{ 146{
134 struct wl1271_cmd_cal_p2g *cmd; 147 struct wl1271_cmd_cal_p2g *cmd;
135 int ret = 0; 148 int ret = 0;
@@ -150,7 +163,7 @@ int wl1271_cmd_cal_p2g(struct wl1271 *wl)
150 return ret; 163 return ret;
151} 164}
152 165
153int wl1271_cmd_cal(struct wl1271 *wl) 166static int wl1271_cmd_cal(struct wl1271 *wl)
154{ 167{
155 /* 168 /*
156 * FIXME: we must make sure that we're not sleeping when calibration 169 * FIXME: we must make sure that we're not sleeping when calibration
@@ -175,11 +188,9 @@ int wl1271_cmd_cal(struct wl1271 *wl)
175 return ret; 188 return ret;
176} 189}
177 190
178int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, 191int wl1271_cmd_join(struct wl1271 *wl)
179 u16 beacon_interval, u8 wait)
180{ 192{
181 static bool do_cal = true; 193 static bool do_cal = true;
182 unsigned long timeout;
183 struct wl1271_cmd_join *join; 194 struct wl1271_cmd_join *join;
184 int ret, i; 195 int ret, i;
185 u8 *bssid; 196 u8 *bssid;
@@ -193,6 +204,18 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
193 do_cal = false; 204 do_cal = false;
194 } 205 }
195 206
207 /* FIXME: This is a workaround, because with the current stack, we
208 * cannot know when we have disassociated. So, if we have already
209 * joined, we disconnect before joining again. */
210 if (wl->joined) {
211 ret = wl1271_cmd_disconnect(wl);
212 if (ret < 0) {
213 wl1271_error("failed to disconnect before rejoining");
214 goto out;
215 }
216
217 wl->joined = false;
218 }
196 219
197 join = kzalloc(sizeof(*join), GFP_KERNEL); 220 join = kzalloc(sizeof(*join), GFP_KERNEL);
198 if (!join) { 221 if (!join) {
@@ -207,15 +230,34 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
207 for (i = 0; i < ETH_ALEN; i++) 230 for (i = 0; i < ETH_ALEN; i++)
208 bssid[i] = wl->bssid[ETH_ALEN - i - 1]; 231 bssid[i] = wl->bssid[ETH_ALEN - i - 1];
209 232
210 join->rx_config_options = wl->rx_config; 233 join->rx_config_options = cpu_to_le32(wl->rx_config);
211 join->rx_filter_options = wl->rx_filter; 234 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
235 join->bss_type = wl->bss_type;
212 236
213 join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | 237 /*
214 RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; 238 * FIXME: disable temporarily all filters because after commit
239 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
240 * association. The filter logic needs to be implemented properly
241 * and once that is done, this hack can be removed.
242 */
243 join->rx_config_options = cpu_to_le32(0);
244 join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
245
246 if (wl->band == IEEE80211_BAND_2GHZ)
247 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
248 CONF_HW_BIT_RATE_2MBPS |
249 CONF_HW_BIT_RATE_5_5MBPS |
250 CONF_HW_BIT_RATE_11MBPS);
251 else {
252 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
253 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
254 CONF_HW_BIT_RATE_12MBPS |
255 CONF_HW_BIT_RATE_24MBPS);
256 }
257
258 join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
259 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
215 260
216 join->beacon_interval = beacon_interval;
217 join->dtim_interval = dtim_interval;
218 join->bss_type = bss_type;
219 join->channel = wl->channel; 261 join->channel = wl->channel;
220 join->ssid_len = wl->ssid_len; 262 join->ssid_len = wl->ssid_len;
221 memcpy(join->ssid, wl->ssid, wl->ssid_len); 263 memcpy(join->ssid, wl->ssid, wl->ssid_len);
@@ -228,21 +270,24 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
228 270
229 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; 271 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
230 272
273 /* reset TX security counters */
274 wl->tx_security_last_seq = 0;
275 wl->tx_security_seq_16 = 0;
276 wl->tx_security_seq_32 = 0;
231 277
232 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join)); 278 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
233 if (ret < 0) { 279 if (ret < 0) {
234 wl1271_error("failed to initiate cmd join"); 280 wl1271_error("failed to initiate cmd join");
235 goto out_free; 281 goto out_free;
236 } 282 }
237 283
238 timeout = msecs_to_jiffies(JOIN_TIMEOUT); 284 wl->joined = true;
239 285
240 /* 286 /*
241 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 287 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
242 * simplify locking we just sleep instead, for now 288 * simplify locking we just sleep instead, for now
243 */ 289 */
244 if (wait) 290 msleep(10);
245 msleep(10);
246 291
247out_free: 292out_free:
248 kfree(join); 293 kfree(join);
@@ -262,34 +307,21 @@ out:
262int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer) 307int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
263{ 308{
264 int ret; 309 int ret;
310 size_t res_len = 0;
265 311
266 wl1271_debug(DEBUG_CMD, "cmd test"); 312 wl1271_debug(DEBUG_CMD, "cmd test");
267 313
268 ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len); 314 if (answer)
315 res_len = buf_len;
316
317 ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len, res_len);
269 318
270 if (ret < 0) { 319 if (ret < 0) {
271 wl1271_warning("TEST command failed"); 320 wl1271_warning("TEST command failed");
272 return ret; 321 return ret;
273 } 322 }
274 323
275 if (answer) { 324 return ret;
276 struct wl1271_command *cmd_answer;
277
278 /*
279 * The test command got in, we can read the answer.
280 * The answer would be a wl1271_command, where the
281 * parameter array contains the actual answer.
282 */
283 wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
284
285 cmd_answer = buf;
286
287 if (cmd_answer->header.status != CMD_STATUS_SUCCESS)
288 wl1271_error("TEST command answer error: %d",
289 cmd_answer->header.status);
290 }
291
292 return 0;
293} 325}
294 326
295/** 327/**
@@ -307,26 +339,15 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
307 339
308 wl1271_debug(DEBUG_CMD, "cmd interrogate"); 340 wl1271_debug(DEBUG_CMD, "cmd interrogate");
309 341
310 acx->id = id; 342 acx->id = cpu_to_le16(id);
311 343
312 /* payload length, does not include any headers */ 344 /* payload length, does not include any headers */
313 acx->len = len - sizeof(*acx); 345 acx->len = cpu_to_le16(len - sizeof(*acx));
314 346
315 ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx)); 347 ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx), len);
316 if (ret < 0) { 348 if (ret < 0)
317 wl1271_error("INTERROGATE command failed"); 349 wl1271_error("INTERROGATE command failed");
318 goto out;
319 }
320
321 /* the interrogate command got in, we can read the answer */
322 wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, len);
323 350
324 acx = buf;
325 if (acx->cmd.status != CMD_STATUS_SUCCESS)
326 wl1271_error("INTERROGATE command error: %d",
327 acx->cmd.status);
328
329out:
330 return ret; 351 return ret;
331} 352}
332 353
@@ -345,12 +366,12 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
345 366
346 wl1271_debug(DEBUG_CMD, "cmd configure"); 367 wl1271_debug(DEBUG_CMD, "cmd configure");
347 368
348 acx->id = id; 369 acx->id = cpu_to_le16(id);
349 370
350 /* payload length, does not include any headers */ 371 /* payload length, does not include any headers */
351 acx->len = len - sizeof(*acx); 372 acx->len = cpu_to_le16(len - sizeof(*acx));
352 373
353 ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len); 374 ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0);
354 if (ret < 0) { 375 if (ret < 0) {
355 wl1271_warning("CONFIGURE command NOK"); 376 wl1271_warning("CONFIGURE command NOK");
356 return ret; 377 return ret;
@@ -383,7 +404,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
383 cmd_tx = CMD_DISABLE_TX; 404 cmd_tx = CMD_DISABLE_TX;
384 } 405 }
385 406
386 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd)); 407 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
387 if (ret < 0) { 408 if (ret < 0) {
388 wl1271_error("rx %s cmd for channel %d failed", 409 wl1271_error("rx %s cmd for channel %d failed",
389 enable ? "start" : "stop", channel); 410 enable ? "start" : "stop", channel);
@@ -393,7 +414,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
393 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", 414 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
394 enable ? "start" : "stop", channel); 415 enable ? "start" : "stop", channel);
395 416
396 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd)); 417 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
397 if (ret < 0) { 418 if (ret < 0) {
398 wl1271_error("tx %s cmd for channel %d failed", 419 wl1271_error("tx %s cmd for channel %d failed",
399 enable ? "start" : "stop", channel); 420 enable ? "start" : "stop", channel);
@@ -414,8 +435,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
414 int ret = 0; 435 int ret = 0;
415 436
416 /* FIXME: this should be in ps.c */ 437 /* FIXME: this should be in ps.c */
417 ret = wl1271_acx_wake_up_conditions(wl, WAKE_UP_EVENT_DTIM_BITMAP, 438 ret = wl1271_acx_wake_up_conditions(wl);
418 wl->listen_int);
419 if (ret < 0) { 439 if (ret < 0) {
420 wl1271_error("couldn't set wake up conditions"); 440 wl1271_error("couldn't set wake up conditions");
421 goto out; 441 goto out;
@@ -433,10 +453,10 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
433 ps_params->send_null_data = 1; 453 ps_params->send_null_data = 1;
434 ps_params->retries = 5; 454 ps_params->retries = 5;
435 ps_params->hang_over_period = 128; 455 ps_params->hang_over_period = 128;
436 ps_params->null_data_rate = 1; /* 1 Mbps */ 456 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
437 457
438 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 458 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
439 sizeof(*ps_params)); 459 sizeof(*ps_params), 0);
440 if (ret < 0) { 460 if (ret < 0) {
441 wl1271_error("cmd set_ps_mode failed"); 461 wl1271_error("cmd set_ps_mode failed");
442 goto out; 462 goto out;
@@ -464,22 +484,17 @@ int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
464 WARN_ON(len > MAX_READ_SIZE); 484 WARN_ON(len > MAX_READ_SIZE);
465 len = min_t(size_t, len, MAX_READ_SIZE); 485 len = min_t(size_t, len, MAX_READ_SIZE);
466 486
467 cmd->addr = addr; 487 cmd->addr = cpu_to_le32(addr);
468 cmd->size = len; 488 cmd->size = cpu_to_le32(len);
469 489
470 ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd)); 490 ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd),
491 sizeof(*cmd));
471 if (ret < 0) { 492 if (ret < 0) {
472 wl1271_error("read memory command failed: %d", ret); 493 wl1271_error("read memory command failed: %d", ret);
473 goto out; 494 goto out;
474 } 495 }
475 496
476 /* the read command got in, we can now read the answer */ 497 /* the read command got in */
477 wl1271_spi_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
478
479 if (cmd->header.status != CMD_STATUS_SUCCESS)
480 wl1271_error("error in read command result: %d",
481 cmd->header.status);
482
483 memcpy(answer, cmd->value, len); 498 memcpy(answer, cmd->value, len);
484 499
485out: 500out:
@@ -488,14 +503,31 @@ out:
488} 503}
489 504
490int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 505int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
491 u8 active_scan, u8 high_prio, u8 num_channels, 506 u8 active_scan, u8 high_prio, u8 band,
492 u8 probe_requests) 507 u8 probe_requests)
493{ 508{
494 509
495 struct wl1271_cmd_trigger_scan_to *trigger = NULL; 510 struct wl1271_cmd_trigger_scan_to *trigger = NULL;
496 struct wl1271_cmd_scan *params = NULL; 511 struct wl1271_cmd_scan *params = NULL;
497 int i, ret; 512 struct ieee80211_channel *channels;
513 int i, j, n_ch, ret;
498 u16 scan_options = 0; 514 u16 scan_options = 0;
515 u8 ieee_band;
516
517 if (band == WL1271_SCAN_BAND_2_4_GHZ)
518 ieee_band = IEEE80211_BAND_2GHZ;
519 else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
520 ieee_band = IEEE80211_BAND_2GHZ;
521 else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
522 ieee_band = IEEE80211_BAND_5GHZ;
523 else
524 return -EINVAL;
525
526 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
527 return -EINVAL;
528
529 channels = wl->hw->wiphy->bands[ieee_band]->channels;
530 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
499 531
500 if (wl->scanning) 532 if (wl->scanning)
501 return -EINVAL; 533 return -EINVAL;
@@ -512,32 +544,43 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
512 scan_options |= WL1271_SCAN_OPT_PASSIVE; 544 scan_options |= WL1271_SCAN_OPT_PASSIVE;
513 if (high_prio) 545 if (high_prio)
514 scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH; 546 scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH;
515 params->params.scan_options = scan_options; 547 params->params.scan_options = cpu_to_le16(scan_options);
516 548
517 params->params.num_channels = num_channels;
518 params->params.num_probe_requests = probe_requests; 549 params->params.num_probe_requests = probe_requests;
519 params->params.tx_rate = cpu_to_le32(RATE_MASK_2MBPS); 550 /* Let the fw autodetect suitable tx_rate for probes */
551 params->params.tx_rate = 0;
520 params->params.tid_trigger = 0; 552 params->params.tid_trigger = 0;
521 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 553 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
522 554
523 for (i = 0; i < num_channels; i++) { 555 if (band == WL1271_SCAN_BAND_DUAL)
524 params->channels[i].min_duration = 556 params->params.band = WL1271_SCAN_BAND_2_4_GHZ;
525 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); 557 else
526 params->channels[i].max_duration = 558 params->params.band = band;
527 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); 559
528 memset(&params->channels[i].bssid_lsb, 0xff, 4); 560 for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) {
529 memset(&params->channels[i].bssid_msb, 0xff, 2); 561 if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) {
530 params->channels[i].early_termination = 0; 562 params->channels[j].min_duration =
531 params->channels[i].tx_power_att = WL1271_SCAN_CURRENT_TX_PWR; 563 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
532 params->channels[i].channel = i + 1; 564 params->channels[j].max_duration =
565 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
566 memset(&params->channels[j].bssid_lsb, 0xff, 4);
567 memset(&params->channels[j].bssid_msb, 0xff, 2);
568 params->channels[j].early_termination = 0;
569 params->channels[j].tx_power_att =
570 WL1271_SCAN_CURRENT_TX_PWR;
571 params->channels[j].channel = channels[i].hw_value;
572 j++;
573 }
533 } 574 }
534 575
576 params->params.num_channels = j;
577
535 if (len && ssid) { 578 if (len && ssid) {
536 params->params.ssid_len = len; 579 params->params.ssid_len = len;
537 memcpy(params->params.ssid, ssid, len); 580 memcpy(params->params.ssid, ssid, len);
538 } 581 }
539 582
540 ret = wl1271_cmd_build_probe_req(wl, ssid, len); 583 ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
541 if (ret < 0) { 584 if (ret < 0) {
542 wl1271_error("PROBE request template failed"); 585 wl1271_error("PROBE request template failed");
543 goto out; 586 goto out;
@@ -553,7 +596,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
553 trigger->timeout = 0; 596 trigger->timeout = 0;
554 597
555 ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, 598 ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
556 sizeof(*trigger)); 599 sizeof(*trigger), 0);
557 if (ret < 0) { 600 if (ret < 0) {
558 wl1271_error("trigger scan to failed for hw scan"); 601 wl1271_error("trigger scan to failed for hw scan");
559 goto out; 602 goto out;
@@ -562,20 +605,24 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
562 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 605 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
563 606
564 wl->scanning = true; 607 wl->scanning = true;
608 if (wl1271_11a_enabled()) {
609 wl->scan.state = band;
610 if (band == WL1271_SCAN_BAND_DUAL) {
611 wl->scan.active = active_scan;
612 wl->scan.high_prio = high_prio;
613 wl->scan.probe_requests = probe_requests;
614 if (len && ssid) {
615 wl->scan.ssid_len = len;
616 memcpy(wl->scan.ssid, ssid, len);
617 } else
618 wl->scan.ssid_len = 0;
619 }
620 }
565 621
566 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); 622 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
567 if (ret < 0) { 623 if (ret < 0) {
568 wl1271_error("SCAN failed"); 624 wl1271_error("SCAN failed");
569 goto out;
570 }
571
572 wl1271_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
573
574 if (params->header.status != CMD_STATUS_SUCCESS) {
575 wl1271_error("Scan command error: %d",
576 params->header.status);
577 wl->scanning = false; 625 wl->scanning = false;
578 ret = -EIO;
579 goto out; 626 goto out;
580 } 627 }
581 628
@@ -603,14 +650,14 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
603 650
604 cmd->len = cpu_to_le16(buf_len); 651 cmd->len = cpu_to_le16(buf_len);
605 cmd->template_type = template_id; 652 cmd->template_type = template_id;
606 cmd->enabled_rates = ACX_RATE_MASK_UNSPECIFIED; 653 cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
607 cmd->short_retry_limit = ACX_RATE_RETRY_LIMIT; 654 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
608 cmd->long_retry_limit = ACX_RATE_RETRY_LIMIT; 655 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
609 656
610 if (buf) 657 if (buf)
611 memcpy(cmd->template_data, buf, buf_len); 658 memcpy(cmd->template_data, buf, buf_len);
612 659
613 ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd)); 660 ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd), 0);
614 if (ret < 0) { 661 if (ret < 0) {
615 wl1271_warning("cmd set_template failed: %d", ret); 662 wl1271_warning("cmd set_template failed: %d", ret);
616 goto out_free; 663 goto out_free;
@@ -623,30 +670,62 @@ out:
623 return ret; 670 return ret;
624} 671}
625 672
626static int wl1271_build_basic_rates(char *rates) 673static int wl1271_build_basic_rates(char *rates, u8 band)
627{ 674{
628 u8 index = 0; 675 u8 index = 0;
629 676
630 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; 677 if (band == IEEE80211_BAND_2GHZ) {
631 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; 678 rates[index++] =
632 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; 679 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
633 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; 680 rates[index++] =
681 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
682 rates[index++] =
683 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
684 rates[index++] =
685 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
686 } else if (band == IEEE80211_BAND_5GHZ) {
687 rates[index++] =
688 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
689 rates[index++] =
690 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
691 rates[index++] =
692 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
693 } else {
694 wl1271_error("build_basic_rates invalid band: %d", band);
695 }
634 696
635 return index; 697 return index;
636} 698}
637 699
638static int wl1271_build_extended_rates(char *rates) 700static int wl1271_build_extended_rates(char *rates, u8 band)
639{ 701{
640 u8 index = 0; 702 u8 index = 0;
641 703
642 rates[index++] = IEEE80211_OFDM_RATE_6MB; 704 if (band == IEEE80211_BAND_2GHZ) {
643 rates[index++] = IEEE80211_OFDM_RATE_9MB; 705 rates[index++] = IEEE80211_OFDM_RATE_6MB;
644 rates[index++] = IEEE80211_OFDM_RATE_12MB; 706 rates[index++] = IEEE80211_OFDM_RATE_9MB;
645 rates[index++] = IEEE80211_OFDM_RATE_18MB; 707 rates[index++] = IEEE80211_OFDM_RATE_12MB;
646 rates[index++] = IEEE80211_OFDM_RATE_24MB; 708 rates[index++] = IEEE80211_OFDM_RATE_18MB;
647 rates[index++] = IEEE80211_OFDM_RATE_36MB; 709 rates[index++] = IEEE80211_OFDM_RATE_24MB;
648 rates[index++] = IEEE80211_OFDM_RATE_48MB; 710 rates[index++] = IEEE80211_OFDM_RATE_36MB;
649 rates[index++] = IEEE80211_OFDM_RATE_54MB; 711 rates[index++] = IEEE80211_OFDM_RATE_48MB;
712 rates[index++] = IEEE80211_OFDM_RATE_54MB;
713 } else if (band == IEEE80211_BAND_5GHZ) {
714 rates[index++] =
715 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
716 rates[index++] =
717 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
718 rates[index++] =
719 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
720 rates[index++] =
721 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
722 rates[index++] =
723 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
724 rates[index++] =
725 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
726 } else {
727 wl1271_error("build_basic_rates invalid band: %d", band);
728 }
650 729
651 return index; 730 return index;
652} 731}
@@ -665,7 +744,8 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
665 744
666 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 745 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
667 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 746 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
668 IEEE80211_STYPE_NULLFUNC); 747 IEEE80211_STYPE_NULLFUNC |
748 IEEE80211_FCTL_TODS);
669 749
670 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template, 750 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
671 sizeof(template)); 751 sizeof(template));
@@ -678,7 +758,10 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
678 758
679 memcpy(template.bssid, wl->bssid, ETH_ALEN); 759 memcpy(template.bssid, wl->bssid, ETH_ALEN);
680 memcpy(template.ta, wl->mac_addr, ETH_ALEN); 760 memcpy(template.ta, wl->mac_addr, ETH_ALEN);
681 template.aid = aid; 761
762 /* aid in PS-Poll has its two MSBs each set to 1 */
763 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
764
682 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 765 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
683 766
684 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template, 767 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
@@ -686,12 +769,14 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
686 769
687} 770}
688 771
689int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len) 772int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
773 u8 band)
690{ 774{
691 struct wl12xx_probe_req_template template; 775 struct wl12xx_probe_req_template template;
692 struct wl12xx_ie_rates *rates; 776 struct wl12xx_ie_rates *rates;
693 char *ptr; 777 char *ptr;
694 u16 size; 778 u16 size;
779 int ret;
695 780
696 ptr = (char *)&template; 781 ptr = (char *)&template;
697 size = sizeof(struct ieee80211_header); 782 size = sizeof(struct ieee80211_header);
@@ -713,20 +798,25 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len)
713 /* Basic Rates */ 798 /* Basic Rates */
714 rates = (struct wl12xx_ie_rates *)ptr; 799 rates = (struct wl12xx_ie_rates *)ptr;
715 rates->header.id = WLAN_EID_SUPP_RATES; 800 rates->header.id = WLAN_EID_SUPP_RATES;
716 rates->header.len = wl1271_build_basic_rates(rates->rates); 801 rates->header.len = wl1271_build_basic_rates(rates->rates, band);
717 size += sizeof(struct wl12xx_ie_header) + rates->header.len; 802 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
718 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len; 803 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
719 804
720 /* Extended rates */ 805 /* Extended rates */
721 rates = (struct wl12xx_ie_rates *)ptr; 806 rates = (struct wl12xx_ie_rates *)ptr;
722 rates->header.id = WLAN_EID_EXT_SUPP_RATES; 807 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
723 rates->header.len = wl1271_build_extended_rates(rates->rates); 808 rates->header.len = wl1271_build_extended_rates(rates->rates, band);
724 size += sizeof(struct wl12xx_ie_header) + rates->header.len; 809 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
725 810
726 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size); 811 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
727 812
728 return wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 813 if (band == IEEE80211_BAND_2GHZ)
729 &template, size); 814 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
815 &template, size);
816 else
817 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
818 &template, size);
819 return ret;
730} 820}
731 821
732int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 822int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
@@ -743,10 +833,10 @@ int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
743 } 833 }
744 834
745 cmd->id = id; 835 cmd->id = id;
746 cmd->key_action = KEY_SET_ID; 836 cmd->key_action = cpu_to_le16(KEY_SET_ID);
747 cmd->key_type = KEY_WEP; 837 cmd->key_type = KEY_WEP;
748 838
749 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd)); 839 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
750 if (ret < 0) { 840 if (ret < 0) {
751 wl1271_warning("cmd set_default_wep_key failed: %d", ret); 841 wl1271_warning("cmd set_default_wep_key failed: %d", ret);
752 goto out; 842 goto out;
@@ -759,7 +849,8 @@ out:
759} 849}
760 850
761int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 851int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
762 u8 key_size, const u8 *key, const u8 *addr) 852 u8 key_size, const u8 *key, const u8 *addr,
853 u32 tx_seq_32, u16 tx_seq_16)
763{ 854{
764 struct wl1271_cmd_set_keys *cmd; 855 struct wl1271_cmd_set_keys *cmd;
765 int ret = 0; 856 int ret = 0;
@@ -773,16 +864,18 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
773 if (key_type != KEY_WEP) 864 if (key_type != KEY_WEP)
774 memcpy(cmd->addr, addr, ETH_ALEN); 865 memcpy(cmd->addr, addr, ETH_ALEN);
775 866
776 cmd->key_action = action; 867 cmd->key_action = cpu_to_le16(action);
777 cmd->key_size = key_size; 868 cmd->key_size = key_size;
778 cmd->key_type = key_type; 869 cmd->key_type = key_type;
779 870
871 cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
872 cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
873
780 /* we have only one SSID profile */ 874 /* we have only one SSID profile */
781 cmd->ssid_profile = 0; 875 cmd->ssid_profile = 0;
782 876
783 cmd->id = id; 877 cmd->id = id;
784 878
785 /* FIXME: this is from wl1251, needs to be checked */
786 if (key_type == KEY_TKIP) { 879 if (key_type == KEY_TKIP) {
787 /* 880 /*
788 * We get the key in the following form: 881 * We get the key in the following form:
@@ -800,7 +893,7 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
800 893
801 wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd)); 894 wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd));
802 895
803 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd)); 896 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
804 if (ret < 0) { 897 if (ret < 0) {
805 wl1271_warning("could not set keys"); 898 wl1271_warning("could not set keys");
806 goto out; 899 goto out;
@@ -811,3 +904,34 @@ out:
811 904
812 return ret; 905 return ret;
813} 906}
907
908int wl1271_cmd_disconnect(struct wl1271 *wl)
909{
910 struct wl1271_cmd_disconnect *cmd;
911 int ret = 0;
912
913 wl1271_debug(DEBUG_CMD, "cmd disconnect");
914
915 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
916 if (!cmd) {
917 ret = -ENOMEM;
918 goto out;
919 }
920
921 cmd->rx_config_options = cpu_to_le32(wl->rx_config);
922 cmd->rx_filter_options = cpu_to_le32(wl->rx_filter);
923 /* disconnect reason is not used in immediate disconnections */
924 cmd->type = DISCONNECT_IMMEDIATE;
925
926 ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd), 0);
927 if (ret < 0) {
928 wl1271_error("failed to send disconnect command");
929 goto out_free;
930 }
931
932out_free:
933 kfree(cmd);
934
935out:
936 return ret;
937}
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 951a8447a516..9d7061b3c8a0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -29,9 +29,9 @@
29 29
30struct acx_header; 30struct acx_header;
31 31
32int wl1271_cmd_send(struct wl1271 *wl, u16 type, void *buf, size_t buf_len); 32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, 33 size_t res_len);
34 u16 beacon_interval, u8 wait); 34int wl1271_cmd_join(struct wl1271 *wl);
35int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 35int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
36int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 36int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
37int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 37int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -40,16 +40,19 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
40int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 40int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
41 size_t len); 41 size_t len);
42int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 42int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
43 u8 active_scan, u8 high_prio, u8 num_channels, 43 u8 active_scan, u8 high_prio, u8 band,
44 u8 probe_requests); 44 u8 probe_requests);
45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
46 void *buf, size_t buf_len); 46 void *buf, size_t buf_len);
47int wl1271_cmd_build_null_data(struct wl1271 *wl); 47int wl1271_cmd_build_null_data(struct wl1271 *wl);
48int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); 48int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
49int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len); 49int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
50 u8 band);
50int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 51int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
51int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 52int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
52 u8 key_size, const u8 *key, const u8 *addr); 53 u8 key_size, const u8 *key, const u8 *addr,
54 u32 tx_seq_32, u16 tx_seq_16);
55int wl1271_cmd_disconnect(struct wl1271 *wl);
53 56
54enum wl1271_commands { 57enum wl1271_commands {
55 CMD_INTERROGATE = 1, /*use this to read information elements*/ 58 CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -118,8 +121,8 @@ enum cmd_templ {
118#define WL1271_CMD_TEMPL_MAX_SIZE 252 121#define WL1271_CMD_TEMPL_MAX_SIZE 252
119 122
120struct wl1271_cmd_header { 123struct wl1271_cmd_header {
121 u16 id; 124 __le16 id;
122 u16 status; 125 __le16 status;
123 /* payload */ 126 /* payload */
124 u8 data[0]; 127 u8 data[0];
125} __attribute__ ((packed)); 128} __attribute__ ((packed));
@@ -172,17 +175,17 @@ struct cmd_read_write_memory {
172 struct wl1271_cmd_header header; 175 struct wl1271_cmd_header header;
173 176
174 /* The address of the memory to read from or write to.*/ 177 /* The address of the memory to read from or write to.*/
175 u32 addr; 178 __le32 addr;
176 179
177 /* The amount of data in bytes to read from or write to the WiLink 180 /* The amount of data in bytes to read from or write to the WiLink
178 * device.*/ 181 * device.*/
179 u32 size; 182 __le32 size;
180 183
181 /* The actual value read from or written to the Wilink. The source 184 /* The actual value read from or written to the Wilink. The source
182 of this field is the Host in WRITE command or the Wilink in READ 185 of this field is the Host in WRITE command or the Wilink in READ
183 command. */ 186 command. */
184 u8 value[MAX_READ_SIZE]; 187 u8 value[MAX_READ_SIZE];
185}; 188} __attribute__ ((packed));
186 189
187#define CMDMBOX_HEADER_LEN 4 190#define CMDMBOX_HEADER_LEN 4
188#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 191#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -196,22 +199,23 @@ enum {
196 199
197#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ 200#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */
198#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1 201#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
202#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
199 203
200struct wl1271_cmd_join { 204struct wl1271_cmd_join {
201 struct wl1271_cmd_header header; 205 struct wl1271_cmd_header header;
202 206
203 u32 bssid_lsb; 207 __le32 bssid_lsb;
204 u16 bssid_msb; 208 __le16 bssid_msb;
205 u16 beacon_interval; /* in TBTTs */ 209 __le16 beacon_interval; /* in TBTTs */
206 u32 rx_config_options; 210 __le32 rx_config_options;
207 u32 rx_filter_options; 211 __le32 rx_filter_options;
208 212
209 /* 213 /*
210 * The target uses this field to determine the rate at 214 * The target uses this field to determine the rate at
211 * which to transmit control frame responses (such as 215 * which to transmit control frame responses (such as
212 * ACK or CTS frames). 216 * ACK or CTS frames).
213 */ 217 */
214 u32 basic_rate_set; 218 __le32 basic_rate_set;
215 u8 dtim_interval; 219 u8 dtim_interval;
216 /* 220 /*
217 * bits 0-2: This bitwise field specifies the type 221 * bits 0-2: This bitwise field specifies the type
@@ -240,10 +244,10 @@ struct cmd_enabledisable_path {
240struct wl1271_cmd_template_set { 244struct wl1271_cmd_template_set {
241 struct wl1271_cmd_header header; 245 struct wl1271_cmd_header header;
242 246
243 u16 len; 247 __le16 len;
244 u8 template_type; 248 u8 template_type;
245 u8 index; /* relevant only for KLV_TEMPLATE type */ 249 u8 index; /* relevant only for KLV_TEMPLATE type */
246 u32 enabled_rates; 250 __le32 enabled_rates;
247 u8 short_retry_limit; 251 u8 short_retry_limit;
248 u8 long_retry_limit; 252 u8 long_retry_limit;
249 u8 aflags; 253 u8 aflags;
@@ -280,18 +284,13 @@ struct wl1271_cmd_ps_params {
280 * to power save mode. 284 * to power save mode.
281 */ 285 */
282 u8 hang_over_period; 286 u8 hang_over_period;
283 u32 null_data_rate; 287 __le32 null_data_rate;
284} __attribute__ ((packed)); 288} __attribute__ ((packed));
285 289
286/* HW encryption keys */ 290/* HW encryption keys */
287#define NUM_ACCESS_CATEGORIES_COPY 4 291#define NUM_ACCESS_CATEGORIES_COPY 4
288#define MAX_KEY_SIZE 32 292#define MAX_KEY_SIZE 32
289 293
290/* When set, disable HW encryption */
291#define DF_ENCRYPTION_DISABLE 0x01
292/* When set, disable HW decryption */
293#define DF_SNIFF_MODE_ENABLE 0x80
294
295enum wl1271_cmd_key_action { 294enum wl1271_cmd_key_action {
296 KEY_ADD_OR_REPLACE = 1, 295 KEY_ADD_OR_REPLACE = 1,
297 KEY_REMOVE = 2, 296 KEY_REMOVE = 2,
@@ -316,9 +315,9 @@ struct wl1271_cmd_set_keys {
316 u8 addr[ETH_ALEN]; 315 u8 addr[ETH_ALEN];
317 316
318 /* key_action_e */ 317 /* key_action_e */
319 u16 key_action; 318 __le16 key_action;
320 319
321 u16 reserved_1; 320 __le16 reserved_1;
322 321
323 /* key size in bytes */ 322 /* key size in bytes */
324 u8 key_size; 323 u8 key_size;
@@ -334,8 +333,8 @@ struct wl1271_cmd_set_keys {
334 u8 id; 333 u8 id;
335 u8 reserved_2[6]; 334 u8 reserved_2[6];
336 u8 key[MAX_KEY_SIZE]; 335 u8 key[MAX_KEY_SIZE];
337 u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; 336 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
338 u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 337 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
339} __attribute__ ((packed)); 338} __attribute__ ((packed));
340 339
341 340
@@ -347,19 +346,22 @@ struct wl1271_cmd_set_keys {
347#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 346#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
348#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */ 347#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */
349#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */ 348#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */
349#define WL1271_SCAN_BAND_2_4_GHZ 0
350#define WL1271_SCAN_BAND_5_GHZ 1
351#define WL1271_SCAN_BAND_DUAL 2
350 352
351struct basic_scan_params { 353struct basic_scan_params {
352 u32 rx_config_options; 354 __le32 rx_config_options;
353 u32 rx_filter_options; 355 __le32 rx_filter_options;
354 /* Scan option flags (WL1271_SCAN_OPT_*) */ 356 /* Scan option flags (WL1271_SCAN_OPT_*) */
355 u16 scan_options; 357 __le16 scan_options;
356 /* Number of scan channels in the list (maximum 30) */ 358 /* Number of scan channels in the list (maximum 30) */
357 u8 num_channels; 359 u8 num_channels;
358 /* This field indicates the number of probe requests to send 360 /* This field indicates the number of probe requests to send
359 per channel for an active scan */ 361 per channel for an active scan */
360 u8 num_probe_requests; 362 u8 num_probe_requests;
361 /* Rate bit field for sending the probes */ 363 /* Rate bit field for sending the probes */
362 u32 tx_rate; 364 __le32 tx_rate;
363 u8 tid_trigger; 365 u8 tid_trigger;
364 u8 ssid_len; 366 u8 ssid_len;
365 /* in order to align */ 367 /* in order to align */
@@ -374,10 +376,10 @@ struct basic_scan_params {
374 376
375struct basic_scan_channel_params { 377struct basic_scan_channel_params {
376 /* Duration in TU to wait for frames on a channel for active scan */ 378 /* Duration in TU to wait for frames on a channel for active scan */
377 u32 min_duration; 379 __le32 min_duration;
378 u32 max_duration; 380 __le32 max_duration;
379 u32 bssid_lsb; 381 __le32 bssid_lsb;
380 u16 bssid_msb; 382 __le16 bssid_msb;
381 u8 early_termination; 383 u8 early_termination;
382 u8 tx_power_att; 384 u8 tx_power_att;
383 u8 channel; 385 u8 channel;
@@ -397,13 +399,13 @@ struct wl1271_cmd_scan {
397struct wl1271_cmd_trigger_scan_to { 399struct wl1271_cmd_trigger_scan_to {
398 struct wl1271_cmd_header header; 400 struct wl1271_cmd_header header;
399 401
400 u32 timeout; 402 __le32 timeout;
401}; 403} __attribute__ ((packed));
402 404
403struct wl1271_cmd_test_header { 405struct wl1271_cmd_test_header {
404 u8 id; 406 u8 id;
405 u8 padding[3]; 407 u8 padding[3];
406}; 408} __attribute__ ((packed));
407 409
408enum wl1271_channel_tune_bands { 410enum wl1271_channel_tune_bands {
409 WL1271_CHANNEL_TUNE_BAND_2_4, 411 WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -425,7 +427,7 @@ struct wl1271_cmd_cal_channel_tune {
425 u8 band; 427 u8 band;
426 u8 channel; 428 u8 channel;
427 429
428 u16 radio_status; 430 __le16 radio_status;
429} __attribute__ ((packed)); 431} __attribute__ ((packed));
430 432
431struct wl1271_cmd_cal_update_ref_point { 433struct wl1271_cmd_cal_update_ref_point {
@@ -433,8 +435,8 @@ struct wl1271_cmd_cal_update_ref_point {
433 435
434 struct wl1271_cmd_test_header test; 436 struct wl1271_cmd_test_header test;
435 437
436 s32 ref_power; 438 __le32 ref_power;
437 s32 ref_detector; 439 __le32 ref_detector;
438 u8 sub_band; 440 u8 sub_band;
439 u8 padding[3]; 441 u8 padding[3];
440} __attribute__ ((packed)); 442} __attribute__ ((packed));
@@ -449,16 +451,42 @@ struct wl1271_cmd_cal_p2g {
449 451
450 struct wl1271_cmd_test_header test; 452 struct wl1271_cmd_test_header test;
451 453
452 u16 len; 454 __le16 len;
453 u8 buf[MAX_TLV_LENGTH]; 455 u8 buf[MAX_TLV_LENGTH];
454 u8 type; 456 u8 type;
455 u8 padding; 457 u8 padding;
456 458
457 s16 radio_status; 459 __le16 radio_status;
458 u8 nvs_version[MAX_NVS_VERSION_LENGTH]; 460 u8 nvs_version[MAX_NVS_VERSION_LENGTH];
459 461
460 u8 sub_band_mask; 462 u8 sub_band_mask;
461 u8 padding2; 463 u8 padding2;
462} __attribute__ ((packed)); 464} __attribute__ ((packed));
463 465
466
467/*
468 * There are three types of disconnections:
469 *
470 * DISCONNECT_IMMEDIATE: the fw doesn't send any frames
471 * DISCONNECT_DEAUTH: the fw generates a DEAUTH request with the reason
472 * we have passed
473 * DISCONNECT_DISASSOC: the fw generates a DESASSOC request with the reason
474 * we have passed
475 */
476enum wl1271_disconnect_type {
477 DISCONNECT_IMMEDIATE,
478 DISCONNECT_DEAUTH,
479 DISCONNECT_DISASSOC
480};
481
482struct wl1271_cmd_disconnect {
483 __le32 rx_config_options;
484 __le32 rx_filter_options;
485
486 __le16 reason;
487 u8 type;
488
489 u8 padding;
490} __attribute__ ((packed));
491
464#endif /* __WL1271_CMD_H__ */ 492#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
new file mode 100644
index 000000000000..565373ede265
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -0,0 +1,919 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __WL1271_CONF_H__
25#define __WL1271_CONF_H__
26
27enum {
28 CONF_HW_BIT_RATE_1MBPS = BIT(0),
29 CONF_HW_BIT_RATE_2MBPS = BIT(1),
30 CONF_HW_BIT_RATE_5_5MBPS = BIT(2),
31 CONF_HW_BIT_RATE_6MBPS = BIT(3),
32 CONF_HW_BIT_RATE_9MBPS = BIT(4),
33 CONF_HW_BIT_RATE_11MBPS = BIT(5),
34 CONF_HW_BIT_RATE_12MBPS = BIT(6),
35 CONF_HW_BIT_RATE_18MBPS = BIT(7),
36 CONF_HW_BIT_RATE_22MBPS = BIT(8),
37 CONF_HW_BIT_RATE_24MBPS = BIT(9),
38 CONF_HW_BIT_RATE_36MBPS = BIT(10),
39 CONF_HW_BIT_RATE_48MBPS = BIT(11),
40 CONF_HW_BIT_RATE_54MBPS = BIT(12),
41 CONF_HW_BIT_RATE_MCS_0 = BIT(13),
42 CONF_HW_BIT_RATE_MCS_1 = BIT(14),
43 CONF_HW_BIT_RATE_MCS_2 = BIT(15),
44 CONF_HW_BIT_RATE_MCS_3 = BIT(16),
45 CONF_HW_BIT_RATE_MCS_4 = BIT(17),
46 CONF_HW_BIT_RATE_MCS_5 = BIT(18),
47 CONF_HW_BIT_RATE_MCS_6 = BIT(19),
48 CONF_HW_BIT_RATE_MCS_7 = BIT(20)
49};
50
51enum {
52 CONF_HW_RATE_INDEX_1MBPS = 0,
53 CONF_HW_RATE_INDEX_2MBPS = 1,
54 CONF_HW_RATE_INDEX_5_5MBPS = 2,
55 CONF_HW_RATE_INDEX_6MBPS = 3,
56 CONF_HW_RATE_INDEX_9MBPS = 4,
57 CONF_HW_RATE_INDEX_11MBPS = 5,
58 CONF_HW_RATE_INDEX_12MBPS = 6,
59 CONF_HW_RATE_INDEX_18MBPS = 7,
60 CONF_HW_RATE_INDEX_22MBPS = 8,
61 CONF_HW_RATE_INDEX_24MBPS = 9,
62 CONF_HW_RATE_INDEX_36MBPS = 10,
63 CONF_HW_RATE_INDEX_48MBPS = 11,
64 CONF_HW_RATE_INDEX_54MBPS = 12,
65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
66};
67
68struct conf_sg_settings {
69 /*
70 * Defines the PER threshold in PPM of the BT voice of which reaching
71 * this value will trigger raising the priority of the BT voice by
72 * the BT IP until next NFS sample interval time as defined in
73 * nfs_sample_interval.
74 *
75 * Unit: PER value in PPM (parts per million)
76 * #Error_packets / #Total_packets
77
78 * Range: u32
79 */
80 u32 per_threshold;
81
82 /*
83 * This value is an absolute time in micro-seconds to limit the
84 * maximum scan duration compensation while in SG
85 */
86 u32 max_scan_compensation_time;
87
88 /* Defines the PER threshold of the BT voice of which reaching this
89 * value will trigger raising the priority of the BT voice until next
90 * NFS sample interval time as defined in sample_interval.
91 *
92 * Unit: msec
93 * Range: 1-65000
94 */
95 u16 nfs_sample_interval;
96
97 /*
98 * Defines the load ratio for the BT.
99 * The WLAN ratio is: 100 - load_ratio
100 *
101 * Unit: Percent
102 * Range: 0-100
103 */
104 u8 load_ratio;
105
106 /*
107 * true - Co-ex is allowed to enter/exit P.S automatically and
108 * transparently to the host
109 *
110 * false - Co-ex is disallowed to enter/exit P.S and will trigger an
111 * event to the host to notify for the need to enter/exit P.S
112 * due to BT change state
113 *
114 */
115 u8 auto_ps_mode;
116
117 /*
118 * This parameter defines the compensation percentage of num of probe
119 * requests in case scan is initiated during BT voice/BT ACL
120 * guaranteed link.
121 *
122 * Unit: Percent
123 * Range: 0-255 (0 - No compensation)
124 */
125 u8 probe_req_compensation;
126
127 /*
128 * This parameter defines the compensation percentage of scan window
129 * size in case scan is initiated during BT voice/BT ACL Guaranteed
130 * link.
131 *
132 * Unit: Percent
133 * Range: 0-255 (0 - No compensation)
134 */
135 u8 scan_window_compensation;
136
137 /*
138 * Defines the antenna configuration.
139 *
140 * Range: 0 - Single Antenna; 1 - Dual Antenna
141 */
142 u8 antenna_config;
143
144 /*
145 * The percent out of the Max consecutive beacon miss roaming trigger
146 * which is the threshold for raising the priority of beacon
147 * reception.
148 *
149 * Range: 1-100
150 * N = MaxConsecutiveBeaconMiss
151 * P = coexMaxConsecutiveBeaconMissPrecent
152 * Threshold = MIN( N-1, round(N * P / 100))
153 */
154 u8 beacon_miss_threshold;
155
156 /*
157 * The RX rate threshold below which rate adaptation is assumed to be
158 * occurring at the AP which will raise priority for ACTIVE_RX and RX
159 * SP.
160 *
161 * Range: HW_BIT_RATE_*
162 */
163 u32 rate_adaptation_threshold;
164
165 /*
166 * The SNR above which the RX rate threshold indicating AP rate
167 * adaptation is valid
168 *
169 * Range: -128 - 127
170 */
171 s8 rate_adaptation_snr;
172};
173
174enum conf_rx_queue_type {
175 CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */
176 CONF_RX_QUEUE_TYPE_HIGH_PRIORITY, /* Management and voice packets */
177};
178
179struct conf_rx_settings {
180 /*
181 * The maximum amount of time, in TU, before the
182 * firmware discards the MSDU.
183 *
184 * Range: 0 - 0xFFFFFFFF
185 */
186 u32 rx_msdu_life_time;
187
188 /*
189 * Packet detection threshold in the PHY.
190 *
191 * FIXME: details unknown.
192 */
193 u32 packet_detection_threshold;
194
195 /*
196 * The longest time the STA will wait to receive traffic from the AP
197 * after a PS-poll has been transmitted.
198 *
199 * Range: 0 - 200000
200 */
201 u16 ps_poll_timeout;
202 /*
203 * The longest time the STA will wait to receive traffic from the AP
204 * after a frame has been sent from an UPSD enabled queue.
205 *
206 * Range: 0 - 200000
207 */
208 u16 upsd_timeout;
209
210 /*
211 * The number of octets in an MPDU, below which an RTS/CTS
212 * handshake is not performed.
213 *
214 * Range: 0 - 4096
215 */
216 u16 rts_threshold;
217
218 /*
219 * The RX Clear Channel Assessment threshold in the PHY
220 * (the energy threshold).
221 *
222 * Range: ENABLE_ENERGY_D == 0x140A
223 * DISABLE_ENERGY_D == 0xFFEF
224 */
225 u16 rx_cca_threshold;
226
227 /*
228 * Occupied Rx mem-blocks number which requires interrupting the host
229 * (0 = no buffering, 0xffff = disabled).
230 *
231 * Range: u16
232 */
233 u16 irq_blk_threshold;
234
235 /*
236 * Rx packets number which requires interrupting the host
237 * (0 = no buffering).
238 *
239 * Range: u16
240 */
241 u16 irq_pkt_threshold;
242
243 /*
244 * Max time in msec the FW may delay RX-Complete interrupt.
245 *
246 * Range: 1 - 100
247 */
248 u16 irq_timeout;
249
250 /*
251 * The RX queue type.
252 *
253 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
254 */
255 u8 queue_type;
256};
257
258#define CONF_TX_MAX_RATE_CLASSES 8
259
260#define CONF_TX_RATE_MASK_UNSPECIFIED 0
261#define CONF_TX_RATE_MASK_ALL 0x1eff
262#define CONF_TX_RATE_RETRY_LIMIT 10
263
264struct conf_tx_rate_class {
265
266 /*
267 * The rates enabled for this rate class.
268 *
269 * Range: CONF_HW_BIT_RATE_* bit mask
270 */
271 u32 enabled_rates;
272
273 /*
274 * The dot11 short retry limit used for TX retries.
275 *
276 * Range: u8
277 */
278 u8 short_retry_limit;
279
280 /*
281 * The dot11 long retry limit used for TX retries.
282 *
283 * Range: u8
284 */
285 u8 long_retry_limit;
286
287 /*
288 * Flags controlling the attributes of TX transmission.
289 *
290 * Range: bit 0: Truncate - when set, FW attempts to send a frame stop
291 * when the total valid per-rate attempts have
292 * been exhausted; otherwise transmissions
293 * will continue at the lowest available rate
294 * until the appropriate one of the
295 * short_retry_limit, long_retry_limit,
296 * dot11_max_transmit_msdu_life_time, or
297 * max_tx_life_time, is exhausted.
298 * 1: Preamble Override - indicates if the preamble type
299 * should be used in TX.
300 * 2: Preamble Type - the type of the preamble to be used by
301 * the policy (0 - long preamble, 1 - short preamble.
302 */
303 u8 aflags;
304};
305
306#define CONF_TX_MAX_AC_COUNT 4
307
308/* Slot number setting to start transmission at PIFS interval */
309#define CONF_TX_AIFS_PIFS 1
310/* Slot number setting to start transmission at DIFS interval normal
311 * DCF access */
312#define CONF_TX_AIFS_DIFS 2
313
314
315enum conf_tx_ac {
316 CONF_TX_AC_BE = 0, /* best effort / legacy */
317 CONF_TX_AC_BK = 1, /* background */
318 CONF_TX_AC_VI = 2, /* video */
319 CONF_TX_AC_VO = 3, /* voice */
320 CONF_TX_AC_CTS2SELF = 4, /* fictious AC, follows AC_VO */
321 CONF_TX_AC_ANY_TID = 0x1f
322};
323
324struct conf_tx_ac_category {
325 /*
326 * The AC class identifier.
327 *
328 * Range: enum conf_tx_ac
329 */
330 u8 ac;
331
332 /*
333 * The contention window minimum size (in slots) for the access
334 * class.
335 *
336 * Range: u8
337 */
338 u8 cw_min;
339
340 /*
341 * The contention window maximum size (in slots) for the access
342 * class.
343 *
344 * Range: u8
345 */
346 u16 cw_max;
347
348 /*
349 * The AIF value (in slots) for the access class.
350 *
351 * Range: u8
352 */
353 u8 aifsn;
354
355 /*
356 * The TX Op Limit (in microseconds) for the access class.
357 *
358 * Range: u16
359 */
360 u16 tx_op_limit;
361};
362
363#define CONF_TX_MAX_TID_COUNT 7
364
365enum {
366 CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/
367 CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/
368 CONF_CHANNEL_TYPE_HCCA = 2, /* HCCA*/
369};
370
371enum {
372 CONF_PS_SCHEME_LEGACY = 0,
373 CONF_PS_SCHEME_UPSD_TRIGGER = 1,
374 CONF_PS_SCHEME_LEGACY_PSPOLL = 2,
375 CONF_PS_SCHEME_SAPSD = 3,
376};
377
378enum {
379 CONF_ACK_POLICY_LEGACY = 0,
380 CONF_ACK_POLICY_NO_ACK = 1,
381 CONF_ACK_POLICY_BLOCK = 2,
382};
383
384
385struct conf_tx_tid {
386 u8 queue_id;
387 u8 channel_type;
388 u8 tsid;
389 u8 ps_scheme;
390 u8 ack_policy;
391 u32 apsd_conf[2];
392};
393
394struct conf_tx_settings {
395 /*
396 * The TX ED value for TELEC Enable/Disable.
397 *
398 * Range: 0, 1
399 */
400 u8 tx_energy_detection;
401
402 /*
403 * Configuration for rate classes for TX (currently only one
404 * rate class supported.)
405 */
406 struct conf_tx_rate_class rc_conf;
407
408 /*
409 * Configuration for access categories for TX rate control.
410 */
411 u8 ac_conf_count;
412 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
413
414 /*
415 * Configuration for TID parameters.
416 */
417 u8 tid_conf_count;
418 struct conf_tx_tid tid_conf[CONF_TX_MAX_TID_COUNT];
419
420 /*
421 * The TX fragmentation threshold.
422 *
423 * Range: u16
424 */
425 u16 frag_threshold;
426
427 /*
428 * Max time in msec the FW may delay frame TX-Complete interrupt.
429 *
430 * Range: u16
431 */
432 u16 tx_compl_timeout;
433
434 /*
435 * Completed TX packet count which requires to issue the TX-Complete
436 * interrupt.
437 *
438 * Range: u16
439 */
440 u16 tx_compl_threshold;
441
442};
443
444enum {
445 CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/
446 CONF_WAKE_UP_EVENT_DTIM = 0x02, /* Wake on every DTIM*/
447 CONF_WAKE_UP_EVENT_N_DTIM = 0x04, /* Wake every Nth DTIM */
448 CONF_WAKE_UP_EVENT_N_BEACONS = 0x08, /* Wake every Nth beacon */
449 CONF_WAKE_UP_EVENT_BITS_MASK = 0x0F
450};
451
452#define CONF_MAX_BCN_FILT_IE_COUNT 32
453
454#define CONF_BCN_RULE_PASS_ON_CHANGE BIT(0)
455#define CONF_BCN_RULE_PASS_ON_APPEARANCE BIT(1)
456
457#define CONF_BCN_IE_OUI_LEN 3
458#define CONF_BCN_IE_VER_LEN 2
459
460struct conf_bcn_filt_rule {
461 /*
462 * IE number to which to associate a rule.
463 *
464 * Range: u8
465 */
466 u8 ie;
467
468 /*
469 * Rule to associate with the specific ie.
470 *
471 * Range: CONF_BCN_RULE_PASS_ON_*
472 */
473 u8 rule;
474
475 /*
476 * OUI for the vendor specifie IE (221)
477 */
478 u8 oui[CONF_BCN_IE_OUI_LEN];
479
480 /*
481 * Type for the vendor specifie IE (221)
482 */
483 u8 type;
484
485 /*
486 * Version for the vendor specifie IE (221)
487 */
488 u8 version[CONF_BCN_IE_VER_LEN];
489};
490
491#define CONF_MAX_RSSI_SNR_TRIGGERS 8
492
493enum {
494 CONF_TRIG_METRIC_RSSI_BEACON = 0,
495 CONF_TRIG_METRIC_RSSI_DATA,
496 CONF_TRIG_METRIC_SNR_BEACON,
497 CONF_TRIG_METRIC_SNR_DATA
498};
499
500enum {
501 CONF_TRIG_EVENT_TYPE_LEVEL = 0,
502 CONF_TRIG_EVENT_TYPE_EDGE
503};
504
505enum {
506 CONF_TRIG_EVENT_DIR_LOW = 0,
507 CONF_TRIG_EVENT_DIR_HIGH,
508 CONF_TRIG_EVENT_DIR_BIDIR
509};
510
511
512struct conf_sig_trigger {
513 /*
514 * The RSSI / SNR threshold value.
515 *
516 * FIXME: what is the range?
517 */
518 s16 threshold;
519
520 /*
521 * Minimum delay between two trigger events for this trigger in ms.
522 *
523 * Range: 0 - 60000
524 */
525 u16 pacing;
526
527 /*
528 * The measurement data source for this trigger.
529 *
530 * Range: CONF_TRIG_METRIC_*
531 */
532 u8 metric;
533
534 /*
535 * The trigger type of this trigger.
536 *
537 * Range: CONF_TRIG_EVENT_TYPE_*
538 */
539 u8 type;
540
541 /*
542 * The direction of the trigger.
543 *
544 * Range: CONF_TRIG_EVENT_DIR_*
545 */
546 u8 direction;
547
548 /*
549 * Hysteresis range of the trigger around the threshold (in dB)
550 *
551 * Range: u8
552 */
553 u8 hysteresis;
554
555 /*
556 * Index of the trigger rule.
557 *
558 * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
559 */
560 u8 index;
561
562 /*
563 * Enable / disable this rule (to use for clearing rules.)
564 *
565 * Range: 1 - Enabled, 2 - Not enabled
566 */
567 u8 enable;
568};
569
570struct conf_sig_weights {
571
572 /*
573 * RSSI from beacons average weight.
574 *
575 * Range: u8
576 */
577 u8 rssi_bcn_avg_weight;
578
579 /*
580 * RSSI from data average weight.
581 *
582 * Range: u8
583 */
584 u8 rssi_pkt_avg_weight;
585
586 /*
587 * SNR from beacons average weight.
588 *
589 * Range: u8
590 */
591 u8 snr_bcn_avg_weight;
592
593 /*
594 * SNR from data average weight.
595 *
596 * Range: u8
597 */
598 u8 snr_pkt_avg_weight;
599};
600
601enum conf_bcn_filt_mode {
602 CONF_BCN_FILT_MODE_DISABLED = 0,
603 CONF_BCN_FILT_MODE_ENABLED = 1
604};
605
606enum conf_bet_mode {
607 CONF_BET_MODE_DISABLE = 0,
608 CONF_BET_MODE_ENABLE = 1,
609};
610
611struct conf_conn_settings {
612 /*
613 * Firmware wakeup conditions configuration. The host may set only
614 * one bit.
615 *
616 * Range: CONF_WAKE_UP_EVENT_*
617 */
618 u8 wake_up_event;
619
620 /*
621 * Listen interval for beacons or Dtims.
622 *
623 * Range: 0 for beacon and Dtim wakeup
624 * 1-10 for x Dtims
625 * 1-255 for x beacons
626 */
627 u8 listen_interval;
628
629 /*
630 * Enable or disable the beacon filtering.
631 *
632 * Range: CONF_BCN_FILT_MODE_*
633 */
634 enum conf_bcn_filt_mode bcn_filt_mode;
635
636 /*
637 * Configure Beacon filter pass-thru rules.
638 */
639 u8 bcn_filt_ie_count;
640 struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];
641
642 /*
643 * The number of consequtive beacons to lose, before the firmware
644 * becomes out of synch.
645 *
646 * Range: u32
647 */
648 u32 synch_fail_thold;
649
650 /*
651 * After out-of-synch, the number of TU's to wait without a further
652 * received beacon (or probe response) before issuing the BSS_EVENT_LOSE
653 * event.
654 *
655 * Range: u32
656 */
657 u32 bss_lose_timeout;
658
659 /*
660 * Beacon receive timeout.
661 *
662 * Range: u32
663 */
664 u32 beacon_rx_timeout;
665
666 /*
667 * Broadcast receive timeout.
668 *
669 * Range: u32
670 */
671 u32 broadcast_timeout;
672
673 /*
674 * Enable/disable reception of broadcast packets in power save mode
675 *
676 * Range: 1 - enable, 0 - disable
677 */
678 u8 rx_broadcast_in_ps;
679
680 /*
681 * Consequtive PS Poll failures before sending event to driver
682 *
683 * Range: u8
684 */
685 u8 ps_poll_threshold;
686
687 /*
688 * Configuration of signal (rssi/snr) triggers.
689 */
690 u8 sig_trigger_count;
691 struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
692
693 /*
694 * Configuration of signal average weights.
695 */
696 struct conf_sig_weights sig_weights;
697
698 /*
699 * Specifies if beacon early termination procedure is enabled or
700 * disabled.
701 *
702 * Range: CONF_BET_MODE_*
703 */
704 u8 bet_enable;
705
706 /*
707 * Specifies the maximum number of consecutive beacons that may be
708 * early terminated. After this number is reached at least one full
709 * beacon must be correctly received in FW before beacon ET
710 * resumes.
711 *
712 * Range 0 - 255
713 */
714 u8 bet_max_consecutive;
715
716 /*
717 * Specifies the maximum number of times to try PSM entry if it fails
718 * (if sending the appropriate null-func message fails.)
719 *
720 * Range 0 - 255
721 */
722 u8 psm_entry_retries;
723};
724
725#define CONF_SR_ERR_TBL_MAX_VALUES 14
726
727struct conf_mart_reflex_err_table {
728 /*
729 * Length of the error table values table.
730 *
731 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
732 */
733 u8 len;
734
735 /*
736 * Smart Reflex error table upper limit.
737 *
738 * Range: s8
739 */
740 s8 upper_limit;
741
742 /*
743 * Smart Reflex error table values.
744 *
745 * Range: s8
746 */
747 s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
748};
749
750enum {
751 CONF_REF_CLK_19_2_E,
752 CONF_REF_CLK_26_E,
753 CONF_REF_CLK_38_4_E,
754 CONF_REF_CLK_52_E
755};
756
757enum single_dual_band_enum {
758 CONF_SINGLE_BAND,
759 CONF_DUAL_BAND
760};
761
762struct conf_general_parms {
763 /*
764 * RF Reference Clock type / speed
765 *
766 * Range: CONF_REF_CLK_*
767 */
768 u8 ref_clk;
769
770 /*
771 * Settling time of the reference clock after boot.
772 *
773 * Range: u8
774 */
775 u8 settling_time;
776
777 /*
778 * Flag defining whether clock is valid on wakeup.
779 *
780 * Range: 0 - not valid on wakeup, 1 - valid on wakeup
781 */
782 u8 clk_valid_on_wakeup;
783
784 /*
785 * DC-to-DC mode.
786 *
787 * Range: Unknown
788 */
789 u8 dc2dcmode;
790
791 /*
792 * Flag defining whether used as single or dual-band.
793 *
794 * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
795 */
796 u8 single_dual_band;
797
798 /*
799 * TX bip fem autodetect flag.
800 *
801 * Range: Unknown
802 */
803 u8 tx_bip_fem_autodetect;
804
805 /*
806 * TX bip gem manufacturer.
807 *
808 * Range: Unknown
809 */
810 u8 tx_bip_fem_manufacturer;
811
812 /*
813 * Settings flags.
814 *
815 * Range: Unknown
816 */
817 u8 settings;
818};
819
820#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
821#define CONF_NUMBER_OF_SUB_BANDS_5 7
822#define CONF_NUMBER_OF_RATE_GROUPS 6
823#define CONF_NUMBER_OF_CHANNELS_2_4 14
824#define CONF_NUMBER_OF_CHANNELS_5 35
825
826struct conf_radio_parms {
827 /*
828 * Static radio parameters for 2.4GHz
829 *
830 * Range: unknown
831 */
832 u8 rx_trace_loss;
833 u8 tx_trace_loss;
834 s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
835
836 /*
837 * Static radio parameters for 5GHz
838 *
839 * Range: unknown
840 */
841 u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
842 u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
843 s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
844
845 /*
846 * Dynamic radio parameters for 2.4GHz
847 *
848 * Range: unknown
849 */
850 s16 tx_ref_pd_voltage;
851 s8 tx_ref_power;
852 s8 tx_offset_db;
853
854 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
855 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
856
857 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
858 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
859 s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
860
861 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
862 u8 rx_fem_insertion_loss;
863
864 /*
865 * Dynamic radio parameters for 5GHz
866 *
867 * Range: unknown
868 */
869 s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
870 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
871 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
872
873 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
874 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
875
876 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
877 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
878
879 /* FIXME: this is inconsistent with the types for 2.4GHz */
880 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
881 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
882};
883
884#define CONF_SR_ERR_TBL_COUNT 3
885
886struct conf_init_settings {
887 /*
888 * Configure Smart Reflex error table values.
889 */
890 struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
891
892 /*
893 * Smart Reflex enable flag.
894 *
895 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
896 */
897 u8 sr_enable;
898
899 /*
900 * Configure general parameters.
901 */
902 struct conf_general_parms genparam;
903
904 /*
905 * Configure radio parameters.
906 */
907 struct conf_radio_parms radioparam;
908
909};
910
911struct conf_drv_settings {
912 struct conf_sg_settings sg;
913 struct conf_rx_settings rx;
914 struct conf_tx_settings tx;
915 struct conf_conn_settings conn;
916 struct conf_init_settings init;
917};
918
919#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index f3afd4a6ff33..e135d894b42a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -26,23 +26,82 @@
26#include "wl1271_spi.h" 26#include "wl1271_spi.h"
27#include "wl1271_event.h" 27#include "wl1271_event.h"
28#include "wl1271_ps.h" 28#include "wl1271_ps.h"
29#include "wl12xx_80211.h"
29 30
30static int wl1271_event_scan_complete(struct wl1271 *wl, 31static int wl1271_event_scan_complete(struct wl1271 *wl,
31 struct event_mailbox *mbox) 32 struct event_mailbox *mbox)
32{ 33{
34 int size = sizeof(struct wl12xx_probe_req_template);
33 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 35 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
34 mbox->scheduled_scan_status); 36 mbox->scheduled_scan_status);
35 37
36 if (wl->scanning) { 38 if (wl->scanning) {
37 mutex_unlock(&wl->mutex); 39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
38 ieee80211_scan_completed(wl->hw, false); 40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
39 mutex_lock(&wl->mutex); 41 NULL, size);
40 wl->scanning = false; 42 /* 2.4 GHz band scanned, scan 5 GHz band, pretend
43 * to the wl1271_cmd_scan function that we are not
44 * scanning as it checks that.
45 */
46 wl->scanning = false;
47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
48 wl->scan.active,
49 wl->scan.high_prio,
50 WL1271_SCAN_BAND_5_GHZ,
51 wl->scan.probe_requests);
52 } else {
53 if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
54 wl1271_cmd_template_set(wl,
55 CMD_TEMPL_CFG_PROBE_REQ_2_4,
56 NULL, size);
57 else
58 wl1271_cmd_template_set(wl,
59 CMD_TEMPL_CFG_PROBE_REQ_5,
60 NULL, size);
61
62 mutex_unlock(&wl->mutex);
63 ieee80211_scan_completed(wl->hw, false);
64 mutex_lock(&wl->mutex);
65 wl->scanning = false;
66 }
41 } 67 }
42
43 return 0; 68 return 0;
44} 69}
45 70
71static int wl1271_event_ps_report(struct wl1271 *wl,
72 struct event_mailbox *mbox,
73 bool *beacon_loss)
74{
75 int ret = 0;
76
77 wl1271_debug(DEBUG_EVENT, "ps_status: 0x%x", mbox->ps_status);
78
79 switch (mbox->ps_status) {
80 case EVENT_ENTER_POWER_SAVE_FAIL:
81 if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) {
82 wl->psm_entry_retry++;
83 wl1271_error("PSM entry failed, retrying %d\n",
84 wl->psm_entry_retry);
85 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
86 } else {
87 wl->psm_entry_retry = 0;
88 *beacon_loss = true;
89 }
90 break;
91 case EVENT_ENTER_POWER_SAVE_SUCCESS:
92 wl->psm_entry_retry = 0;
93 break;
94 case EVENT_EXIT_POWER_SAVE_FAIL:
95 wl1271_info("PSM exit failed");
96 break;
97 case EVENT_EXIT_POWER_SAVE_SUCCESS:
98 default:
99 break;
100 }
101
102 return ret;
103}
104
46static void wl1271_event_mbox_dump(struct event_mailbox *mbox) 105static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
47{ 106{
48 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); 107 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -54,10 +113,12 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
54{ 113{
55 int ret; 114 int ret;
56 u32 vector; 115 u32 vector;
116 bool beacon_loss = false;
57 117
58 wl1271_event_mbox_dump(mbox); 118 wl1271_event_mbox_dump(mbox);
59 119
60 vector = mbox->events_vector & ~(mbox->events_mask); 120 vector = le32_to_cpu(mbox->events_vector);
121 vector &= ~(le32_to_cpu(mbox->events_mask));
61 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); 122 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
62 123
63 if (vector & SCAN_COMPLETE_EVENT_ID) { 124 if (vector & SCAN_COMPLETE_EVENT_ID) {
@@ -66,14 +127,34 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
66 return ret; 127 return ret;
67 } 128 }
68 129
69 if (vector & BSS_LOSE_EVENT_ID) { 130 /*
131 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
132 * filtering) is enabled. Without PSM, the stack will receive all
133 * beacons and can detect beacon loss by itself.
134 */
135 if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
70 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); 136 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
71 137
72 if (wl->psm_requested && wl->psm) { 138 /* indicate to the stack, that beacons have been lost */
73 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); 139 beacon_loss = true;
74 if (ret < 0) 140 }
75 return ret; 141
76 } 142 if (vector & PS_REPORT_EVENT_ID) {
143 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
144 ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
145 if (ret < 0)
146 return ret;
147 }
148
149 if (beacon_loss) {
150 /* Obviously, it's dangerous to release the mutex while
151 we are holding many of the variables in the wl struct.
152 That's why it's done last in the function, and care must
153 be taken that nothing more is done after this function
154 returns. */
155 mutex_unlock(&wl->mutex);
156 ieee80211_beacon_loss(wl->vif);
157 mutex_lock(&wl->mutex);
77 } 158 }
78 159
79 return 0; 160 return 0;
@@ -92,14 +173,14 @@ int wl1271_event_unmask(struct wl1271 *wl)
92 173
93void wl1271_event_mbox_config(struct wl1271 *wl) 174void wl1271_event_mbox_config(struct wl1271 *wl)
94{ 175{
95 wl->mbox_ptr[0] = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); 176 wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
96 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); 177 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
97 178
98 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", 179 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
99 wl->mbox_ptr[0], wl->mbox_ptr[1]); 180 wl->mbox_ptr[0], wl->mbox_ptr[1]);
100} 181}
101 182
102int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) 183int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
103{ 184{
104 struct event_mailbox mbox; 185 struct event_mailbox mbox;
105 int ret; 186 int ret;
@@ -110,8 +191,8 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
110 return -EINVAL; 191 return -EINVAL;
111 192
112 /* first we read the mbox descriptor */ 193 /* first we read the mbox descriptor */
113 wl1271_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox, 194 wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox,
114 sizeof(struct event_mailbox)); 195 sizeof(struct event_mailbox), false);
115 196
116 /* process the descriptor */ 197 /* process the descriptor */
117 ret = wl1271_event_process(wl, &mbox); 198 ret = wl1271_event_process(wl, &mbox);
@@ -119,7 +200,9 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
119 return ret; 200 return ret;
120 201
121 /* then we let the firmware know it can go on...*/ 202 /* then we let the firmware know it can go on...*/
122 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); 203 if (do_ack)
204 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
205 INTR_TRIG_EVENT_ACK);
123 206
124 return 0; 207 return 0;
125} 208}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 2cdce7c34bf0..4e3f55ebb1a8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -63,36 +63,43 @@ enum {
63 EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, 63 EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
64}; 64};
65 65
66enum {
67 EVENT_ENTER_POWER_SAVE_FAIL = 0,
68 EVENT_ENTER_POWER_SAVE_SUCCESS,
69 EVENT_EXIT_POWER_SAVE_FAIL,
70 EVENT_EXIT_POWER_SAVE_SUCCESS,
71};
72
66struct event_debug_report { 73struct event_debug_report {
67 u8 debug_event_id; 74 u8 debug_event_id;
68 u8 num_params; 75 u8 num_params;
69 u16 pad; 76 __le16 pad;
70 u32 report_1; 77 __le32 report_1;
71 u32 report_2; 78 __le32 report_2;
72 u32 report_3; 79 __le32 report_3;
73} __attribute__ ((packed)); 80} __attribute__ ((packed));
74 81
75#define NUM_OF_RSSI_SNR_TRIGGERS 8 82#define NUM_OF_RSSI_SNR_TRIGGERS 8
76 83
77struct event_mailbox { 84struct event_mailbox {
78 u32 events_vector; 85 __le32 events_vector;
79 u32 events_mask; 86 __le32 events_mask;
80 u32 reserved_1; 87 __le32 reserved_1;
81 u32 reserved_2; 88 __le32 reserved_2;
82 89
83 u8 dbg_event_id; 90 u8 dbg_event_id;
84 u8 num_relevant_params; 91 u8 num_relevant_params;
85 u16 reserved_3; 92 __le16 reserved_3;
86 u32 event_report_p1; 93 __le32 event_report_p1;
87 u32 event_report_p2; 94 __le32 event_report_p2;
88 u32 event_report_p3; 95 __le32 event_report_p3;
89 96
90 u8 number_of_scan_results; 97 u8 number_of_scan_results;
91 u8 scan_tag; 98 u8 scan_tag;
92 u8 reserved_4[2]; 99 u8 reserved_4[2];
93 u32 compl_scheduled_scan_status; 100 __le32 compl_scheduled_scan_status;
94 101
95 u16 scheduled_scan_attended_channels; 102 __le16 scheduled_scan_attended_channels;
96 u8 soft_gemini_sense_info; 103 u8 soft_gemini_sense_info;
97 u8 soft_gemini_protective_info; 104 u8 soft_gemini_protective_info;
98 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; 105 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
@@ -105,6 +112,6 @@ struct event_mailbox {
105 112
106int wl1271_event_unmask(struct wl1271 *wl); 113int wl1271_event_unmask(struct wl1271 *wl);
107void wl1271_event_mbox_config(struct wl1271 *wl); 114void wl1271_event_mbox_config(struct wl1271 *wl);
108int wl1271_event_handle(struct wl1271 *wl, u8 mbox); 115int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
109 116
110#endif 117#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 490df217605a..7c2017f480ea 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -59,6 +59,14 @@ static int wl1271_init_templates_config(struct wl1271 *wl)
59 if (ret < 0) 59 if (ret < 0)
60 return ret; 60 return ret;
61 61
62 if (wl1271_11a_enabled()) {
63 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
64 NULL,
65 sizeof(struct wl12xx_probe_req_template));
66 if (ret < 0)
67 return ret;
68 }
69
62 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, 70 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
63 sizeof(struct wl12xx_null_data_template)); 71 sizeof(struct wl12xx_null_data_template));
64 if (ret < 0) 72 if (ret < 0)
@@ -94,7 +102,7 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
94{ 102{
95 int ret; 103 int ret;
96 104
97 ret = wl1271_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); 105 ret = wl1271_acx_rx_msdu_life_time(wl);
98 if (ret < 0) 106 if (ret < 0)
99 return ret; 107 return ret;
100 108
@@ -117,7 +125,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
117 if (ret < 0) 125 if (ret < 0)
118 return ret; 126 return ret;
119 127
120 ret = wl1271_acx_group_address_tbl(wl); 128 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
121 if (ret < 0) 129 if (ret < 0)
122 return ret; 130 return ret;
123 131
@@ -125,7 +133,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
125 if (ret < 0) 133 if (ret < 0)
126 return ret; 134 return ret;
127 135
128 ret = wl1271_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); 136 ret = wl1271_acx_rts_threshold(wl, wl->conf.rx.rts_threshold);
129 if (ret < 0) 137 if (ret < 0)
130 return ret; 138 return ret;
131 139
@@ -136,7 +144,8 @@ static int wl1271_init_beacon_filter(struct wl1271 *wl)
136{ 144{
137 int ret; 145 int ret;
138 146
139 ret = wl1271_acx_beacon_filter_opt(wl); 147 /* disable beacon filtering at this stage */
148 ret = wl1271_acx_beacon_filter_opt(wl, false);
140 if (ret < 0) 149 if (ret < 0)
141 return ret; 150 return ret;
142 151
@@ -187,6 +196,7 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
187static int wl1271_init_general_parms(struct wl1271 *wl) 196static int wl1271_init_general_parms(struct wl1271 *wl)
188{ 197{
189 struct wl1271_general_parms *gen_parms; 198 struct wl1271_general_parms *gen_parms;
199 struct conf_general_parms *g = &wl->conf.init.genparam;
190 int ret; 200 int ret;
191 201
192 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); 202 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
@@ -195,15 +205,14 @@ static int wl1271_init_general_parms(struct wl1271 *wl)
195 205
196 gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM; 206 gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM;
197 207
198 gen_parms->ref_clk = REF_CLK_38_4_E; 208 gen_parms->ref_clk = g->ref_clk;
199 /* FIXME: magic numbers */ 209 gen_parms->settling_time = g->settling_time;
200 gen_parms->settling_time = 5; 210 gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
201 gen_parms->clk_valid_on_wakeup = 0; 211 gen_parms->dc2dcmode = g->dc2dcmode;
202 gen_parms->dc2dcmode = 0; 212 gen_parms->single_dual_band = g->single_dual_band;
203 gen_parms->single_dual_band = 0; 213 gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
204 gen_parms->tx_bip_fem_autodetect = 1; 214 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
205 gen_parms->tx_bip_fem_manufacturer = 1; 215 gen_parms->settings = g->settings;
206 gen_parms->settings = 1;
207 216
208 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 217 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
209 if (ret < 0) { 218 if (ret < 0) {
@@ -217,32 +226,9 @@ static int wl1271_init_general_parms(struct wl1271 *wl)
217 226
218static int wl1271_init_radio_parms(struct wl1271 *wl) 227static int wl1271_init_radio_parms(struct wl1271 *wl)
219{ 228{
220 /*
221 * FIXME: All these magic numbers should be moved to some place where
222 * they can be configured (separate file?)
223 */
224
225 struct wl1271_radio_parms *radio_parms; 229 struct wl1271_radio_parms *radio_parms;
226 int ret; 230 struct conf_radio_parms *r = &wl->conf.init.radioparam;
227 u8 compensation[] = { 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 0xfc, 0x00, 231 int i, ret;
228 0x08, 0x10, 0xf0, 0xf8, 0x00, 0x0a, 0x14 };
229
230 u8 tx_rate_limits_normal[] = { 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 };
231 u8 tx_rate_limits_degraded[] = { 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 };
232
233 u8 tx_channel_limits_11b[] = { 0x22, 0x50, 0x50, 0x50,
234 0x50, 0x50, 0x50, 0x50,
235 0x50, 0x50, 0x22, 0x50,
236 0x22, 0x50 };
237
238 u8 tx_channel_limits_ofdm[] = { 0x20, 0x50, 0x50, 0x50,
239 0x50, 0x50, 0x50, 0x50,
240 0x50, 0x50, 0x20, 0x50,
241 0x20, 0x50 };
242
243 u8 tx_pdv_rate_offsets[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
244
245 u8 tx_ibias[] = { 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 };
246 232
247 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); 233 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
248 if (!radio_parms) 234 if (!radio_parms)
@@ -251,33 +237,59 @@ static int wl1271_init_radio_parms(struct wl1271 *wl)
251 radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM; 237 radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM;
252 238
253 /* Static radio parameters */ 239 /* Static radio parameters */
254 radio_parms->rx_trace_loss = 10; 240 radio_parms->rx_trace_loss = r->rx_trace_loss;
255 radio_parms->tx_trace_loss = 10; 241 radio_parms->tx_trace_loss = r->tx_trace_loss;
256 memcpy(radio_parms->rx_rssi_and_proc_compens, compensation, 242 memcpy(radio_parms->rx_rssi_and_proc_compens,
257 sizeof(compensation)); 243 r->rx_rssi_and_proc_compens,
258 244 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
259 /* We don't set the 5GHz -- N/A */ 245
246 memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
247 CONF_NUMBER_OF_SUB_BANDS_5);
248 memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
249 CONF_NUMBER_OF_SUB_BANDS_5);
250 memcpy(radio_parms->rx_rssi_and_proc_compens_5,
251 r->rx_rssi_and_proc_compens_5,
252 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
260 253
261 /* Dynamic radio parameters */ 254 /* Dynamic radio parameters */
262 radio_parms->tx_ref_pd_voltage = cpu_to_le16(0x24e); 255 radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
263 radio_parms->tx_ref_power = 0x78; 256 radio_parms->tx_ref_power = r->tx_ref_power;
264 radio_parms->tx_offset_db = 0x0; 257 radio_parms->tx_offset_db = r->tx_offset_db;
265 258
266 memcpy(radio_parms->tx_rate_limits_normal, tx_rate_limits_normal, 259 memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
267 sizeof(tx_rate_limits_normal)); 260 CONF_NUMBER_OF_RATE_GROUPS);
268 memcpy(radio_parms->tx_rate_limits_degraded, tx_rate_limits_degraded, 261 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
269 sizeof(tx_rate_limits_degraded)); 262 CONF_NUMBER_OF_RATE_GROUPS);
270 263
271 memcpy(radio_parms->tx_channel_limits_11b, tx_channel_limits_11b, 264 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
272 sizeof(tx_channel_limits_11b)); 265 CONF_NUMBER_OF_CHANNELS_2_4);
273 memcpy(radio_parms->tx_channel_limits_ofdm, tx_channel_limits_ofdm, 266 memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
274 sizeof(tx_channel_limits_ofdm)); 267 CONF_NUMBER_OF_CHANNELS_2_4);
275 memcpy(radio_parms->tx_pdv_rate_offsets, tx_pdv_rate_offsets, 268 memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
276 sizeof(tx_pdv_rate_offsets)); 269 CONF_NUMBER_OF_RATE_GROUPS);
277 memcpy(radio_parms->tx_ibias, tx_ibias, 270 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
278 sizeof(tx_ibias)); 271
279 272 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
280 radio_parms->rx_fem_insertion_loss = 0x14; 273
274 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
275 radio_parms->tx_ref_pd_voltage_5[i] =
276 cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
277 memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
278 CONF_NUMBER_OF_SUB_BANDS_5);
279 memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
280 CONF_NUMBER_OF_SUB_BANDS_5);
281 memcpy(radio_parms->tx_rate_limits_normal_5,
282 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
283 memcpy(radio_parms->tx_rate_limits_degraded_5,
284 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
285 memcpy(radio_parms->tx_channel_limits_ofdm_5,
286 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
287 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
288 CONF_NUMBER_OF_RATE_GROUPS);
289 memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
290 CONF_NUMBER_OF_RATE_GROUPS);
291 memcpy(radio_parms->rx_fem_insertion_loss_5,
292 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
281 293
282 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); 294 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
283 if (ret < 0) 295 if (ret < 0)
@@ -291,12 +303,15 @@ int wl1271_hw_init(struct wl1271 *wl)
291{ 303{
292 int ret; 304 int ret;
293 305
306 /* FIXME: the following parameter setting functions return error
307 * codes - the reason is so far unknown. The -EIO is therefore
308 * ignored for the time being. */
294 ret = wl1271_init_general_parms(wl); 309 ret = wl1271_init_general_parms(wl);
295 if (ret < 0) 310 if (ret < 0 && ret != -EIO)
296 return ret; 311 return ret;
297 312
298 ret = wl1271_init_radio_parms(wl); 313 ret = wl1271_init_radio_parms(wl);
299 if (ret < 0) 314 if (ret < 0 && ret != -EIO)
300 return ret; 315 return ret;
301 316
302 /* Template settings */ 317 /* Template settings */
@@ -311,8 +326,8 @@ int wl1271_hw_init(struct wl1271 *wl)
311 326
312 /* RX config */ 327 /* RX config */
313 ret = wl1271_init_rx_config(wl, 328 ret = wl1271_init_rx_config(wl,
314 RX_CFG_PROMISCUOUS | RX_CFG_TSF, 329 RX_CFG_PROMISCUOUS | RX_CFG_TSF,
315 RX_FILTER_OPTION_DEF); 330 RX_FILTER_OPTION_DEF);
316 /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, 331 /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
317 RX_FILTER_OPTION_FILTER_ALL); */ 332 RX_FILTER_OPTION_FILTER_ALL); */
318 if (ret < 0) 333 if (ret < 0)
@@ -323,6 +338,11 @@ int wl1271_hw_init(struct wl1271 *wl)
323 if (ret < 0) 338 if (ret < 0)
324 goto out_free_memmap; 339 goto out_free_memmap;
325 340
341 /* Initialize connection monitoring thresholds */
342 ret = wl1271_acx_conn_monit_params(wl);
343 if (ret < 0)
344 goto out_free_memmap;
345
326 /* Beacon filtering */ 346 /* Beacon filtering */
327 ret = wl1271_init_beacon_filter(wl); 347 ret = wl1271_init_beacon_filter(wl);
328 if (ret < 0) 348 if (ret < 0)
@@ -369,7 +389,7 @@ int wl1271_hw_init(struct wl1271 *wl)
369 goto out_free_memmap; 389 goto out_free_memmap;
370 390
371 /* Configure TX rate classes */ 391 /* Configure TX rate classes */
372 ret = wl1271_acx_rate_policies(wl); 392 ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
373 if (ret < 0) 393 if (ret < 0)
374 goto out_free_memmap; 394 goto out_free_memmap;
375 395
@@ -388,10 +408,16 @@ int wl1271_hw_init(struct wl1271 *wl)
388 if (ret < 0) 408 if (ret < 0)
389 goto out_free_memmap; 409 goto out_free_memmap;
390 410
411 /* Configure smart reflex */
412 ret = wl1271_acx_smart_reflex(wl);
413 if (ret < 0)
414 goto out_free_memmap;
415
391 return 0; 416 return 0;
392 417
393 out_free_memmap: 418 out_free_memmap:
394 kfree(wl->target_mem_map); 419 kfree(wl->target_mem_map);
420 wl->target_mem_map = NULL;
395 421
396 return ret; 422 return ret;
397} 423}
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index bd8ff0fa2272..6e21ceee76a6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -48,19 +48,6 @@ struct wl1271_general_parms {
48 u8 settings; 48 u8 settings;
49} __attribute__ ((packed)); 49} __attribute__ ((packed));
50 50
51enum ref_clk_enum {
52 REF_CLK_19_2_E,
53 REF_CLK_26_E,
54 REF_CLK_38_4_E,
55 REF_CLK_52_E
56};
57
58#define RSSI_AND_PROCESS_COMPENSATION_SIZE 15
59#define NUMBER_OF_SUB_BANDS_5 7
60#define NUMBER_OF_RATE_GROUPS 6
61#define NUMBER_OF_CHANNELS_2_4 14
62#define NUMBER_OF_CHANNELS_5 35
63
64struct wl1271_radio_parms { 51struct wl1271_radio_parms {
65 u8 id; 52 u8 id;
66 u8 padding[3]; 53 u8 padding[3];
@@ -69,45 +56,45 @@ struct wl1271_radio_parms {
69 /* 2.4GHz */ 56 /* 2.4GHz */
70 u8 rx_trace_loss; 57 u8 rx_trace_loss;
71 u8 tx_trace_loss; 58 u8 tx_trace_loss;
72 s8 rx_rssi_and_proc_compens[RSSI_AND_PROCESS_COMPENSATION_SIZE]; 59 s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
73 60
74 /* 5GHz */ 61 /* 5GHz */
75 u8 rx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; 62 u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
76 u8 tx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; 63 u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
77 s8 rx_rssi_and_proc_compens_5[RSSI_AND_PROCESS_COMPENSATION_SIZE]; 64 s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
78 65
79 /* Dynamic radio parameters */ 66 /* Dynamic radio parameters */
80 /* 2.4GHz */ 67 /* 2.4GHz */
81 s16 tx_ref_pd_voltage; 68 __le16 tx_ref_pd_voltage;
82 s8 tx_ref_power; 69 s8 tx_ref_power;
83 s8 tx_offset_db; 70 s8 tx_offset_db;
84 71
85 s8 tx_rate_limits_normal[NUMBER_OF_RATE_GROUPS]; 72 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
86 s8 tx_rate_limits_degraded[NUMBER_OF_RATE_GROUPS]; 73 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
87 74
88 s8 tx_channel_limits_11b[NUMBER_OF_CHANNELS_2_4]; 75 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
89 s8 tx_channel_limits_ofdm[NUMBER_OF_CHANNELS_2_4]; 76 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
90 s8 tx_pdv_rate_offsets[NUMBER_OF_RATE_GROUPS]; 77 s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
91 78
92 u8 tx_ibias[NUMBER_OF_RATE_GROUPS]; 79 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
93 u8 rx_fem_insertion_loss; 80 u8 rx_fem_insertion_loss;
94 81
95 u8 padding2; 82 u8 padding2;
96 83
97 /* 5GHz */ 84 /* 5GHz */
98 s16 tx_ref_pd_voltage_5[NUMBER_OF_SUB_BANDS_5]; 85 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
99 s8 tx_ref_power_5[NUMBER_OF_SUB_BANDS_5]; 86 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
100 s8 tx_offset_db_5[NUMBER_OF_SUB_BANDS_5]; 87 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
101 88
102 s8 tx_rate_limits_normal_5[NUMBER_OF_RATE_GROUPS]; 89 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
103 s8 tx_rate_limits_degraded_5[NUMBER_OF_RATE_GROUPS]; 90 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
104 91
105 s8 tx_channel_limits_ofdm_5[NUMBER_OF_CHANNELS_5]; 92 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
106 s8 tx_pdv_rate_offsets_5[NUMBER_OF_RATE_GROUPS]; 93 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
107 94
108 /* FIXME: this is inconsistent with the types for 2.4GHz */ 95 /* FIXME: this is inconsistent with the types for 2.4GHz */
109 s8 tx_ibias_5[NUMBER_OF_RATE_GROUPS]; 96 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
110 s8 rx_fem_insertion_loss_5[NUMBER_OF_SUB_BANDS_5]; 97 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
111 98
112 u8 padding3[2]; 99 u8 padding3[2];
113} __attribute__ ((packed)); 100} __attribute__ ((packed));
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 27298b19d5bd..00ddcc2d37c1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -30,7 +30,9 @@
30#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
31#include <linux/crc32.h> 31#include <linux/crc32.h>
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/vmalloc.h>
33#include <linux/spi/wl12xx.h> 34#include <linux/spi/wl12xx.h>
35#include <linux/inetdevice.h>
34 36
35#include "wl1271.h" 37#include "wl1271.h"
36#include "wl12xx_80211.h" 38#include "wl12xx_80211.h"
@@ -45,6 +47,309 @@
45#include "wl1271_cmd.h" 47#include "wl1271_cmd.h"
46#include "wl1271_boot.h" 48#include "wl1271_boot.h"
47 49
50static struct conf_drv_settings default_conf = {
51 .sg = {
52 .per_threshold = 7500,
53 .max_scan_compensation_time = 120000,
54 .nfs_sample_interval = 400,
55 .load_ratio = 50,
56 .auto_ps_mode = 0,
57 .probe_req_compensation = 170,
58 .scan_window_compensation = 50,
59 .antenna_config = 0,
60 .beacon_miss_threshold = 60,
61 .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS,
62 .rate_adaptation_snr = 0
63 },
64 .rx = {
65 .rx_msdu_life_time = 512000,
66 .packet_detection_threshold = 0,
67 .ps_poll_timeout = 15,
68 .upsd_timeout = 15,
69 .rts_threshold = 2347,
70 .rx_cca_threshold = 0xFFEF,
71 .irq_blk_threshold = 0,
72 .irq_pkt_threshold = USHORT_MAX,
73 .irq_timeout = 5,
74 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
75 },
76 .tx = {
77 .tx_energy_detection = 0,
78 .rc_conf = {
79 .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED,
80 .short_retry_limit = 10,
81 .long_retry_limit = 10,
82 .aflags = 0
83 },
84 .ac_conf_count = 4,
85 .ac_conf = {
86 [0] = {
87 .ac = CONF_TX_AC_BE,
88 .cw_min = 15,
89 .cw_max = 63,
90 .aifsn = 3,
91 .tx_op_limit = 0,
92 },
93 [1] = {
94 .ac = CONF_TX_AC_BK,
95 .cw_min = 15,
96 .cw_max = 63,
97 .aifsn = 7,
98 .tx_op_limit = 0,
99 },
100 [2] = {
101 .ac = CONF_TX_AC_VI,
102 .cw_min = 15,
103 .cw_max = 63,
104 .aifsn = CONF_TX_AIFS_PIFS,
105 .tx_op_limit = 3008,
106 },
107 [3] = {
108 .ac = CONF_TX_AC_VO,
109 .cw_min = 15,
110 .cw_max = 63,
111 .aifsn = CONF_TX_AIFS_PIFS,
112 .tx_op_limit = 1504,
113 },
114 },
115 .tid_conf_count = 7,
116 .tid_conf = {
117 [0] = {
118 .queue_id = 0,
119 .channel_type = CONF_CHANNEL_TYPE_DCF,
120 .tsid = CONF_TX_AC_BE,
121 .ps_scheme = CONF_PS_SCHEME_LEGACY,
122 .ack_policy = CONF_ACK_POLICY_LEGACY,
123 .apsd_conf = {0, 0},
124 },
125 [1] = {
126 .queue_id = 1,
127 .channel_type = CONF_CHANNEL_TYPE_DCF,
128 .tsid = CONF_TX_AC_BE,
129 .ps_scheme = CONF_PS_SCHEME_LEGACY,
130 .ack_policy = CONF_ACK_POLICY_LEGACY,
131 .apsd_conf = {0, 0},
132 },
133 [2] = {
134 .queue_id = 2,
135 .channel_type = CONF_CHANNEL_TYPE_DCF,
136 .tsid = CONF_TX_AC_BE,
137 .ps_scheme = CONF_PS_SCHEME_LEGACY,
138 .ack_policy = CONF_ACK_POLICY_LEGACY,
139 .apsd_conf = {0, 0},
140 },
141 [3] = {
142 .queue_id = 3,
143 .channel_type = CONF_CHANNEL_TYPE_DCF,
144 .tsid = CONF_TX_AC_BE,
145 .ps_scheme = CONF_PS_SCHEME_LEGACY,
146 .ack_policy = CONF_ACK_POLICY_LEGACY,
147 .apsd_conf = {0, 0},
148 },
149 [4] = {
150 .queue_id = 4,
151 .channel_type = CONF_CHANNEL_TYPE_DCF,
152 .tsid = CONF_TX_AC_BE,
153 .ps_scheme = CONF_PS_SCHEME_LEGACY,
154 .ack_policy = CONF_ACK_POLICY_LEGACY,
155 .apsd_conf = {0, 0},
156 },
157 [5] = {
158 .queue_id = 5,
159 .channel_type = CONF_CHANNEL_TYPE_DCF,
160 .tsid = CONF_TX_AC_BE,
161 .ps_scheme = CONF_PS_SCHEME_LEGACY,
162 .ack_policy = CONF_ACK_POLICY_LEGACY,
163 .apsd_conf = {0, 0},
164 },
165 [6] = {
166 .queue_id = 6,
167 .channel_type = CONF_CHANNEL_TYPE_DCF,
168 .tsid = CONF_TX_AC_BE,
169 .ps_scheme = CONF_PS_SCHEME_LEGACY,
170 .ack_policy = CONF_ACK_POLICY_LEGACY,
171 .apsd_conf = {0, 0},
172 }
173 },
174 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
175 .tx_compl_timeout = 5,
176 .tx_compl_threshold = 5
177 },
178 .conn = {
179 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
180 .listen_interval = 0,
181 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
182 .bcn_filt_ie_count = 1,
183 .bcn_filt_ie = {
184 [0] = {
185 .ie = WLAN_EID_CHANNEL_SWITCH,
186 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
187 }
188 },
189 .synch_fail_thold = 5,
190 .bss_lose_timeout = 100,
191 .beacon_rx_timeout = 10000,
192 .broadcast_timeout = 20000,
193 .rx_broadcast_in_ps = 1,
194 .ps_poll_threshold = 4,
195 .sig_trigger_count = 2,
196 .sig_trigger = {
197 [0] = {
198 .threshold = -75,
199 .pacing = 500,
200 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
201 .type = CONF_TRIG_EVENT_TYPE_EDGE,
202 .direction = CONF_TRIG_EVENT_DIR_LOW,
203 .hysteresis = 2,
204 .index = 0,
205 .enable = 1
206 },
207 [1] = {
208 .threshold = -75,
209 .pacing = 500,
210 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
211 .type = CONF_TRIG_EVENT_TYPE_EDGE,
212 .direction = CONF_TRIG_EVENT_DIR_HIGH,
213 .hysteresis = 2,
214 .index = 1,
215 .enable = 1
216 }
217 },
218 .sig_weights = {
219 .rssi_bcn_avg_weight = 10,
220 .rssi_pkt_avg_weight = 10,
221 .snr_bcn_avg_weight = 10,
222 .snr_pkt_avg_weight = 10
223 },
224 .bet_enable = CONF_BET_MODE_ENABLE,
225 .bet_max_consecutive = 100,
226 .psm_entry_retries = 3
227 },
228 .init = {
229 .sr_err_tbl = {
230 [0] = {
231 .len = 7,
232 .upper_limit = 0x03,
233 .values = {
234 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
235 0x00 }
236 },
237 [1] = {
238 .len = 7,
239 .upper_limit = 0x03,
240 .values = {
241 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
242 0x00 }
243 },
244 [2] = {
245 .len = 7,
246 .upper_limit = 0x03,
247 .values = {
248 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
249 0x00 }
250 }
251 },
252 .sr_enable = 1,
253 .genparam = {
254 /*
255 * FIXME: The correct value CONF_REF_CLK_38_4_E
256 * causes the firmware to crash on boot.
257 * The value 5 apparently is an
258 * unnoficial XTAL configuration of the
259 * same frequency, which appears to work.
260 */
261 .ref_clk = 5,
262 .settling_time = 5,
263 .clk_valid_on_wakeup = 0,
264 .dc2dcmode = 0,
265 .single_dual_band = CONF_SINGLE_BAND,
266 .tx_bip_fem_autodetect = 0,
267 .tx_bip_fem_manufacturer = 1,
268 .settings = 1,
269 },
270 .radioparam = {
271 .rx_trace_loss = 10,
272 .tx_trace_loss = 10,
273 .rx_rssi_and_proc_compens = {
274 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
275 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
276 0x00, 0x0a, 0x14 },
277 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
278 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
279 .rx_rssi_and_proc_compens_5 = {
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00 },
283 .tx_ref_pd_voltage = 0x24e,
284 .tx_ref_power = 0x78,
285 .tx_offset_db = 0x0,
286 .tx_rate_limits_normal = {
287 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
288 .tx_rate_limits_degraded = {
289 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
290 .tx_channel_limits_11b = {
291 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
292 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
293 0x22, 0x50 },
294 .tx_channel_limits_ofdm = {
295 0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
296 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
297 0x20, 0x50 },
298 .tx_pdv_rate_offsets = {
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
300 .tx_ibias = {
301 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
302 .rx_fem_insertion_loss = 0x14,
303 .tx_ref_pd_voltage_5 = {
304 0x0190, 0x01a4, 0x01c3, 0x01d8,
305 0x020a, 0x021c },
306 .tx_ref_power_5 = {
307 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
308 .tx_offset_db_5 = {
309 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
310 .tx_rate_limits_normal_5 = {
311 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
312 .tx_rate_limits_degraded_5 = {
313 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
314 .tx_channel_limits_ofdm_5 = {
315 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
316 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
317 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
318 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
319 0x50, 0x50, 0x50 },
320 .tx_pdv_rate_offsets_5 = {
321 0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
322 .tx_ibias_5 = {
323 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
324 .rx_fem_insertion_loss_5 = {
325 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
326 }
327 }
328};
329
330static LIST_HEAD(wl_list);
331
332static void wl1271_conf_init(struct wl1271 *wl)
333{
334
335 /*
336 * This function applies the default configuration to the driver. This
337 * function is invoked upon driver load (spi probe.)
338 *
339 * The configuration is stored in a run-time structure in order to
340 * facilitate for run-time adjustment of any of the parameters. Making
341 * changes to the configuration structure will apply the new values on
342 * the next interface up (wl1271_op_start.)
343 */
344
345 /* apply driver default configuration */
346 memcpy(&wl->conf, &default_conf, sizeof(default_conf));
347
348 if (wl1271_11a_enabled())
349 wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
350}
351
352
48static int wl1271_plt_init(struct wl1271 *wl) 353static int wl1271_plt_init(struct wl1271 *wl)
49{ 354{
50 int ret; 355 int ret;
@@ -75,20 +380,14 @@ static void wl1271_power_on(struct wl1271 *wl)
75 wl->set_power(true); 380 wl->set_power(true);
76} 381}
77 382
78static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status) 383static void wl1271_fw_status(struct wl1271 *wl,
384 struct wl1271_fw_status *status)
79{ 385{
80 u32 total = 0; 386 u32 total = 0;
81 int i; 387 int i;
82 388
83 /* 389 wl1271_spi_read(wl, FW_STATUS_ADDR, status,
84 * FIXME: Reading the FW status directly from the registers seems to 390 sizeof(*status), false);
85 * be the right thing to do, but it doesn't work. And in the
86 * reference driver, there is a workaround called
87 * USE_SDIO_24M_WORKAROUND, which reads the status from memory
88 * instead, so we do the same here.
89 */
90
91 wl1271_spi_mem_read(wl, STATUS_MEM_ADDRESS, status, sizeof(*status));
92 391
93 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 392 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
94 "drv_rx_counter = %d, tx_results_counter = %d)", 393 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -99,25 +398,28 @@ static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status)
99 398
100 /* update number of available TX blocks */ 399 /* update number of available TX blocks */
101 for (i = 0; i < NUM_TX_QUEUES; i++) { 400 for (i = 0; i < NUM_TX_QUEUES; i++) {
102 u32 cnt = status->tx_released_blks[i] - wl->tx_blocks_freed[i]; 401 u32 cnt = le32_to_cpu(status->tx_released_blks[i]) -
103 wl->tx_blocks_freed[i] = status->tx_released_blks[i]; 402 wl->tx_blocks_freed[i];
403
404 wl->tx_blocks_freed[i] =
405 le32_to_cpu(status->tx_released_blks[i]);
104 wl->tx_blocks_available += cnt; 406 wl->tx_blocks_available += cnt;
105 total += cnt; 407 total += cnt;
106 } 408 }
107 409
108 /* if more blocks are available now, schedule some tx work */ 410 /* if more blocks are available now, schedule some tx work */
109 if (total && !skb_queue_empty(&wl->tx_queue)) 411 if (total && !skb_queue_empty(&wl->tx_queue))
110 schedule_work(&wl->tx_work); 412 ieee80211_queue_work(wl->hw, &wl->tx_work);
111 413
112 /* update the host-chipset time offset */ 414 /* update the host-chipset time offset */
113 wl->time_offset = jiffies_to_usecs(jiffies) - status->fw_localtime; 415 wl->time_offset = jiffies_to_usecs(jiffies) -
416 le32_to_cpu(status->fw_localtime);
114} 417}
115 418
116#define WL1271_IRQ_MAX_LOOPS 10
117static void wl1271_irq_work(struct work_struct *work) 419static void wl1271_irq_work(struct work_struct *work)
118{ 420{
119 u32 intr, ctr = WL1271_IRQ_MAX_LOOPS;
120 int ret; 421 int ret;
422 u32 intr;
121 struct wl1271 *wl = 423 struct wl1271 *wl =
122 container_of(work, struct wl1271, irq_work); 424 container_of(work, struct wl1271, irq_work);
123 425
@@ -132,9 +434,10 @@ static void wl1271_irq_work(struct work_struct *work)
132 if (ret < 0) 434 if (ret < 0)
133 goto out; 435 goto out;
134 436
135 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 437 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
136 438
137 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); 439 wl1271_fw_status(wl, wl->fw_status);
440 intr = le32_to_cpu(wl->fw_status->intr);
138 if (!intr) { 441 if (!intr) {
139 wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 442 wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
140 goto out_sleep; 443 goto out_sleep;
@@ -142,46 +445,39 @@ static void wl1271_irq_work(struct work_struct *work)
142 445
143 intr &= WL1271_INTR_MASK; 446 intr &= WL1271_INTR_MASK;
144 447
145 do { 448 if (intr & WL1271_ACX_INTR_EVENT_A) {
146 wl1271_fw_status(wl, wl->fw_status); 449 bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
147 450 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
148 451 wl1271_event_handle(wl, 0, do_ack);
149 if (intr & (WL1271_ACX_INTR_EVENT_A | 452 }
150 WL1271_ACX_INTR_EVENT_B)) {
151 wl1271_debug(DEBUG_IRQ,
152 "WL1271_ACX_INTR_EVENT (0x%x)", intr);
153 if (intr & WL1271_ACX_INTR_EVENT_A)
154 wl1271_event_handle(wl, 0);
155 else
156 wl1271_event_handle(wl, 1);
157 }
158 453
159 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 454 if (intr & WL1271_ACX_INTR_EVENT_B) {
160 wl1271_debug(DEBUG_IRQ, 455 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
161 "WL1271_ACX_INTR_INIT_COMPLETE"); 456 wl1271_event_handle(wl, 1, true);
457 }
162 458
163 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 459 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
164 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 460 wl1271_debug(DEBUG_IRQ,
461 "WL1271_ACX_INTR_INIT_COMPLETE");
165 462
166 if (intr & WL1271_ACX_INTR_DATA) { 463 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
167 u8 tx_res_cnt = wl->fw_status->tx_results_counter - 464 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
168 wl->tx_results_count;
169 465
170 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 466 if (intr & WL1271_ACX_INTR_DATA) {
467 u8 tx_res_cnt = wl->fw_status->tx_results_counter -
468 wl->tx_results_count;
171 469
172 /* check for tx results */ 470 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
173 if (tx_res_cnt)
174 wl1271_tx_complete(wl, tx_res_cnt);
175 471
176 wl1271_rx(wl, wl->fw_status); 472 /* check for tx results */
177 } 473 if (tx_res_cnt)
474 wl1271_tx_complete(wl, tx_res_cnt);
178 475
179 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); 476 wl1271_rx(wl, wl->fw_status);
180 intr &= WL1271_INTR_MASK; 477 }
181 } while (intr && --ctr);
182 478
183out_sleep: 479out_sleep:
184 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 480 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
185 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 481 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
186 wl1271_ps_elp_sleep(wl); 482 wl1271_ps_elp_sleep(wl);
187 483
@@ -205,7 +501,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
205 wl->elp_compl = NULL; 501 wl->elp_compl = NULL;
206 } 502 }
207 503
208 schedule_work(&wl->irq_work); 504 ieee80211_queue_work(wl->hw, &wl->irq_work);
209 spin_unlock_irqrestore(&wl->wl_lock, flags); 505 spin_unlock_irqrestore(&wl->wl_lock, flags);
210 506
211 return IRQ_HANDLED; 507 return IRQ_HANDLED;
@@ -231,7 +527,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
231 } 527 }
232 528
233 wl->fw_len = fw->size; 529 wl->fw_len = fw->size;
234 wl->fw = kmalloc(wl->fw_len, GFP_KERNEL); 530 wl->fw = vmalloc(wl->fw_len);
235 531
236 if (!wl->fw) { 532 if (!wl->fw) {
237 wl1271_error("could not allocate memory for the firmware"); 533 wl1271_error("could not allocate memory for the firmware");
@@ -292,7 +588,7 @@ static void wl1271_fw_wakeup(struct wl1271 *wl)
292 u32 elp_reg; 588 u32 elp_reg;
293 589
294 elp_reg = ELPCTRL_WAKE_UP; 590 elp_reg = ELPCTRL_WAKE_UP;
295 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); 591 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
296} 592}
297 593
298static int wl1271_setup(struct wl1271 *wl) 594static int wl1271_setup(struct wl1271 *wl)
@@ -314,6 +610,7 @@ static int wl1271_setup(struct wl1271 *wl)
314 610
315static int wl1271_chip_wakeup(struct wl1271 *wl) 611static int wl1271_chip_wakeup(struct wl1271 *wl)
316{ 612{
613 struct wl1271_partition_set partition;
317 int ret = 0; 614 int ret = 0;
318 615
319 wl1271_power_on(wl); 616 wl1271_power_on(wl);
@@ -323,11 +620,10 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
323 620
324 /* We don't need a real memory partition here, because we only want 621 /* We don't need a real memory partition here, because we only want
325 * to use the registers at this point. */ 622 * to use the registers at this point. */
326 wl1271_set_partition(wl, 623 memset(&partition, 0, sizeof(partition));
327 0x00000000, 624 partition.reg.start = REGISTERS_BASE;
328 0x00000000, 625 partition.reg.size = REGISTERS_DOWN_SIZE;
329 REGISTERS_BASE, 626 wl1271_set_partition(wl, &partition);
330 REGISTERS_DOWN_SIZE);
331 627
332 /* ELP module wake up */ 628 /* ELP module wake up */
333 wl1271_fw_wakeup(wl); 629 wl1271_fw_wakeup(wl);
@@ -335,7 +631,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
335 /* whal_FwCtrl_BootSm() */ 631 /* whal_FwCtrl_BootSm() */
336 632
337 /* 0. read chip id from CHIP_ID */ 633 /* 0. read chip id from CHIP_ID */
338 wl->chip.id = wl1271_reg_read32(wl, CHIP_ID_B); 634 wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B);
339 635
340 /* 1. check if chip id is valid */ 636 /* 1. check if chip id is valid */
341 637
@@ -346,7 +642,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
346 642
347 ret = wl1271_setup(wl); 643 ret = wl1271_setup(wl);
348 if (ret < 0) 644 if (ret < 0)
349 goto out; 645 goto out_power_off;
350 break; 646 break;
351 case CHIP_ID_1271_PG20: 647 case CHIP_ID_1271_PG20:
352 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 648 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -354,56 +650,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
354 650
355 ret = wl1271_setup(wl); 651 ret = wl1271_setup(wl);
356 if (ret < 0) 652 if (ret < 0)
357 goto out; 653 goto out_power_off;
358 break; 654 break;
359 default: 655 default:
360 wl1271_error("unsupported chip id: 0x%x", wl->chip.id); 656 wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
361 ret = -ENODEV; 657 ret = -ENODEV;
362 goto out; 658 goto out_power_off;
363 } 659 }
364 660
365 if (wl->fw == NULL) { 661 if (wl->fw == NULL) {
366 ret = wl1271_fetch_firmware(wl); 662 ret = wl1271_fetch_firmware(wl);
367 if (ret < 0) 663 if (ret < 0)
368 goto out; 664 goto out_power_off;
369 } 665 }
370 666
371 /* No NVS from netlink, try to get it from the filesystem */ 667 /* No NVS from netlink, try to get it from the filesystem */
372 if (wl->nvs == NULL) { 668 if (wl->nvs == NULL) {
373 ret = wl1271_fetch_nvs(wl); 669 ret = wl1271_fetch_nvs(wl);
374 if (ret < 0) 670 if (ret < 0)
375 goto out; 671 goto out_power_off;
376 } 672 }
377 673
378out: 674 goto out;
379 return ret;
380}
381 675
382static void wl1271_filter_work(struct work_struct *work) 676out_power_off:
383{ 677 wl1271_power_off(wl);
384 struct wl1271 *wl =
385 container_of(work, struct wl1271, filter_work);
386 int ret;
387
388 mutex_lock(&wl->mutex);
389
390 if (wl->state == WL1271_STATE_OFF)
391 goto out;
392
393 ret = wl1271_ps_elp_wakeup(wl, false);
394 if (ret < 0)
395 goto out;
396
397 /* FIXME: replace the magic numbers with proper definitions */
398 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
399 if (ret < 0)
400 goto out_sleep;
401
402out_sleep:
403 wl1271_ps_elp_sleep(wl);
404 678
405out: 679out:
406 mutex_unlock(&wl->mutex); 680 return ret;
407} 681}
408 682
409int wl1271_plt_start(struct wl1271 *wl) 683int wl1271_plt_start(struct wl1271 *wl)
@@ -429,13 +703,26 @@ int wl1271_plt_start(struct wl1271 *wl)
429 703
430 ret = wl1271_boot(wl); 704 ret = wl1271_boot(wl);
431 if (ret < 0) 705 if (ret < 0)
432 goto out; 706 goto out_power_off;
433 707
434 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver); 708 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
435 709
436 ret = wl1271_plt_init(wl); 710 ret = wl1271_plt_init(wl);
437 if (ret < 0) 711 if (ret < 0)
438 goto out; 712 goto out_irq_disable;
713
714 /* Make sure power saving is disabled */
715 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
716 if (ret < 0)
717 goto out_irq_disable;
718
719 goto out;
720
721out_irq_disable:
722 wl1271_disable_interrupts(wl);
723
724out_power_off:
725 wl1271_power_off(wl);
439 726
440out: 727out:
441 mutex_unlock(&wl->mutex); 728 mutex_unlock(&wl->mutex);
@@ -462,6 +749,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
462 wl1271_power_off(wl); 749 wl1271_power_off(wl);
463 750
464 wl->state = WL1271_STATE_OFF; 751 wl->state = WL1271_STATE_OFF;
752 wl->rx_counter = 0;
465 753
466out: 754out:
467 mutex_unlock(&wl->mutex); 755 mutex_unlock(&wl->mutex);
@@ -481,7 +769,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
481 * before that, the tx_work will not be initialized! 769 * before that, the tx_work will not be initialized!
482 */ 770 */
483 771
484 schedule_work(&wl->tx_work); 772 ieee80211_queue_work(wl->hw, &wl->tx_work);
485 773
486 /* 774 /*
487 * The workqueue is slow to process the tx_queue and we need stop 775 * The workqueue is slow to process the tx_queue and we need stop
@@ -501,6 +789,93 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
501 return NETDEV_TX_OK; 789 return NETDEV_TX_OK;
502} 790}
503 791
792static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
793 void *arg)
794{
795 struct net_device *dev;
796 struct wireless_dev *wdev;
797 struct wiphy *wiphy;
798 struct ieee80211_hw *hw;
799 struct wl1271 *wl;
800 struct wl1271 *wl_temp;
801 struct in_device *idev;
802 struct in_ifaddr *ifa = arg;
803 int ret = 0;
804
805 /* FIXME: this ugly function should probably be implemented in the
806 * mac80211, and here should only be a simple callback handling actual
807 * setting of the filters. Now we need to dig up references to
808 * various structures to gain access to what we need.
809 * Also, because of this, there is no "initial" setting of the filter
810 * in "op_start", because we don't want to dig up struct net_device
811 * there - the filter will be set upon first change of the interface
812 * IP address. */
813
814 dev = ifa->ifa_dev->dev;
815
816 wdev = dev->ieee80211_ptr;
817 if (wdev == NULL)
818 return -ENODEV;
819
820 wiphy = wdev->wiphy;
821 if (wiphy == NULL)
822 return -ENODEV;
823
824 hw = wiphy_priv(wiphy);
825 if (hw == NULL)
826 return -ENODEV;
827
828 /* Check that the interface is one supported by this driver. */
829 wl_temp = hw->priv;
830 list_for_each_entry(wl, &wl_list, list) {
831 if (wl == wl_temp)
832 break;
833 }
834 if (wl == NULL)
835 return -ENODEV;
836
837 /* Get the interface IP address for the device. "ifa" will become
838 NULL if:
839 - there is no IPV4 protocol address configured
840 - there are multiple (virtual) IPV4 addresses configured
841 When "ifa" is NULL, filtering will be disabled.
842 */
843 ifa = NULL;
844 idev = dev->ip_ptr;
845 if (idev)
846 ifa = idev->ifa_list;
847
848 if (ifa && ifa->ifa_next)
849 ifa = NULL;
850
851 mutex_lock(&wl->mutex);
852
853 if (wl->state == WL1271_STATE_OFF)
854 goto out;
855
856 ret = wl1271_ps_elp_wakeup(wl, false);
857 if (ret < 0)
858 goto out;
859 if (ifa)
860 ret = wl1271_acx_arp_ip_filter(wl, true,
861 (u8 *)&ifa->ifa_address,
862 ACX_IPV4_VERSION);
863 else
864 ret = wl1271_acx_arp_ip_filter(wl, false, NULL,
865 ACX_IPV4_VERSION);
866 wl1271_ps_elp_sleep(wl);
867
868out:
869 mutex_unlock(&wl->mutex);
870
871 return ret;
872}
873
874static struct notifier_block wl1271_dev_notifier = {
875 .notifier_call = wl1271_dev_notify,
876};
877
878
504static int wl1271_op_start(struct ieee80211_hw *hw) 879static int wl1271_op_start(struct ieee80211_hw *hw)
505{ 880{
506 struct wl1271 *wl = hw->priv; 881 struct wl1271 *wl = hw->priv;
@@ -523,22 +898,32 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
523 898
524 ret = wl1271_boot(wl); 899 ret = wl1271_boot(wl);
525 if (ret < 0) 900 if (ret < 0)
526 goto out; 901 goto out_power_off;
527 902
528 ret = wl1271_hw_init(wl); 903 ret = wl1271_hw_init(wl);
529 if (ret < 0) 904 if (ret < 0)
530 goto out; 905 goto out_irq_disable;
531 906
532 wl->state = WL1271_STATE_ON; 907 wl->state = WL1271_STATE_ON;
533 908
534 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 909 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
535 910
536out: 911 goto out;
537 if (ret < 0)
538 wl1271_power_off(wl);
539 912
913out_irq_disable:
914 wl1271_disable_interrupts(wl);
915
916out_power_off:
917 wl1271_power_off(wl);
918
919out:
540 mutex_unlock(&wl->mutex); 920 mutex_unlock(&wl->mutex);
541 921
922 if (!ret) {
923 list_add(&wl->list, &wl_list);
924 register_inetaddr_notifier(&wl1271_dev_notifier);
925 }
926
542 return ret; 927 return ret;
543} 928}
544 929
@@ -551,6 +936,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
551 936
552 wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); 937 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
553 938
939 unregister_inetaddr_notifier(&wl1271_dev_notifier);
940 list_del(&wl->list);
941
554 mutex_lock(&wl->mutex); 942 mutex_lock(&wl->mutex);
555 943
556 WARN_ON(wl->state != WL1271_STATE_ON); 944 WARN_ON(wl->state != WL1271_STATE_ON);
@@ -570,7 +958,6 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
570 958
571 cancel_work_sync(&wl->irq_work); 959 cancel_work_sync(&wl->irq_work);
572 cancel_work_sync(&wl->tx_work); 960 cancel_work_sync(&wl->tx_work);
573 cancel_work_sync(&wl->filter_work);
574 961
575 mutex_lock(&wl->mutex); 962 mutex_lock(&wl->mutex);
576 963
@@ -581,19 +968,25 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
581 memset(wl->bssid, 0, ETH_ALEN); 968 memset(wl->bssid, 0, ETH_ALEN);
582 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); 969 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
583 wl->ssid_len = 0; 970 wl->ssid_len = 0;
584 wl->listen_int = 1;
585 wl->bss_type = MAX_BSS_TYPE; 971 wl->bss_type = MAX_BSS_TYPE;
972 wl->band = IEEE80211_BAND_2GHZ;
586 973
587 wl->rx_counter = 0; 974 wl->rx_counter = 0;
588 wl->elp = false; 975 wl->elp = false;
589 wl->psm = 0; 976 wl->psm = 0;
977 wl->psm_entry_retry = 0;
590 wl->tx_queue_stopped = false; 978 wl->tx_queue_stopped = false;
591 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 979 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
592 wl->tx_blocks_available = 0; 980 wl->tx_blocks_available = 0;
593 wl->tx_results_count = 0; 981 wl->tx_results_count = 0;
594 wl->tx_packets_count = 0; 982 wl->tx_packets_count = 0;
983 wl->tx_security_last_seq = 0;
984 wl->tx_security_seq_16 = 0;
985 wl->tx_security_seq_32 = 0;
595 wl->time_offset = 0; 986 wl->time_offset = 0;
596 wl->session_counter = 0; 987 wl->session_counter = 0;
988 wl->joined = false;
989
597 for (i = 0; i < NUM_TX_QUEUES; i++) 990 for (i = 0; i < NUM_TX_QUEUES; i++)
598 wl->tx_blocks_freed[i] = 0; 991 wl->tx_blocks_freed[i] = 0;
599 992
@@ -611,6 +1004,12 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
611 conf->type, conf->mac_addr); 1004 conf->type, conf->mac_addr);
612 1005
613 mutex_lock(&wl->mutex); 1006 mutex_lock(&wl->mutex);
1007 if (wl->vif) {
1008 ret = -EBUSY;
1009 goto out;
1010 }
1011
1012 wl->vif = conf->vif;
614 1013
615 switch (conf->type) { 1014 switch (conf->type) {
616 case NL80211_IFTYPE_STATION: 1015 case NL80211_IFTYPE_STATION:
@@ -634,7 +1033,12 @@ out:
634static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1033static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
635 struct ieee80211_if_init_conf *conf) 1034 struct ieee80211_if_init_conf *conf)
636{ 1035{
1036 struct wl1271 *wl = hw->priv;
1037
1038 mutex_lock(&wl->mutex);
637 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 1039 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1040 wl->vif = NULL;
1041 mutex_unlock(&wl->mutex);
638} 1042}
639 1043
640#if 0 1044#if 0
@@ -657,23 +1061,24 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
657 if (ret < 0) 1061 if (ret < 0)
658 goto out; 1062 goto out;
659 1063
660 memcpy(wl->bssid, conf->bssid, ETH_ALEN); 1064 if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
1065 wl1271_debug(DEBUG_MAC80211, "bssid changed");
661 1066
662 ret = wl1271_cmd_build_null_data(wl); 1067 memcpy(wl->bssid, conf->bssid, ETH_ALEN);
663 if (ret < 0)
664 goto out_sleep;
665 1068
666 wl->ssid_len = conf->ssid_len; 1069 ret = wl1271_cmd_join(wl);
667 if (wl->ssid_len) 1070 if (ret < 0)
668 memcpy(wl->ssid, conf->ssid, wl->ssid_len); 1071 goto out_sleep;
669 1072
670 if (wl->bss_type != BSS_TYPE_IBSS) { 1073 ret = wl1271_cmd_build_null_data(wl);
671 /* FIXME: replace the magic numbers with proper definitions */
672 ret = wl1271_cmd_join(wl, wl->bss_type, 5, 100, 1);
673 if (ret < 0) 1074 if (ret < 0)
674 goto out_sleep; 1075 goto out_sleep;
675 } 1076 }
676 1077
1078 wl->ssid_len = conf->ssid_len;
1079 if (wl->ssid_len)
1080 memcpy(wl->ssid, conf->ssid, wl->ssid_len);
1081
677 if (conf->changed & IEEE80211_IFCC_BEACON) { 1082 if (conf->changed & IEEE80211_IFCC_BEACON) {
678 beacon = ieee80211_beacon_get(hw, vif); 1083 beacon = ieee80211_beacon_get(hw, vif);
679 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 1084 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
@@ -691,12 +1096,6 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
691 1096
692 if (ret < 0) 1097 if (ret < 0)
693 goto out_sleep; 1098 goto out_sleep;
694
695 /* FIXME: replace the magic numbers with proper definitions */
696 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
697
698 if (ret < 0)
699 goto out_sleep;
700 } 1099 }
701 1100
702out_sleep: 1101out_sleep:
@@ -724,26 +1123,22 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
724 1123
725 mutex_lock(&wl->mutex); 1124 mutex_lock(&wl->mutex);
726 1125
1126 wl->band = conf->channel->band;
1127
727 ret = wl1271_ps_elp_wakeup(wl, false); 1128 ret = wl1271_ps_elp_wakeup(wl, false);
728 if (ret < 0) 1129 if (ret < 0)
729 goto out; 1130 goto out;
730 1131
731 if (channel != wl->channel) { 1132 if (channel != wl->channel) {
732 u8 old_channel = wl->channel; 1133 /*
1134 * We assume that the stack will configure the right channel
1135 * before associating, so we don't need to send a join
1136 * command here. We will join the right channel when the
1137 * BSSID changes
1138 */
733 wl->channel = channel; 1139 wl->channel = channel;
734
735 /* FIXME: use beacon interval provided by mac80211 */
736 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
737 if (ret < 0) {
738 wl->channel = old_channel;
739 goto out_sleep;
740 }
741 } 1140 }
742 1141
743 ret = wl1271_cmd_build_null_data(wl);
744 if (ret < 0)
745 goto out_sleep;
746
747 if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { 1142 if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
748 wl1271_info("psm enabled"); 1143 wl1271_info("psm enabled");
749 1144
@@ -768,7 +1163,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
768 if (conf->power_level != wl->power_level) { 1163 if (conf->power_level != wl->power_level) {
769 ret = wl1271_acx_tx_power(wl, conf->power_level); 1164 ret = wl1271_acx_tx_power(wl, conf->power_level);
770 if (ret < 0) 1165 if (ret < 0)
771 goto out; 1166 goto out_sleep;
772 1167
773 wl->power_level = conf->power_level; 1168 wl->power_level = conf->power_level;
774 } 1169 }
@@ -782,6 +1177,45 @@ out:
782 return ret; 1177 return ret;
783} 1178}
784 1179
1180struct wl1271_filter_params {
1181 bool enabled;
1182 int mc_list_length;
1183 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
1184};
1185
1186static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
1187 struct dev_addr_list *mc_list)
1188{
1189 struct wl1271_filter_params *fp;
1190 int i;
1191
1192 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
1193 if (!fp) {
1194 wl1271_error("Out of memory setting filters.");
1195 return 0;
1196 }
1197
1198 /* update multicast filtering parameters */
1199 fp->enabled = true;
1200 if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
1201 mc_count = 0;
1202 fp->enabled = false;
1203 }
1204
1205 fp->mc_list_length = 0;
1206 for (i = 0; i < mc_count; i++) {
1207 if (mc_list->da_addrlen == ETH_ALEN) {
1208 memcpy(fp->mc_list[fp->mc_list_length],
1209 mc_list->da_addr, ETH_ALEN);
1210 fp->mc_list_length++;
1211 } else
1212 wl1271_warning("Unknown mc address length.");
1213 mc_list = mc_list->next;
1214 }
1215
1216 return (u64)(unsigned long)fp;
1217}
1218
785#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ 1219#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
786 FIF_ALLMULTI | \ 1220 FIF_ALLMULTI | \
787 FIF_FCSFAIL | \ 1221 FIF_FCSFAIL | \
@@ -791,28 +1225,53 @@ out:
791 1225
792static void wl1271_op_configure_filter(struct ieee80211_hw *hw, 1226static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
793 unsigned int changed, 1227 unsigned int changed,
794 unsigned int *total,u64 multicast) 1228 unsigned int *total, u64 multicast)
795{ 1229{
1230 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
796 struct wl1271 *wl = hw->priv; 1231 struct wl1271 *wl = hw->priv;
1232 int ret;
797 1233
798 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); 1234 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
799 1235
1236 mutex_lock(&wl->mutex);
1237
1238 if (wl->state == WL1271_STATE_OFF)
1239 goto out;
1240
1241 ret = wl1271_ps_elp_wakeup(wl, false);
1242 if (ret < 0)
1243 goto out;
1244
800 *total &= WL1271_SUPPORTED_FILTERS; 1245 *total &= WL1271_SUPPORTED_FILTERS;
801 changed &= WL1271_SUPPORTED_FILTERS; 1246 changed &= WL1271_SUPPORTED_FILTERS;
802 1247
1248 if (*total & FIF_ALLMULTI)
1249 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
1250 else if (fp)
1251 ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
1252 fp->mc_list,
1253 fp->mc_list_length);
1254 if (ret < 0)
1255 goto out_sleep;
1256
1257 kfree(fp);
1258
1259 /* FIXME: We still need to set our filters properly */
1260
1261 /* determine, whether supported filter values have changed */
803 if (changed == 0) 1262 if (changed == 0)
804 return; 1263 goto out_sleep;
805 1264
806 /* FIXME: wl->rx_config and wl->rx_filter are not protected */ 1265 /* apply configured filters */
807 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1266 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
808 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1267 if (ret < 0)
1268 goto out_sleep;
809 1269
810 /* 1270out_sleep:
811 * FIXME: workqueues need to be properly cancelled on stop(), for 1271 wl1271_ps_elp_sleep(wl);
812 * now let's just disable changing the filter settings. They will 1272
813 * be updated any on config(). 1273out:
814 */ 1274 mutex_unlock(&wl->mutex);
815 /* schedule_work(&wl->filter_work); */
816} 1275}
817 1276
818static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1277static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -823,6 +1282,8 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
823 struct wl1271 *wl = hw->priv; 1282 struct wl1271 *wl = hw->priv;
824 const u8 *addr; 1283 const u8 *addr;
825 int ret; 1284 int ret;
1285 u32 tx_seq_32 = 0;
1286 u16 tx_seq_16 = 0;
826 u8 key_type; 1287 u8 key_type;
827 1288
828 static const u8 bcast_addr[ETH_ALEN] = 1289 static const u8 bcast_addr[ETH_ALEN] =
@@ -861,11 +1322,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
861 key_type = KEY_TKIP; 1322 key_type = KEY_TKIP;
862 1323
863 key_conf->hw_key_idx = key_conf->keyidx; 1324 key_conf->hw_key_idx = key_conf->keyidx;
1325 tx_seq_32 = wl->tx_security_seq_32;
1326 tx_seq_16 = wl->tx_security_seq_16;
864 break; 1327 break;
865 case ALG_CCMP: 1328 case ALG_CCMP:
866 key_type = KEY_AES; 1329 key_type = KEY_AES;
867 1330
868 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1331 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1332 tx_seq_32 = wl->tx_security_seq_32;
1333 tx_seq_16 = wl->tx_security_seq_16;
869 break; 1334 break;
870 default: 1335 default:
871 wl1271_error("Unknown key algo 0x%x", key_conf->alg); 1336 wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -879,7 +1344,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
879 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, 1344 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
880 key_conf->keyidx, key_type, 1345 key_conf->keyidx, key_type,
881 key_conf->keylen, key_conf->key, 1346 key_conf->keylen, key_conf->key,
882 addr); 1347 addr, tx_seq_32, tx_seq_16);
883 if (ret < 0) { 1348 if (ret < 0) {
884 wl1271_error("Could not add or replace key"); 1349 wl1271_error("Could not add or replace key");
885 goto out_sleep; 1350 goto out_sleep;
@@ -890,7 +1355,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
890 ret = wl1271_cmd_set_key(wl, KEY_REMOVE, 1355 ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
891 key_conf->keyidx, key_type, 1356 key_conf->keyidx, key_type,
892 key_conf->keylen, key_conf->key, 1357 key_conf->keylen, key_conf->key,
893 addr); 1358 addr, 0, 0);
894 if (ret < 0) { 1359 if (ret < 0) {
895 wl1271_error("Could not remove key"); 1360 wl1271_error("Could not remove key");
896 goto out_sleep; 1361 goto out_sleep;
@@ -921,13 +1386,13 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
921 struct wl1271 *wl = hw->priv; 1386 struct wl1271 *wl = hw->priv;
922 int ret; 1387 int ret;
923 u8 *ssid = NULL; 1388 u8 *ssid = NULL;
924 size_t ssid_len = 0; 1389 size_t len = 0;
925 1390
926 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); 1391 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
927 1392
928 if (req->n_ssids) { 1393 if (req->n_ssids) {
929 ssid = req->ssids[0].ssid; 1394 ssid = req->ssids[0].ssid;
930 ssid_len = req->ssids[0].ssid_len; 1395 len = req->ssids[0].ssid_len;
931 } 1396 }
932 1397
933 mutex_lock(&wl->mutex); 1398 mutex_lock(&wl->mutex);
@@ -936,7 +1401,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
936 if (ret < 0) 1401 if (ret < 0)
937 goto out; 1402 goto out;
938 1403
939 ret = wl1271_cmd_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3); 1404 if (wl1271_11a_enabled())
1405 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
1406 WL1271_SCAN_BAND_DUAL, 3);
1407 else
1408 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
1409 WL1271_SCAN_BAND_2_4_GHZ, 3);
940 1410
941 wl1271_ps_elp_sleep(wl); 1411 wl1271_ps_elp_sleep(wl);
942 1412
@@ -969,6 +1439,22 @@ out:
969 return ret; 1439 return ret;
970} 1440}
971 1441
1442static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
1443{
1444 struct ieee80211_supported_band *band;
1445 u32 enabled_rates = 0;
1446 int bit;
1447
1448 band = wl->hw->wiphy->bands[wl->band];
1449 for (bit = 0; bit < band->n_bitrates; bit++) {
1450 if (basic_rate_set & 0x1)
1451 enabled_rates |= band->bitrates[bit].hw_value;
1452 basic_rate_set >>= 1;
1453 }
1454
1455 return enabled_rates;
1456}
1457
972static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 1458static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
973 struct ieee80211_vif *vif, 1459 struct ieee80211_vif *vif,
974 struct ieee80211_bss_conf *bss_conf, 1460 struct ieee80211_bss_conf *bss_conf,
@@ -990,6 +1476,12 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
990 if (bss_conf->assoc) { 1476 if (bss_conf->assoc) {
991 wl->aid = bss_conf->aid; 1477 wl->aid = bss_conf->aid;
992 1478
1479 /*
1480 * with wl1271, we don't need to update the
1481 * beacon_int and dtim_period, because the firmware
1482 * updates it by itself when the first beacon is
1483 * received after a join.
1484 */
993 ret = wl1271_cmd_build_ps_poll(wl, wl->aid); 1485 ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
994 if (ret < 0) 1486 if (ret < 0)
995 goto out_sleep; 1487 goto out_sleep;
@@ -1005,8 +1497,14 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1005 if (ret < 0) 1497 if (ret < 0)
1006 goto out_sleep; 1498 goto out_sleep;
1007 } 1499 }
1500 } else {
1501 /* use defaults when not associated */
1502 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
1503 wl->aid = 0;
1008 } 1504 }
1505
1009 } 1506 }
1507
1010 if (changed & BSS_CHANGED_ERP_SLOT) { 1508 if (changed & BSS_CHANGED_ERP_SLOT) {
1011 if (bss_conf->use_short_slot) 1509 if (bss_conf->use_short_slot)
1012 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); 1510 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
@@ -1036,6 +1534,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1036 } 1534 }
1037 } 1535 }
1038 1536
1537 if (changed & BSS_CHANGED_BASIC_RATES) {
1538 wl->basic_rate_set = wl1271_enabled_rates_get(
1539 wl, bss_conf->basic_rates);
1540
1541 ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
1542 if (ret < 0) {
1543 wl1271_warning("Set rate policies failed %d", ret);
1544 goto out_sleep;
1545 }
1546 }
1547
1039out_sleep: 1548out_sleep:
1040 wl1271_ps_elp_sleep(wl); 1549 wl1271_ps_elp_sleep(wl);
1041 1550
@@ -1047,44 +1556,44 @@ out:
1047/* can't be const, mac80211 writes to this */ 1556/* can't be const, mac80211 writes to this */
1048static struct ieee80211_rate wl1271_rates[] = { 1557static struct ieee80211_rate wl1271_rates[] = {
1049 { .bitrate = 10, 1558 { .bitrate = 10,
1050 .hw_value = 0x1, 1559 .hw_value = CONF_HW_BIT_RATE_1MBPS,
1051 .hw_value_short = 0x1, }, 1560 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
1052 { .bitrate = 20, 1561 { .bitrate = 20,
1053 .hw_value = 0x2, 1562 .hw_value = CONF_HW_BIT_RATE_2MBPS,
1054 .hw_value_short = 0x2, 1563 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
1055 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1564 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1056 { .bitrate = 55, 1565 { .bitrate = 55,
1057 .hw_value = 0x4, 1566 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
1058 .hw_value_short = 0x4, 1567 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
1059 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1568 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1060 { .bitrate = 110, 1569 { .bitrate = 110,
1061 .hw_value = 0x20, 1570 .hw_value = CONF_HW_BIT_RATE_11MBPS,
1062 .hw_value_short = 0x20, 1571 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
1063 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1572 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1064 { .bitrate = 60, 1573 { .bitrate = 60,
1065 .hw_value = 0x8, 1574 .hw_value = CONF_HW_BIT_RATE_6MBPS,
1066 .hw_value_short = 0x8, }, 1575 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
1067 { .bitrate = 90, 1576 { .bitrate = 90,
1068 .hw_value = 0x10, 1577 .hw_value = CONF_HW_BIT_RATE_9MBPS,
1069 .hw_value_short = 0x10, }, 1578 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
1070 { .bitrate = 120, 1579 { .bitrate = 120,
1071 .hw_value = 0x40, 1580 .hw_value = CONF_HW_BIT_RATE_12MBPS,
1072 .hw_value_short = 0x40, }, 1581 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
1073 { .bitrate = 180, 1582 { .bitrate = 180,
1074 .hw_value = 0x80, 1583 .hw_value = CONF_HW_BIT_RATE_18MBPS,
1075 .hw_value_short = 0x80, }, 1584 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
1076 { .bitrate = 240, 1585 { .bitrate = 240,
1077 .hw_value = 0x200, 1586 .hw_value = CONF_HW_BIT_RATE_24MBPS,
1078 .hw_value_short = 0x200, }, 1587 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
1079 { .bitrate = 360, 1588 { .bitrate = 360,
1080 .hw_value = 0x400, 1589 .hw_value = CONF_HW_BIT_RATE_36MBPS,
1081 .hw_value_short = 0x400, }, 1590 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
1082 { .bitrate = 480, 1591 { .bitrate = 480,
1083 .hw_value = 0x800, 1592 .hw_value = CONF_HW_BIT_RATE_48MBPS,
1084 .hw_value_short = 0x800, }, 1593 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
1085 { .bitrate = 540, 1594 { .bitrate = 540,
1086 .hw_value = 0x1000, 1595 .hw_value = CONF_HW_BIT_RATE_54MBPS,
1087 .hw_value_short = 0x1000, }, 1596 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
1088}; 1597};
1089 1598
1090/* can't be const, mac80211 writes to this */ 1599/* can't be const, mac80211 writes to this */
@@ -1112,6 +1621,88 @@ static struct ieee80211_supported_band wl1271_band_2ghz = {
1112 .n_bitrates = ARRAY_SIZE(wl1271_rates), 1621 .n_bitrates = ARRAY_SIZE(wl1271_rates),
1113}; 1622};
1114 1623
1624/* 5 GHz data rates for WL1273 */
1625static struct ieee80211_rate wl1271_rates_5ghz[] = {
1626 { .bitrate = 60,
1627 .hw_value = CONF_HW_BIT_RATE_6MBPS,
1628 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
1629 { .bitrate = 90,
1630 .hw_value = CONF_HW_BIT_RATE_9MBPS,
1631 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
1632 { .bitrate = 120,
1633 .hw_value = CONF_HW_BIT_RATE_12MBPS,
1634 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
1635 { .bitrate = 180,
1636 .hw_value = CONF_HW_BIT_RATE_18MBPS,
1637 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
1638 { .bitrate = 240,
1639 .hw_value = CONF_HW_BIT_RATE_24MBPS,
1640 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
1641 { .bitrate = 360,
1642 .hw_value = CONF_HW_BIT_RATE_36MBPS,
1643 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
1644 { .bitrate = 480,
1645 .hw_value = CONF_HW_BIT_RATE_48MBPS,
1646 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
1647 { .bitrate = 540,
1648 .hw_value = CONF_HW_BIT_RATE_54MBPS,
1649 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
1650};
1651
1652/* 5 GHz band channels for WL1273 */
1653static struct ieee80211_channel wl1271_channels_5ghz[] = {
1654 { .hw_value = 183, .center_freq = 4915},
1655 { .hw_value = 184, .center_freq = 4920},
1656 { .hw_value = 185, .center_freq = 4925},
1657 { .hw_value = 187, .center_freq = 4935},
1658 { .hw_value = 188, .center_freq = 4940},
1659 { .hw_value = 189, .center_freq = 4945},
1660 { .hw_value = 192, .center_freq = 4960},
1661 { .hw_value = 196, .center_freq = 4980},
1662 { .hw_value = 7, .center_freq = 5035},
1663 { .hw_value = 8, .center_freq = 5040},
1664 { .hw_value = 9, .center_freq = 5045},
1665 { .hw_value = 11, .center_freq = 5055},
1666 { .hw_value = 12, .center_freq = 5060},
1667 { .hw_value = 16, .center_freq = 5080},
1668 { .hw_value = 34, .center_freq = 5170},
1669 { .hw_value = 36, .center_freq = 5180},
1670 { .hw_value = 38, .center_freq = 5190},
1671 { .hw_value = 40, .center_freq = 5200},
1672 { .hw_value = 42, .center_freq = 5210},
1673 { .hw_value = 44, .center_freq = 5220},
1674 { .hw_value = 46, .center_freq = 5230},
1675 { .hw_value = 48, .center_freq = 5240},
1676 { .hw_value = 52, .center_freq = 5260},
1677 { .hw_value = 56, .center_freq = 5280},
1678 { .hw_value = 60, .center_freq = 5300},
1679 { .hw_value = 64, .center_freq = 5320},
1680 { .hw_value = 100, .center_freq = 5500},
1681 { .hw_value = 104, .center_freq = 5520},
1682 { .hw_value = 108, .center_freq = 5540},
1683 { .hw_value = 112, .center_freq = 5560},
1684 { .hw_value = 116, .center_freq = 5580},
1685 { .hw_value = 120, .center_freq = 5600},
1686 { .hw_value = 124, .center_freq = 5620},
1687 { .hw_value = 128, .center_freq = 5640},
1688 { .hw_value = 132, .center_freq = 5660},
1689 { .hw_value = 136, .center_freq = 5680},
1690 { .hw_value = 140, .center_freq = 5700},
1691 { .hw_value = 149, .center_freq = 5745},
1692 { .hw_value = 153, .center_freq = 5765},
1693 { .hw_value = 157, .center_freq = 5785},
1694 { .hw_value = 161, .center_freq = 5805},
1695 { .hw_value = 165, .center_freq = 5825},
1696};
1697
1698
1699static struct ieee80211_supported_band wl1271_band_5ghz = {
1700 .channels = wl1271_channels_5ghz,
1701 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
1702 .bitrates = wl1271_rates_5ghz,
1703 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
1704};
1705
1115static const struct ieee80211_ops wl1271_ops = { 1706static const struct ieee80211_ops wl1271_ops = {
1116 .start = wl1271_op_start, 1707 .start = wl1271_op_start,
1117 .stop = wl1271_op_stop, 1708 .stop = wl1271_op_stop,
@@ -1119,6 +1710,7 @@ static const struct ieee80211_ops wl1271_ops = {
1119 .remove_interface = wl1271_op_remove_interface, 1710 .remove_interface = wl1271_op_remove_interface,
1120 .config = wl1271_op_config, 1711 .config = wl1271_op_config,
1121/* .config_interface = wl1271_op_config_interface, */ 1712/* .config_interface = wl1271_op_config_interface, */
1713 .prepare_multicast = wl1271_op_prepare_multicast,
1122 .configure_filter = wl1271_op_configure_filter, 1714 .configure_filter = wl1271_op_configure_filter,
1123 .tx = wl1271_op_tx, 1715 .tx = wl1271_op_tx,
1124 .set_key = wl1271_op_set_key, 1716 .set_key = wl1271_op_set_key,
@@ -1151,24 +1743,25 @@ static int wl1271_register_hw(struct wl1271 *wl)
1151 1743
1152static int wl1271_init_ieee80211(struct wl1271 *wl) 1744static int wl1271_init_ieee80211(struct wl1271 *wl)
1153{ 1745{
1154 /* 1746 /* The tx descriptor buffer and the TKIP space. */
1155 * The tx descriptor buffer and the TKIP space. 1747 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
1156 * 1748 sizeof(struct wl1271_tx_hw_descr);
1157 * FIXME: add correct 1271 descriptor size
1158 */
1159 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE;
1160 1749
1161 /* unit us */ 1750 /* unit us */
1162 /* FIXME: find a proper value */ 1751 /* FIXME: find a proper value */
1163 wl->hw->channel_change_time = 10000; 1752 wl->hw->channel_change_time = 10000;
1164 1753
1165 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1754 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1166 IEEE80211_HW_NOISE_DBM; 1755 IEEE80211_HW_NOISE_DBM |
1756 IEEE80211_HW_BEACON_FILTER;
1167 1757
1168 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1758 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1169 wl->hw->wiphy->max_scan_ssids = 1; 1759 wl->hw->wiphy->max_scan_ssids = 1;
1170 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 1760 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
1171 1761
1762 if (wl1271_11a_enabled())
1763 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
1764
1172 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); 1765 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
1173 1766
1174 return 0; 1767 return 0;
@@ -1213,29 +1806,33 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1213 wl = hw->priv; 1806 wl = hw->priv;
1214 memset(wl, 0, sizeof(*wl)); 1807 memset(wl, 0, sizeof(*wl));
1215 1808
1809 INIT_LIST_HEAD(&wl->list);
1810
1216 wl->hw = hw; 1811 wl->hw = hw;
1217 dev_set_drvdata(&spi->dev, wl); 1812 dev_set_drvdata(&spi->dev, wl);
1218 wl->spi = spi; 1813 wl->spi = spi;
1219 1814
1220 skb_queue_head_init(&wl->tx_queue); 1815 skb_queue_head_init(&wl->tx_queue);
1221 1816
1222 INIT_WORK(&wl->filter_work, wl1271_filter_work); 1817 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
1223 wl->channel = WL1271_DEFAULT_CHANNEL; 1818 wl->channel = WL1271_DEFAULT_CHANNEL;
1224 wl->scanning = false; 1819 wl->scanning = false;
1225 wl->default_key = 0; 1820 wl->default_key = 0;
1226 wl->listen_int = 1;
1227 wl->rx_counter = 0; 1821 wl->rx_counter = 0;
1228 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1822 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1229 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1823 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1230 wl->elp = false; 1824 wl->elp = false;
1231 wl->psm = 0; 1825 wl->psm = 0;
1232 wl->psm_requested = false; 1826 wl->psm_requested = false;
1827 wl->psm_entry_retry = 0;
1233 wl->tx_queue_stopped = false; 1828 wl->tx_queue_stopped = false;
1234 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1829 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1830 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
1831 wl->band = IEEE80211_BAND_2GHZ;
1832 wl->vif = NULL;
1833 wl->joined = false;
1235 1834
1236 /* We use the default power on sleep time until we know which chip 1835 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
1237 * we're using */
1238 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
1239 wl->tx_frames[i] = NULL; 1836 wl->tx_frames[i] = NULL;
1240 1837
1241 spin_lock_init(&wl->wl_lock); 1838 spin_lock_init(&wl->wl_lock);
@@ -1250,13 +1847,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1250 wl->state = WL1271_STATE_OFF; 1847 wl->state = WL1271_STATE_OFF;
1251 mutex_init(&wl->mutex); 1848 mutex_init(&wl->mutex);
1252 1849
1253 wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
1254 if (!wl->rx_descriptor) {
1255 wl1271_error("could not allocate memory for rx descriptor");
1256 ret = -ENOMEM;
1257 goto out_free;
1258 }
1259
1260 /* This is the only SPI value that we need to set here, the rest 1850 /* This is the only SPI value that we need to set here, the rest
1261 * comes from the board-peripherals file */ 1851 * comes from the board-peripherals file */
1262 spi->bits_per_word = 32; 1852 spi->bits_per_word = 32;
@@ -1298,6 +1888,9 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1298 } 1888 }
1299 dev_set_drvdata(&wl1271_device.dev, wl); 1889 dev_set_drvdata(&wl1271_device.dev, wl);
1300 1890
1891 /* Apply default driver configuration. */
1892 wl1271_conf_init(wl);
1893
1301 ret = wl1271_init_ieee80211(wl); 1894 ret = wl1271_init_ieee80211(wl);
1302 if (ret) 1895 if (ret)
1303 goto out_platform; 1896 goto out_platform;
@@ -1319,9 +1912,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1319 free_irq(wl->irq, wl); 1912 free_irq(wl->irq, wl);
1320 1913
1321 out_free: 1914 out_free:
1322 kfree(wl->rx_descriptor);
1323 wl->rx_descriptor = NULL;
1324
1325 ieee80211_free_hw(hw); 1915 ieee80211_free_hw(hw);
1326 1916
1327 return ret; 1917 return ret;
@@ -1337,14 +1927,11 @@ static int __devexit wl1271_remove(struct spi_device *spi)
1337 platform_device_unregister(&wl1271_device); 1927 platform_device_unregister(&wl1271_device);
1338 free_irq(wl->irq, wl); 1928 free_irq(wl->irq, wl);
1339 kfree(wl->target_mem_map); 1929 kfree(wl->target_mem_map);
1340 kfree(wl->fw); 1930 vfree(wl->fw);
1341 wl->fw = NULL; 1931 wl->fw = NULL;
1342 kfree(wl->nvs); 1932 kfree(wl->nvs);
1343 wl->nvs = NULL; 1933 wl->nvs = NULL;
1344 1934
1345 kfree(wl->rx_descriptor);
1346 wl->rx_descriptor = NULL;
1347
1348 kfree(wl->fw_status); 1935 kfree(wl->fw_status);
1349 kfree(wl->tx_res_if); 1936 kfree(wl->tx_res_if);
1350 1937
@@ -1391,3 +1978,5 @@ module_exit(wl1271_exit);
1391 1978
1392MODULE_LICENSE("GPL"); 1979MODULE_LICENSE("GPL");
1393MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 1980MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
1981MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
1982MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 1dc74b0c7736..507cd91d7eed 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -27,25 +27,38 @@
27 27
28#define WL1271_WAKEUP_TIMEOUT 500 28#define WL1271_WAKEUP_TIMEOUT 500
29 29
30void wl1271_elp_work(struct work_struct *work)
31{
32 struct delayed_work *dwork;
33 struct wl1271 *wl;
34
35 dwork = container_of(work, struct delayed_work, work);
36 wl = container_of(dwork, struct wl1271, elp_work);
37
38 wl1271_debug(DEBUG_PSM, "elp work");
39
40 mutex_lock(&wl->mutex);
41
42 if (wl->elp || !wl->psm)
43 goto out;
44
45 wl1271_debug(DEBUG_PSM, "chip to elp");
46 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
47 wl->elp = true;
48
49out:
50 mutex_unlock(&wl->mutex);
51}
52
53#define ELP_ENTRY_DELAY 5
54
30/* Routines to toggle sleep mode while in ELP */ 55/* Routines to toggle sleep mode while in ELP */
31void wl1271_ps_elp_sleep(struct wl1271 *wl) 56void wl1271_ps_elp_sleep(struct wl1271 *wl)
32{ 57{
33 /* 58 if (wl->psm) {
34 * FIXME: due to a problem in the firmware (causing a firmware 59 cancel_delayed_work(&wl->elp_work);
35 * crash), ELP entry is prevented below. Remove the "true" to 60 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
36 * re-enable ELP entry. 61 msecs_to_jiffies(ELP_ENTRY_DELAY));
37 */
38 if (true || wl->elp || !wl->psm)
39 return;
40
41 /*
42 * Go to ELP unless there is work already pending - pending work
43 * will immediately wakeup the chipset anyway.
44 */
45 if (!work_pending(&wl->irq_work) && !work_pending(&wl->tx_work)) {
46 wl1271_debug(DEBUG_PSM, "chip to elp");
47 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
48 wl->elp = true;
49 } 62 }
50} 63}
51 64
@@ -73,7 +86,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
73 wl->elp_compl = &compl; 86 wl->elp_compl = &compl;
74 spin_unlock_irqrestore(&wl->wl_lock, flags); 87 spin_unlock_irqrestore(&wl->wl_lock, flags);
75 88
76 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 89 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
77 90
78 if (!pending) { 91 if (!pending) {
79 ret = wait_for_completion_timeout( 92 ret = wait_for_completion_timeout(
@@ -111,6 +124,17 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
111 switch (mode) { 124 switch (mode) {
112 case STATION_POWER_SAVE_MODE: 125 case STATION_POWER_SAVE_MODE:
113 wl1271_debug(DEBUG_PSM, "entering psm"); 126 wl1271_debug(DEBUG_PSM, "entering psm");
127
128 /* enable beacon filtering */
129 ret = wl1271_acx_beacon_filter_opt(wl, true);
130 if (ret < 0)
131 return ret;
132
133 /* enable beacon early termination */
134 ret = wl1271_acx_bet_enable(wl, true);
135 if (ret < 0)
136 return ret;
137
114 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); 138 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
115 if (ret < 0) 139 if (ret < 0)
116 return ret; 140 return ret;
@@ -128,6 +152,16 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
128 if (ret < 0) 152 if (ret < 0)
129 return ret; 153 return ret;
130 154
155 /* disable beacon early termination */
156 ret = wl1271_acx_bet_enable(wl, false);
157 if (ret < 0)
158 return ret;
159
160 /* disable beacon filtering */
161 ret = wl1271_acx_beacon_filter_opt(wl, false);
162 if (ret < 0)
163 return ret;
164
131 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); 165 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
132 if (ret < 0) 166 if (ret < 0)
133 return ret; 167 return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index de2bd3c7dc9c..779653d0ae85 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -30,6 +30,6 @@
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode); 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode);
31void wl1271_ps_elp_sleep(struct wl1271 *wl); 31void wl1271_ps_elp_sleep(struct wl1271 *wl);
32int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 32int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
33 33void wl1271_elp_work(struct work_struct *work);
34 34
35#endif /* __WL1271_PS_H__ */ 35#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index f8ed4a4fc691..1f237389d1c7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -34,7 +34,7 @@
34#define REGISTERS_WORK_SIZE 0x0000b000 34#define REGISTERS_WORK_SIZE 0x0000b000
35 35
36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC 36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC
37#define STATUS_MEM_ADDRESS 0x40400 37#define FW_STATUS_ADDR (0x14FC0 + 0xA000)
38 38
39/* ELP register commands */ 39/* ELP register commands */
40#define ELPCTRL_WAKE_UP 0x1 40#define ELPCTRL_WAKE_UP 0x1
@@ -213,7 +213,6 @@
213==============================================*/ 213==============================================*/
214#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) 214#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0)
215 215
216#define RX_DRIVER_DUMMY_WRITE_ADDRESS (REGISTERS_BASE + 0x0534)
217#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) 216#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538)
218 217
219/* Device Configuration registers*/ 218/* Device Configuration registers*/
@@ -614,50 +613,6 @@ enum {
614 MAX_RADIO_BANDS = 0xFF 613 MAX_RADIO_BANDS = 0xFF
615}; 614};
616 615
617enum {
618 NO_RATE = 0,
619 RATE_1MBPS = 0x0A,
620 RATE_2MBPS = 0x14,
621 RATE_5_5MBPS = 0x37,
622 RATE_6MBPS = 0x0B,
623 RATE_9MBPS = 0x0F,
624 RATE_11MBPS = 0x6E,
625 RATE_12MBPS = 0x0A,
626 RATE_18MBPS = 0x0E,
627 RATE_22MBPS = 0xDC,
628 RATE_24MBPS = 0x09,
629 RATE_36MBPS = 0x0D,
630 RATE_48MBPS = 0x08,
631 RATE_54MBPS = 0x0C
632};
633
634enum {
635 RATE_INDEX_1MBPS = 0,
636 RATE_INDEX_2MBPS = 1,
637 RATE_INDEX_5_5MBPS = 2,
638 RATE_INDEX_6MBPS = 3,
639 RATE_INDEX_9MBPS = 4,
640 RATE_INDEX_11MBPS = 5,
641 RATE_INDEX_12MBPS = 6,
642 RATE_INDEX_18MBPS = 7,
643 RATE_INDEX_22MBPS = 8,
644 RATE_INDEX_24MBPS = 9,
645 RATE_INDEX_36MBPS = 10,
646 RATE_INDEX_48MBPS = 11,
647 RATE_INDEX_54MBPS = 12,
648 RATE_INDEX_MAX = RATE_INDEX_54MBPS,
649 MAX_RATE_INDEX,
650 INVALID_RATE_INDEX = MAX_RATE_INDEX,
651 RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF
652};
653
654enum {
655 RATE_MASK_1MBPS = 0x1,
656 RATE_MASK_2MBPS = 0x2,
657 RATE_MASK_5_5MBPS = 0x4,
658 RATE_MASK_11MBPS = 0x20,
659};
660
661#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ 616#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */
662#define OFDM_RATE_BIT BIT(6) 617#define OFDM_RATE_BIT BIT(6)
663#define PBCC_RATE_BIT BIT(7) 618#define PBCC_RATE_BIT BIT(7)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ad8b6904c5eb..ca645f38109b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -30,14 +30,15 @@
30static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 30static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
31 u32 drv_rx_counter) 31 u32 drv_rx_counter)
32{ 32{
33 return status->rx_pkt_descs[drv_rx_counter] & RX_MEM_BLOCK_MASK; 33 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
34 RX_MEM_BLOCK_MASK;
34} 35}
35 36
36static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, 37static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
37 u32 drv_rx_counter) 38 u32 drv_rx_counter)
38{ 39{
39 return (status->rx_pkt_descs[drv_rx_counter] & RX_BUF_SIZE_MASK) >> 40 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
40 RX_BUF_SIZE_SHIFT_DIV; 41 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
41} 42}
42 43
43/* The values of this table must match the wl1271_rates[] array */ 44/* The values of this table must match the wl1271_rates[] array */
@@ -70,6 +71,36 @@ static u8 wl1271_rx_rate_to_idx[] = {
70 0 /* WL1271_RATE_1 */ 71 0 /* WL1271_RATE_1 */
71}; 72};
72 73
74/* The values of this table must match the wl1271_rates[] array */
75static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
76 /* MCS rates are used only with 11n */
77 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
78 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
79 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
80 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
81 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
82 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
83 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
84 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
85
86 7, /* WL1271_RATE_54 */
87 6, /* WL1271_RATE_48 */
88 5, /* WL1271_RATE_36 */
89 4, /* WL1271_RATE_24 */
90
91 /* TI-specific rate */
92 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
93
94 3, /* WL1271_RATE_18 */
95 2, /* WL1271_RATE_12 */
96 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
97 1, /* WL1271_RATE_9 */
98 0, /* WL1271_RATE_6 */
99 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
100 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
101 WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
102};
103
73static void wl1271_rx_status(struct wl1271 *wl, 104static void wl1271_rx_status(struct wl1271 *wl,
74 struct wl1271_rx_descriptor *desc, 105 struct wl1271_rx_descriptor *desc,
75 struct ieee80211_rx_status *status, 106 struct ieee80211_rx_status *status,
@@ -77,12 +108,21 @@ static void wl1271_rx_status(struct wl1271 *wl,
77{ 108{
78 memset(status, 0, sizeof(struct ieee80211_rx_status)); 109 memset(status, 0, sizeof(struct ieee80211_rx_status));
79 110
80 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) 111 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
112 WL1271_RX_DESC_BAND_BG) {
81 status->band = IEEE80211_BAND_2GHZ; 113 status->band = IEEE80211_BAND_2GHZ;
82 else 114 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
115 } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
116 WL1271_RX_DESC_BAND_A) {
117 status->band = IEEE80211_BAND_5GHZ;
118 status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
119 } else
83 wl1271_warning("unsupported band 0x%x", 120 wl1271_warning("unsupported band 0x%x",
84 desc->flags & WL1271_RX_DESC_BAND_MASK); 121 desc->flags & WL1271_RX_DESC_BAND_MASK);
85 122
123 if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
124 wl1271_warning("unsupported rate");
125
86 /* 126 /*
87 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the 127 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
88 * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we 128 * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we
@@ -91,12 +131,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
91 */ 131 */
92 status->signal = desc->rssi; 132 status->signal = desc->rssi;
93 133
94 /* FIXME: Should this be optimized? */
95 status->qual = (desc->rssi - WL1271_RX_MIN_RSSI) * 100 /
96 (WL1271_RX_MAX_RSSI - WL1271_RX_MIN_RSSI);
97 status->qual = min(status->qual, 100);
98 status->qual = max(status->qual, 0);
99
100 /* 134 /*
101 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we 135 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
102 * need to divide by two for now, but TI has been discussing about 136 * need to divide by two for now, but TI has been discussing about
@@ -109,17 +143,11 @@ static void wl1271_rx_status(struct wl1271 *wl,
109 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 143 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
110 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 144 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
111 145
112 if (likely(!(desc->flags & WL1271_RX_DESC_DECRYPT_FAIL))) 146 if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL)))
113 status->flag |= RX_FLAG_DECRYPTED; 147 status->flag |= RX_FLAG_DECRYPTED;
114 148 if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL))
115 if (unlikely(desc->flags & WL1271_RX_DESC_MIC_FAIL))
116 status->flag |= RX_FLAG_MMIC_ERROR; 149 status->flag |= RX_FLAG_MMIC_ERROR;
117 } 150 }
118
119 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
120
121 if (status->rate_idx == WL1271_RX_RATE_UNSUPPORTED)
122 wl1271_warning("unsupported rate");
123} 151}
124 152
125static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) 153static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
@@ -131,14 +159,14 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
131 u8 *buf; 159 u8 *buf;
132 u8 beacon = 0; 160 u8 beacon = 0;
133 161
134 skb = dev_alloc_skb(length); 162 skb = __dev_alloc_skb(length, GFP_KERNEL);
135 if (!skb) { 163 if (!skb) {
136 wl1271_error("Couldn't allocate RX frame"); 164 wl1271_error("Couldn't allocate RX frame");
137 return; 165 return;
138 } 166 }
139 167
140 buf = skb_put(skb, length); 168 buf = skb_put(skb, length);
141 wl1271_spi_reg_read(wl, WL1271_SLV_MEM_DATA, buf, length, true); 169 wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
142 170
143 /* the data read starts with the descriptor */ 171 /* the data read starts with the descriptor */
144 desc = (struct wl1271_rx_descriptor *) buf; 172 desc = (struct wl1271_rx_descriptor *) buf;
@@ -156,7 +184,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
156 beacon ? "beacon" : ""); 184 beacon ? "beacon" : "");
157 185
158 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 186 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
159 ieee80211_rx(wl->hw, skb); 187 ieee80211_rx_ni(wl->hw, skb);
160} 188}
161 189
162void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) 190void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
@@ -176,15 +204,15 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
176 break; 204 break;
177 } 205 }
178 206
179 wl->rx_mem_pool_addr.addr = 207 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
180 (mem_block << 8) + wl_mem_map->packet_memory_pool_start; 208 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
181 wl->rx_mem_pool_addr.addr_extra = 209 wl->rx_mem_pool_addr.addr_extra =
182 wl->rx_mem_pool_addr.addr + 4; 210 wl->rx_mem_pool_addr.addr + 4;
183 211
184 /* Choose the block we want to read */ 212 /* Choose the block we want to read */
185 wl1271_spi_reg_write(wl, WL1271_SLV_REG_DATA, 213 wl1271_spi_write(wl, WL1271_SLV_REG_DATA,
186 &wl->rx_mem_pool_addr, 214 &wl->rx_mem_pool_addr,
187 sizeof(wl->rx_mem_pool_addr), false); 215 sizeof(wl->rx_mem_pool_addr), false);
188 216
189 wl1271_rx_handle_data(wl, buf_size); 217 wl1271_rx_handle_data(wl, buf_size);
190 218
@@ -192,9 +220,5 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
192 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 220 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
193 } 221 }
194 222
195 wl1271_reg_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 223 wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
196
197 /* This is a workaround for some problems in the chip */
198 wl1271_reg_write32(wl, RX_DRIVER_DUMMY_WRITE_ADDRESS, 0x1);
199
200} 224}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index d1ca60e43a25..1ae6d1783ed4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -102,14 +102,14 @@
102#define RX_BUF_SIZE_SHIFT_DIV 6 102#define RX_BUF_SIZE_SHIFT_DIV 6
103 103
104struct wl1271_rx_descriptor { 104struct wl1271_rx_descriptor {
105 u16 length; 105 __le16 length;
106 u8 status; 106 u8 status;
107 u8 flags; 107 u8 flags;
108 u8 rate; 108 u8 rate;
109 u8 channel; 109 u8 channel;
110 s8 rssi; 110 s8 rssi;
111 u8 snr; 111 u8 snr;
112 u32 timestamp; 112 __le32 timestamp;
113 u8 packet_class; 113 u8 packet_class;
114 u8 process_id; 114 u8 process_id;
115 u8 pad_len; 115 u8 pad_len;
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 4a12880c16a8..02978a16e732 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -30,17 +30,29 @@
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_spi.h" 31#include "wl1271_spi.h"
32 32
33static int wl1271_translate_reg_addr(struct wl1271 *wl, int addr) 33static int wl1271_translate_addr(struct wl1271 *wl, int addr)
34{ 34{
35 return addr - wl->physical_reg_addr + wl->virtual_reg_addr; 35 /*
36} 36 * To translate, first check to which window of addresses the
37 37 * particular address belongs. Then subtract the starting address
38static int wl1271_translate_mem_addr(struct wl1271 *wl, int addr) 38 * of that window from the address. Then, add offset of the
39{ 39 * translated region.
40 return addr - wl->physical_mem_addr + wl->virtual_mem_addr; 40 *
41 * The translated regions occur next to each other in physical device
42 * memory, so just add the sizes of the preceeding address regions to
43 * get the offset to the new region.
44 *
45 * Currently, only the two first regions are addressed, and the
46 * assumption is that all addresses will fall into either of those
47 * two.
48 */
49 if ((addr >= wl->part.reg.start) &&
50 (addr < wl->part.reg.start + wl->part.reg.size))
51 return addr - wl->part.reg.start + wl->part.mem.size;
52 else
53 return addr - wl->part.mem.start;
41} 54}
42 55
43
44void wl1271_spi_reset(struct wl1271 *wl) 56void wl1271_spi_reset(struct wl1271 *wl)
45{ 57{
46 u8 *cmd; 58 u8 *cmd;
@@ -123,133 +135,137 @@ void wl1271_spi_init(struct wl1271 *wl)
123 135
124/* Set the SPI partitions to access the chip addresses 136/* Set the SPI partitions to access the chip addresses
125 * 137 *
126 * There are two VIRTUAL (SPI) partitions (the memory partition and the 138 * To simplify driver code, a fixed (virtual) memory map is defined for
127 * registers partition), which are mapped to two different areas of the 139 * register and memory addresses. Because in the chipset, in different stages
128 * PHYSICAL (hardware) memory. This function also makes other checks to 140 * of operation, those addresses will move around, an address translation
129 * ensure that the partitions are not overlapping. In the diagram below, the 141 * mechanism is required.
130 * memory partition comes before the register partition, but the opposite is
131 * also supported.
132 * 142 *
133 * PHYSICAL address 143 * There are four partitions (three memory and one register partition),
144 * which are mapped to two different areas of the hardware memory.
145 *
146 * Virtual address
134 * space 147 * space
135 * 148 *
136 * | | 149 * | |
137 * ...+----+--> mem_start 150 * ...+----+--> mem.start
138 * VIRTUAL address ... | | 151 * Physical address ... | |
139 * space ... | | [PART_0] 152 * space ... | | [PART_0]
140 * ... | | 153 * ... | |
141 * 0x00000000 <--+----+... ...+----+--> mem_start + mem_size 154 * 00000000 <--+----+... ...+----+--> mem.start + mem.size
142 * | | ... | | 155 * | | ... | |
143 * |MEM | ... | | 156 * |MEM | ... | |
144 * | | ... | | 157 * | | ... | |
145 * part_size <--+----+... | | {unused area) 158 * mem.size <--+----+... | | {unused area)
146 * | | ... | | 159 * | | ... | |
147 * |REG | ... | | 160 * |REG | ... | |
148 * part_size | | ... | | 161 * mem.size | | ... | |
149 * + <--+----+... ...+----+--> reg_start 162 * + <--+----+... ...+----+--> reg.start
150 * reg_size ... | | 163 * reg.size | | ... | |
151 * ... | | [PART_1] 164 * |MEM2| ... | | [PART_1]
152 * ... | | 165 * | | ... | |
153 * ...+----+--> reg_start + reg_size 166 * ...+----+--> reg.start + reg.size
154 * | | 167 * | |
155 * 168 *
156 */ 169 */
157int wl1271_set_partition(struct wl1271 *wl, 170int wl1271_set_partition(struct wl1271 *wl,
158 u32 mem_start, u32 mem_size, 171 struct wl1271_partition_set *p)
159 u32 reg_start, u32 reg_size)
160{ 172{
161 struct wl1271_partition *partition; 173 /* copy partition info */
162 struct spi_transfer t; 174 memcpy(&wl->part, p, sizeof(*p));
163 struct spi_message m;
164 size_t len, cmd_len;
165 u32 *cmd;
166 int addr;
167
168 cmd_len = sizeof(u32) + 2 * sizeof(struct wl1271_partition);
169 cmd = kzalloc(cmd_len, GFP_KERNEL);
170 if (!cmd)
171 return -ENOMEM;
172
173 spi_message_init(&m);
174 memset(&t, 0, sizeof(t));
175
176 partition = (struct wl1271_partition *) (cmd + 1);
177 addr = HW_ACCESS_PART0_SIZE_ADDR;
178 len = 2 * sizeof(struct wl1271_partition);
179
180 *cmd |= WSPI_CMD_WRITE;
181 *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
182 *cmd |= addr & WSPI_CMD_BYTE_ADDR;
183 175
184 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 176 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
185 mem_start, mem_size); 177 p->mem.start, p->mem.size);
186 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 178 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
187 reg_start, reg_size); 179 p->reg.start, p->reg.size);
188 180 wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
189 /* Make sure that the two partitions together don't exceed the 181 p->mem2.start, p->mem2.size);
190 * address range */ 182 wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
191 if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) { 183 p->mem3.start, p->mem3.size);
192 wl1271_debug(DEBUG_SPI, "Total size exceeds maximum virtual" 184
193 " address range. Truncating partition[0]."); 185 /* write partition info to the chipset */
194 mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size; 186 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
195 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 187 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
196 mem_start, mem_size); 188 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
197 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 189 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
198 reg_start, reg_size); 190 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
199 } 191 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
192 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
200 193
201 if ((mem_start < reg_start) && 194 return 0;
202 ((mem_start + mem_size) > reg_start)) { 195}
203 /* Guarantee that the memory partition doesn't overlap the
204 * registers partition */
205 wl1271_debug(DEBUG_SPI, "End of partition[0] is "
206 "overlapping partition[1]. Adjusted.");
207 mem_size = reg_start - mem_start;
208 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
209 mem_start, mem_size);
210 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
211 reg_start, reg_size);
212 } else if ((reg_start < mem_start) &&
213 ((reg_start + reg_size) > mem_start)) {
214 /* Guarantee that the register partition doesn't overlap the
215 * memory partition */
216 wl1271_debug(DEBUG_SPI, "End of partition[1] is"
217 " overlapping partition[0]. Adjusted.");
218 reg_size = mem_start - reg_start;
219 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
220 mem_start, mem_size);
221 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
222 reg_start, reg_size);
223 }
224 196
225 partition[0].start = mem_start; 197#define WL1271_BUSY_WORD_TIMEOUT 1000
226 partition[0].size = mem_size;
227 partition[1].start = reg_start;
228 partition[1].size = reg_size;
229 198
230 wl->physical_mem_addr = mem_start; 199/* FIXME: Check busy words, removed due to SPI bug */
231 wl->physical_reg_addr = reg_start; 200#if 0
201static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
202{
203 struct spi_transfer t[1];
204 struct spi_message m;
205 u32 *busy_buf;
206 int num_busy_bytes = 0;
232 207
233 wl->virtual_mem_addr = 0; 208 wl1271_info("spi read BUSY!");
234 wl->virtual_reg_addr = mem_size;
235 209
236 t.tx_buf = cmd; 210 /*
237 t.len = cmd_len; 211 * Look for the non-busy word in the read buffer, and if found,
238 spi_message_add_tail(&t, &m); 212 * read in the remaining data into the buffer.
213 */
214 busy_buf = (u32 *)buf;
215 for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
216 num_busy_bytes += sizeof(u32);
217 if (*busy_buf & 0x1) {
218 spi_message_init(&m);
219 memset(t, 0, sizeof(t));
220 memmove(buf, busy_buf, len - num_busy_bytes);
221 t[0].rx_buf = buf + (len - num_busy_bytes);
222 t[0].len = num_busy_bytes;
223 spi_message_add_tail(&t[0], &m);
224 spi_sync(wl->spi, &m);
225 return;
226 }
227 }
239 228
240 spi_sync(wl->spi, &m); 229 /*
230 * Read further busy words from SPI until a non-busy word is
231 * encountered, then read the data itself into the buffer.
232 */
233 wl1271_info("spi read BUSY-polling needed!");
241 234
242 kfree(cmd); 235 num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
236 busy_buf = wl->buffer_busyword;
237 while (num_busy_bytes) {
238 num_busy_bytes--;
239 spi_message_init(&m);
240 memset(t, 0, sizeof(t));
241 t[0].rx_buf = busy_buf;
242 t[0].len = sizeof(u32);
243 spi_message_add_tail(&t[0], &m);
244 spi_sync(wl->spi, &m);
245
246 if (*busy_buf & 0x1) {
247 spi_message_init(&m);
248 memset(t, 0, sizeof(t));
249 t[0].rx_buf = buf;
250 t[0].len = len;
251 spi_message_add_tail(&t[0], &m);
252 spi_sync(wl->spi, &m);
253 return;
254 }
255 }
243 256
244 return 0; 257 /* The SPI bus is unresponsive, the read failed. */
258 memset(buf, 0, len);
259 wl1271_error("SPI read busy-word timeout!\n");
245} 260}
261#endif
246 262
247void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, 263void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
248 size_t len, bool fixed) 264 size_t len, bool fixed)
249{ 265{
250 struct spi_transfer t[3]; 266 struct spi_transfer t[3];
251 struct spi_message m; 267 struct spi_message m;
252 u8 *busy_buf; 268 u32 *busy_buf;
253 u32 *cmd; 269 u32 *cmd;
254 270
255 cmd = &wl->buffer_cmd; 271 cmd = &wl->buffer_cmd;
@@ -281,14 +297,16 @@ void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
281 297
282 spi_sync(wl->spi, &m); 298 spi_sync(wl->spi, &m);
283 299
284 /* FIXME: check busy words */ 300 /* FIXME: Check busy words, removed due to SPI bug */
301 /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
302 wl1271_spi_read_busy(wl, buf, len); */
285 303
286 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); 304 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
287 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); 305 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
288} 306}
289 307
290void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, 308void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
291 size_t len, bool fixed) 309 size_t len, bool fixed)
292{ 310{
293 struct spi_transfer t[2]; 311 struct spi_transfer t[2];
294 struct spi_message m; 312 struct spi_message m;
@@ -321,62 +339,77 @@ void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
321 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); 339 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
322} 340}
323 341
324void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, 342void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
325 size_t len) 343 bool fixed)
326{ 344{
327 int physical; 345 int physical;
328 346
329 physical = wl1271_translate_mem_addr(wl, addr); 347 physical = wl1271_translate_addr(wl, addr);
330 348
331 wl1271_spi_read(wl, physical, buf, len, false); 349 wl1271_spi_raw_read(wl, physical, buf, len, fixed);
332} 350}
333 351
334void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, 352void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
335 size_t len) 353 bool fixed)
336{ 354{
337 int physical; 355 int physical;
338 356
339 physical = wl1271_translate_mem_addr(wl, addr); 357 physical = wl1271_translate_addr(wl, addr);
340 358
341 wl1271_spi_write(wl, physical, buf, len, false); 359 wl1271_spi_raw_write(wl, physical, buf, len, fixed);
342} 360}
343 361
344void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len, 362u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
345 bool fixed)
346{ 363{
347 int physical; 364 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
348 365}
349 physical = wl1271_translate_reg_addr(wl, addr);
350 366
351 wl1271_spi_read(wl, physical, buf, len, fixed); 367void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
368{
369 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
352} 370}
353 371
354void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len, 372void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
355 bool fixed)
356{ 373{
357 int physical; 374 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
375 addr = (addr >> 1) + 0x30000;
376 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
358 377
359 physical = wl1271_translate_reg_addr(wl, addr); 378 /* write value to OCP_POR_WDATA */
379 wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
360 380
361 wl1271_spi_write(wl, physical, buf, len, fixed); 381 /* write 1 to OCP_CMD */
382 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
362} 383}
363 384
364u32 wl1271_mem_read32(struct wl1271 *wl, int addr) 385u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
365{ 386{
366 return wl1271_read32(wl, wl1271_translate_mem_addr(wl, addr)); 387 u32 val;
367} 388 int timeout = OCP_CMD_LOOP;
368 389
369void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val) 390 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
370{ 391 addr = (addr >> 1) + 0x30000;
371 wl1271_write32(wl, wl1271_translate_mem_addr(wl, addr), val); 392 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
372}
373 393
374u32 wl1271_reg_read32(struct wl1271 *wl, int addr) 394 /* write 2 to OCP_CMD */
375{ 395 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
376 return wl1271_read32(wl, wl1271_translate_reg_addr(wl, addr));
377}
378 396
379void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val) 397 /* poll for data ready */
380{ 398 do {
381 wl1271_write32(wl, wl1271_translate_reg_addr(wl, addr), val); 399 val = wl1271_spi_read32(wl, OCP_DATA_READ);
400 timeout--;
401 } while (!(val & OCP_READY_MASK) && timeout);
402
403 if (!timeout) {
404 wl1271_warning("Top register access timed out.");
405 return 0xffff;
406 }
407
408 /* check data status and return if OK */
409 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
410 return val & 0xffff;
411 else {
412 wl1271_warning("Top register access returned error.");
413 return 0xffff;
414 }
382} 415}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index 2c9968458646..cb7df1c56314 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -29,10 +29,14 @@
29 29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31 31
32#define HW_ACCESS_PART0_SIZE_ADDR 0x1FFC0 32#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
33#define HW_ACCESS_PART0_START_ADDR 0x1FFC4 33#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
34#define HW_ACCESS_PART1_SIZE_ADDR 0x1FFC8 34#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
35#define HW_ACCESS_PART1_START_ADDR 0x1FFCC 35#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
36 40
37#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
38 42
@@ -67,47 +71,56 @@
67 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32)) 71 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
68#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 72#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
69 73
74#define OCP_CMD_LOOP 32
75
76#define OCP_CMD_WRITE 0x1
77#define OCP_CMD_READ 0x2
78
79#define OCP_READY_MASK BIT(18)
80#define OCP_STATUS_MASK (BIT(16) | BIT(17))
81
82#define OCP_STATUS_NO_RESP 0x00000
83#define OCP_STATUS_OK 0x10000
84#define OCP_STATUS_REQ_FAILED 0x20000
85#define OCP_STATUS_RESP_ERROR 0x30000
70 86
71/* Raw target IO, address is not translated */ 87/* Raw target IO, address is not translated */
72void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, 88void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
73 size_t len, bool fixed); 89 size_t len, bool fixed);
74void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, 90void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
75 size_t len, bool fixed); 91 size_t len, bool fixed);
76 92
77/* Memory target IO, address is tranlated to partition 0 */ 93/* Translated target IO */
78void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, size_t len); 94void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
79void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, size_t len); 95 bool fixed);
80u32 wl1271_mem_read32(struct wl1271 *wl, int addr); 96void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
81void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val); 97 bool fixed);
98u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
99void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
82 100
83/* Registers IO */ 101/* Top Register IO */
84void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len, 102void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
85 bool fixed); 103u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
86void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len,
87 bool fixed);
88u32 wl1271_reg_read32(struct wl1271 *wl, int addr);
89void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val);
90 104
91/* INIT and RESET words */ 105/* INIT and RESET words */
92void wl1271_spi_reset(struct wl1271 *wl); 106void wl1271_spi_reset(struct wl1271 *wl);
93void wl1271_spi_init(struct wl1271 *wl); 107void wl1271_spi_init(struct wl1271 *wl);
94int wl1271_set_partition(struct wl1271 *wl, 108int wl1271_set_partition(struct wl1271 *wl,
95 u32 part_start, u32 part_size, 109 struct wl1271_partition_set *p);
96 u32 reg_start, u32 reg_size);
97 110
98static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 111static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
99{ 112{
100 wl1271_spi_read(wl, addr, &wl->buffer_32, 113 wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
101 sizeof(wl->buffer_32), false); 114 sizeof(wl->buffer_32), false);
102 115
103 return wl->buffer_32; 116 return wl->buffer_32;
104} 117}
105 118
106static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) 119static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
107{ 120{
108 wl->buffer_32 = val; 121 wl->buffer_32 = val;
109 wl1271_spi_write(wl, addr, &wl->buffer_32, 122 wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
110 sizeof(wl->buffer_32), false); 123 sizeof(wl->buffer_32), false);
111} 124}
112 125
113#endif /* __WL1271_SPI_H__ */ 126#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index ff221258b941..00af065c77c2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -33,8 +33,7 @@
33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) 33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
34{ 34{
35 int i; 35 int i;
36 36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
37 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
38 if (wl->tx_frames[i] == NULL) { 37 if (wl->tx_frames[i] == NULL) {
39 wl->tx_frames[i] = skb; 38 wl->tx_frames[i] = skb;
40 return i; 39 return i;
@@ -58,8 +57,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
58 /* approximate the number of blocks required for this packet 57 /* approximate the number of blocks required for this packet
59 in the firmware */ 58 in the firmware */
60 /* FIXME: try to figure out what is done here and make it cleaner */ 59 /* FIXME: try to figure out what is done here and make it cleaner */
61 total_blocks = (skb->len) >> TX_HW_BLOCK_SHIFT_DIV; 60 total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
62 excluded = (total_blocks << 2) + (skb->len & 0xff) + 34; 61 excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
63 total_blocks += (excluded > 252) ? 2 : 1; 62 total_blocks += (excluded > 252) ? 2 : 1;
64 total_blocks += TX_HW_BLOCK_SPARE; 63 total_blocks += TX_HW_BLOCK_SPARE;
65 64
@@ -89,15 +88,25 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
89{ 88{
90 struct wl1271_tx_hw_descr *desc; 89 struct wl1271_tx_hw_descr *desc;
91 int pad; 90 int pad;
91 u16 tx_attr;
92 92
93 desc = (struct wl1271_tx_hw_descr *) skb->data; 93 desc = (struct wl1271_tx_hw_descr *) skb->data;
94 94
95 /* relocate space for security header */
96 if (extra) {
97 void *framestart = skb->data + sizeof(*desc);
98 u16 fc = *(u16 *)(framestart + extra);
99 int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
100 memmove(framestart, framestart + extra, hdrlen);
101 }
102
95 /* configure packet life time */ 103 /* configure packet life time */
96 desc->start_time = jiffies_to_usecs(jiffies) - wl->time_offset; 104 desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
97 desc->life_time = TX_HW_MGMT_PKT_LIFETIME_TU; 105 wl->time_offset);
106 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
98 107
99 /* configure the tx attributes */ 108 /* configure the tx attributes */
100 desc->tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 109 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
101 /* FIXME: do we know the packet priority? can we identify mgmt 110 /* FIXME: do we know the packet priority? can we identify mgmt
102 packets, and use max prio for them at least? */ 111 packets, and use max prio for them at least? */
103 desc->tid = 0; 112 desc->tid = 0;
@@ -106,11 +115,13 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
106 115
107 /* align the length (and store in terms of words) */ 116 /* align the length (and store in terms of words) */
108 pad = WL1271_TX_ALIGN(skb->len); 117 pad = WL1271_TX_ALIGN(skb->len);
109 desc->length = pad >> 2; 118 desc->length = cpu_to_le16(pad >> 2);
110 119
111 /* calculate number of padding bytes */ 120 /* calculate number of padding bytes */
112 pad = pad - skb->len; 121 pad = pad - skb->len;
113 desc->tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
123
124 desc->tx_attr = cpu_to_le16(tx_attr);
114 125
115 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 126 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
116 return 0; 127 return 0;
@@ -147,11 +158,11 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
147 len = WL1271_TX_ALIGN(skb->len); 158 len = WL1271_TX_ALIGN(skb->len);
148 159
149 /* perform a fixed address block write with the packet */ 160 /* perform a fixed address block write with the packet */
150 wl1271_spi_reg_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true); 161 wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
151 162
152 /* write packet new counter into the write access register */ 163 /* write packet new counter into the write access register */
153 wl->tx_packets_count++; 164 wl->tx_packets_count++;
154 wl1271_reg_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 165 wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
155 166
156 desc = (struct wl1271_tx_hw_descr *) skb->data; 167 desc = (struct wl1271_tx_hw_descr *) skb->data;
157 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", 168 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -254,14 +265,13 @@ out:
254static void wl1271_tx_complete_packet(struct wl1271 *wl, 265static void wl1271_tx_complete_packet(struct wl1271 *wl,
255 struct wl1271_tx_hw_res_descr *result) 266 struct wl1271_tx_hw_res_descr *result)
256{ 267{
257
258 struct ieee80211_tx_info *info; 268 struct ieee80211_tx_info *info;
259 struct sk_buff *skb; 269 struct sk_buff *skb;
260 u32 header_len; 270 u16 seq;
261 int id = result->id; 271 int id = result->id;
262 272
263 /* check for id legality */ 273 /* check for id legality */
264 if (id >= TX_HW_RESULT_QUEUE_LEN || wl->tx_frames[id] == NULL) { 274 if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
265 wl1271_warning("TX result illegal id: %d", id); 275 wl1271_warning("TX result illegal id: %d", id);
266 return; 276 return;
267 } 277 }
@@ -284,22 +294,32 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
284 /* info->status.retry_count = result->ack_failures; */ 294 /* info->status.retry_count = result->ack_failures; */
285 wl->stats.retry_count += result->ack_failures; 295 wl->stats.retry_count += result->ack_failures;
286 296
287 /* get header len */ 297 /* update security sequence number */
298 seq = wl->tx_security_seq_16 +
299 (result->lsb_security_sequence_number -
300 wl->tx_security_last_seq);
301 wl->tx_security_last_seq = result->lsb_security_sequence_number;
302
303 if (seq < wl->tx_security_seq_16)
304 wl->tx_security_seq_32++;
305 wl->tx_security_seq_16 = seq;
306
307 /* remove private header from packet */
308 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
309
310 /* remove TKIP header space if present */
288 if (info->control.hw_key && 311 if (info->control.hw_key &&
289 info->control.hw_key->alg == ALG_TKIP) 312 info->control.hw_key->alg == ALG_TKIP) {
290 header_len = WL1271_TKIP_IV_SPACE + 313 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
291 sizeof(struct wl1271_tx_hw_descr); 314 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
292 else 315 skb_pull(skb, WL1271_TKIP_IV_SPACE);
293 header_len = sizeof(struct wl1271_tx_hw_descr); 316 }
294 317
295 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" 318 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
296 " status 0x%x", 319 " status 0x%x",
297 result->id, skb, result->ack_failures, 320 result->id, skb, result->ack_failures,
298 result->rate_class_index, result->status); 321 result->rate_class_index, result->status);
299 322
300 /* remove private header from packet */
301 skb_pull(skb, header_len);
302
303 /* return the packet to the stack */ 323 /* return the packet to the stack */
304 ieee80211_tx_status(wl->hw, skb); 324 ieee80211_tx_status(wl->hw, skb);
305 wl->tx_frames[result->id] = NULL; 325 wl->tx_frames[result->id] = NULL;
@@ -315,8 +335,8 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
315 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); 335 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
316 336
317 /* read the tx results from the chipset */ 337 /* read the tx results from the chipset */
318 wl1271_spi_mem_read(wl, memmap->tx_result, 338 wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result),
319 wl->tx_res_if, sizeof(*wl->tx_res_if)); 339 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
320 340
321 /* verify that the result buffer is not getting overrun */ 341 /* verify that the result buffer is not getting overrun */
322 if (count > TX_HW_RESULT_QUEUE_LEN) { 342 if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -337,10 +357,10 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
337 } 357 }
338 358
339 /* write host counter to chipset (to ack) */ 359 /* write host counter to chipset (to ack) */
340 wl1271_mem_write32(wl, memmap->tx_result + 360 wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) +
341 offsetof(struct wl1271_tx_hw_res_if, 361 offsetof(struct wl1271_tx_hw_res_if,
342 tx_result_host_counter), 362 tx_result_host_counter),
343 wl->tx_res_if->tx_result_fw_counter); 363 le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
344} 364}
345 365
346/* caller must hold wl->mutex */ 366/* caller must hold wl->mutex */
@@ -364,7 +384,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
364 ieee80211_tx_status(wl->hw, skb); 384 ieee80211_tx_status(wl->hw, skb);
365 } 385 }
366 386
367 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) 387 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
368 if (wl->tx_frames[i] != NULL) { 388 if (wl->tx_frames[i] != NULL) {
369 skb = wl->tx_frames[i]; 389 skb = wl->tx_frames[i];
370 info = IEEE80211_SKB_CB(skb); 390 info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 4a614067ddba..416396caf0a0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -58,7 +58,7 @@
58 58
59struct wl1271_tx_hw_descr { 59struct wl1271_tx_hw_descr {
60 /* Length of packet in words, including descriptor+header+data */ 60 /* Length of packet in words, including descriptor+header+data */
61 u16 length; 61 __le16 length;
62 /* Number of extra memory blocks to allocate for this packet in 62 /* Number of extra memory blocks to allocate for this packet in
63 addition to the number of blocks derived from the packet length */ 63 addition to the number of blocks derived from the packet length */
64 u8 extra_mem_blocks; 64 u8 extra_mem_blocks;
@@ -67,12 +67,12 @@ struct wl1271_tx_hw_descr {
67 HW!! */ 67 HW!! */
68 u8 total_mem_blocks; 68 u8 total_mem_blocks;
69 /* Device time (in us) when the packet arrived to the driver */ 69 /* Device time (in us) when the packet arrived to the driver */
70 u32 start_time; 70 __le32 start_time;
71 /* Max delay in TUs until transmission. The last device time the 71 /* Max delay in TUs until transmission. The last device time the
72 packet can be transmitted is: startTime+(1024*LifeTime) */ 72 packet can be transmitted is: startTime+(1024*LifeTime) */
73 u16 life_time; 73 __le16 life_time;
74 /* Bitwise fields - see TX_ATTR... definitions above. */ 74 /* Bitwise fields - see TX_ATTR... definitions above. */
75 u16 tx_attr; 75 __le16 tx_attr;
76 /* Packet identifier used also in the Tx-Result. */ 76 /* Packet identifier used also in the Tx-Result. */
77 u8 id; 77 u8 id;
78 /* The packet TID value (as User-Priority) */ 78 /* The packet TID value (as User-Priority) */
@@ -100,12 +100,12 @@ struct wl1271_tx_hw_res_descr {
100 several possible reasons for failure. */ 100 several possible reasons for failure. */
101 u8 status; 101 u8 status;
102 /* Total air access duration including all retrys and overheads.*/ 102 /* Total air access duration including all retrys and overheads.*/
103 u16 medium_usage; 103 __le16 medium_usage;
104 /* The time passed from host xfer to Tx-complete.*/ 104 /* The time passed from host xfer to Tx-complete.*/
105 u32 fw_handling_time; 105 __le32 fw_handling_time;
106 /* Total media delay 106 /* Total media delay
107 (from 1st EDCA AIFS counter until TX Complete). */ 107 (from 1st EDCA AIFS counter until TX Complete). */
108 u32 medium_delay; 108 __le32 medium_delay;
109 /* LS-byte of last TKIP seq-num (saved per AC for recovery). */ 109 /* LS-byte of last TKIP seq-num (saved per AC for recovery). */
110 u8 lsb_security_sequence_number; 110 u8 lsb_security_sequence_number;
111 /* Retry count - number of transmissions without successful ACK.*/ 111 /* Retry count - number of transmissions without successful ACK.*/
@@ -118,8 +118,8 @@ struct wl1271_tx_hw_res_descr {
118} __attribute__ ((packed)); 118} __attribute__ ((packed));
119 119
120struct wl1271_tx_hw_res_if { 120struct wl1271_tx_hw_res_if {
121 u32 tx_result_fw_counter; 121 __le32 tx_result_fw_counter;
122 u32 tx_result_host_counter; 122 __le32 tx_result_host_counter;
123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
124} __attribute__ ((packed)); 124} __attribute__ ((packed));
125 125
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 657c2dbcb7d3..055d7bc6f592 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -122,8 +122,8 @@ struct wl12xx_null_data_template {
122} __attribute__ ((packed)); 122} __attribute__ ((packed));
123 123
124struct wl12xx_ps_poll_template { 124struct wl12xx_ps_poll_template {
125 u16 fc; 125 __le16 fc;
126 u16 aid; 126 __le16 aid;
127 u8 bssid[ETH_ALEN]; 127 u8 bssid[ETH_ALEN];
128 u8 ta[ETH_ALEN]; 128 u8 ta[ETH_ALEN];
129} __attribute__ ((packed)); 129} __attribute__ ((packed));
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 4f1e0cfe609b..891bdab49887 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -381,7 +381,7 @@ static void wl3501_free_tx_buffer(struct wl3501_card *this, u16 ptr)
381 381
382static int wl3501_esbq_req_test(struct wl3501_card *this) 382static int wl3501_esbq_req_test(struct wl3501_card *this)
383{ 383{
384 u8 tmp; 384 u8 tmp = 0;
385 385
386 wl3501_get_from_wla(this, this->esbq_req_head + 3, &tmp, sizeof(tmp)); 386 wl3501_get_from_wla(this, this->esbq_req_head + 3, &tmp, sizeof(tmp));
387 return tmp & 0x80; 387 return tmp & 0x80;
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index bc81974a2bc7..33c8be7ec8e6 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -112,6 +112,9 @@ exit:
112 return err; 112 return err;
113} 113}
114 114
115MODULE_FIRMWARE("zd1201-ap.fw");
116MODULE_FIRMWARE("zd1201.fw");
117
115static void zd1201_usbfree(struct urb *urb) 118static void zd1201_usbfree(struct urb *urb)
116{ 119{
117 struct zd1201 *zd = urb->context; 120 struct zd1201 *zd = urb->context;
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
index 74b31eafe72d..5f809695f71a 100644
--- a/drivers/net/wireless/zd1211rw/Kconfig
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -1,6 +1,6 @@
1config ZD1211RW 1config ZD1211RW
2 tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" 2 tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
3 depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on USB && MAC80211 && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 ---help--- 5 ---help---
6 This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless 6 This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 4e79a9800134..dfa1b9bc22c8 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -755,7 +755,7 @@ static int hw_reset_phy(struct zd_chip *chip)
755static int zd1211_hw_init_hmac(struct zd_chip *chip) 755static int zd1211_hw_init_hmac(struct zd_chip *chip)
756{ 756{
757 static const struct zd_ioreq32 ioreqs[] = { 757 static const struct zd_ioreq32 ioreqs[] = {
758 { CR_ZD1211_RETRY_MAX, 0x2 }, 758 { CR_ZD1211_RETRY_MAX, ZD1211_RETRY_COUNT },
759 { CR_RX_THRESHOLD, 0x000c0640 }, 759 { CR_RX_THRESHOLD, 0x000c0640 },
760 }; 760 };
761 761
@@ -767,7 +767,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
767static int zd1211b_hw_init_hmac(struct zd_chip *chip) 767static int zd1211b_hw_init_hmac(struct zd_chip *chip)
768{ 768{
769 static const struct zd_ioreq32 ioreqs[] = { 769 static const struct zd_ioreq32 ioreqs[] = {
770 { CR_ZD1211B_RETRY_MAX, 0x02020202 }, 770 { CR_ZD1211B_RETRY_MAX, ZD1211B_RETRY_COUNT },
771 { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f }, 771 { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f },
772 { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f }, 772 { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f },
773 { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f }, 773 { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f },
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 678c139a840c..9fd8f3508d66 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -642,13 +642,29 @@ enum {
642#define CR_ZD1211B_TXOP CTL_REG(0x0b20) 642#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
643#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 643#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
644 644
645/* Value for CR_ZD1211_RETRY_MAX & CR_ZD1211B_RETRY_MAX. Vendor driver uses 2,
646 * we use 0. The first rate is tried (count+2), then all next rates are tried
647 * twice, until 1 Mbits is tried. */
648#define ZD1211_RETRY_COUNT 0
649#define ZD1211B_RETRY_COUNT \
650 (ZD1211_RETRY_COUNT << 0)| \
651 (ZD1211_RETRY_COUNT << 8)| \
652 (ZD1211_RETRY_COUNT << 16)| \
653 (ZD1211_RETRY_COUNT << 24)
654
645/* Used to detect PLL lock */ 655/* Used to detect PLL lock */
646#define UW2453_INTR_REG ((zd_addr_t)0x85c1) 656#define UW2453_INTR_REG ((zd_addr_t)0x85c1)
647 657
648#define CWIN_SIZE 0x007f043f 658#define CWIN_SIZE 0x007f043f
649 659
650 660
651#define HWINT_ENABLED 0x004f0000 661#define HWINT_ENABLED \
662 (INT_TX_COMPLETE_EN| \
663 INT_RX_COMPLETE_EN| \
664 INT_RETRY_FAIL_EN| \
665 INT_WAKEUP_EN| \
666 INT_CFG_NEXT_BCN_EN)
667
652#define HWINT_DISABLED 0 668#define HWINT_DISABLED 0
653 669
654#define E2P_PWR_INT_GUARD 8 670#define E2P_PWR_INT_GUARD 8
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6d666359a42f..8a243732c519 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -88,6 +88,34 @@ static const struct ieee80211_rate zd_rates[] = {
88 .flags = 0 }, 88 .flags = 0 },
89}; 89};
90 90
91/*
92 * Zydas retry rates table. Each line is listed in the same order as
93 * in zd_rates[] and contains all the rate used when a packet is sent
94 * starting with a given rates. Let's consider an example :
95 *
96 * "11 Mbits : 4, 3, 2, 1, 0" means :
97 * - packet is sent using 4 different rates
98 * - 1st rate is index 3 (ie 11 Mbits)
99 * - 2nd rate is index 2 (ie 5.5 Mbits)
100 * - 3rd rate is index 1 (ie 2 Mbits)
101 * - 4th rate is index 0 (ie 1 Mbits)
102 */
103
104static const struct tx_retry_rate zd_retry_rates[] = {
105 { /* 1 Mbits */ 1, { 0 }},
106 { /* 2 Mbits */ 2, { 1, 0 }},
107 { /* 5.5 Mbits */ 3, { 2, 1, 0 }},
108 { /* 11 Mbits */ 4, { 3, 2, 1, 0 }},
109 { /* 6 Mbits */ 5, { 4, 3, 2, 1, 0 }},
110 { /* 9 Mbits */ 6, { 5, 4, 3, 2, 1, 0}},
111 { /* 12 Mbits */ 5, { 6, 3, 2, 1, 0 }},
112 { /* 18 Mbits */ 6, { 7, 6, 3, 2, 1, 0 }},
113 { /* 24 Mbits */ 6, { 8, 6, 3, 2, 1, 0 }},
114 { /* 36 Mbits */ 7, { 9, 8, 6, 3, 2, 1, 0 }},
115 { /* 48 Mbits */ 8, {10, 9, 8, 6, 3, 2, 1, 0 }},
116 { /* 54 Mbits */ 9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }}
117};
118
91static const struct ieee80211_channel zd_channels[] = { 119static const struct ieee80211_channel zd_channels[] = {
92 { .center_freq = 2412, .hw_value = 1 }, 120 { .center_freq = 2412, .hw_value = 1 },
93 { .center_freq = 2417, .hw_value = 2 }, 121 { .center_freq = 2417, .hw_value = 2 },
@@ -282,7 +310,7 @@ static void zd_op_stop(struct ieee80211_hw *hw)
282} 310}
283 311
284/** 312/**
285 * tx_status - reports tx status of a packet if required 313 * zd_mac_tx_status - reports tx status of a packet if required
286 * @hw - a &struct ieee80211_hw pointer 314 * @hw - a &struct ieee80211_hw pointer
287 * @skb - a sk-buffer 315 * @skb - a sk-buffer
288 * @flags: extra flags to set in the TX status info 316 * @flags: extra flags to set in the TX status info
@@ -295,15 +323,49 @@ static void zd_op_stop(struct ieee80211_hw *hw)
295 * 323 *
296 * If no status information has been requested, the skb is freed. 324 * If no status information has been requested, the skb is freed.
297 */ 325 */
298static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 326static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
299 int ackssi, bool success) 327 int ackssi, struct tx_status *tx_status)
300{ 328{
301 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 329 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
330 int i;
331 int success = 1, retry = 1;
332 int first_idx;
333 const struct tx_retry_rate *retries;
302 334
303 ieee80211_tx_info_clear_status(info); 335 ieee80211_tx_info_clear_status(info);
304 336
305 if (success) 337 if (tx_status) {
338 success = !tx_status->failure;
339 retry = tx_status->retry + success;
340 }
341
342 if (success) {
343 /* success */
306 info->flags |= IEEE80211_TX_STAT_ACK; 344 info->flags |= IEEE80211_TX_STAT_ACK;
345 } else {
346 /* failure */
347 info->flags &= ~IEEE80211_TX_STAT_ACK;
348 }
349
350 first_idx = info->status.rates[0].idx;
351 ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
352 retries = &zd_retry_rates[first_idx];
353 ZD_ASSERT(0<=retry && retry<=retries->count);
354
355 info->status.rates[0].idx = retries->rate[0];
356 info->status.rates[0].count = 1; // (retry > 1 ? 2 : 1);
357
358 for (i=1; i<IEEE80211_TX_MAX_RATES-1 && i<retry; i++) {
359 info->status.rates[i].idx = retries->rate[i];
360 info->status.rates[i].count = 1; // ((i==retry-1) && success ? 1:2);
361 }
362 for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
363 info->status.rates[i].idx = retries->rate[retry-1];
364 info->status.rates[i].count = 1; // (success ? 1:2);
365 }
366 if (i<IEEE80211_TX_MAX_RATES)
367 info->status.rates[i].idx = -1; /* terminate */
368
307 info->status.ack_signal = ackssi; 369 info->status.ack_signal = ackssi;
308 ieee80211_tx_status_irqsafe(hw, skb); 370 ieee80211_tx_status_irqsafe(hw, skb);
309} 371}
@@ -316,16 +378,79 @@ static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
316 * transferred. The first frame from the tx queue, will be selected and 378 * transferred. The first frame from the tx queue, will be selected and
317 * reported as error to the upper layers. 379 * reported as error to the upper layers.
318 */ 380 */
319void zd_mac_tx_failed(struct ieee80211_hw *hw) 381void zd_mac_tx_failed(struct urb *urb)
320{ 382{
321 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue; 383 struct ieee80211_hw * hw = zd_usb_to_hw(urb->context);
384 struct zd_mac *mac = zd_hw_mac(hw);
385 struct sk_buff_head *q = &mac->ack_wait_queue;
322 struct sk_buff *skb; 386 struct sk_buff *skb;
387 struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
388 unsigned long flags;
389 int success = !tx_status->failure;
390 int retry = tx_status->retry + success;
391 int found = 0;
392 int i, position = 0;
323 393
324 skb = skb_dequeue(q); 394 q = &mac->ack_wait_queue;
325 if (skb == NULL) 395 spin_lock_irqsave(&q->lock, flags);
326 return; 396
397 skb_queue_walk(q, skb) {
398 struct ieee80211_hdr *tx_hdr;
399 struct ieee80211_tx_info *info;
400 int first_idx, final_idx;
401 const struct tx_retry_rate *retries;
402 u8 final_rate;
403
404 position ++;
405
406 /* if the hardware reports a failure and we had a 802.11 ACK
407 * pending, then we skip the first skb when searching for a
408 * matching frame */
409 if (tx_status->failure && mac->ack_pending &&
410 skb_queue_is_first(q, skb)) {
411 continue;
412 }
413
414 tx_hdr = (struct ieee80211_hdr *)skb->data;
415
416 /* we skip all frames not matching the reported destination */
417 if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
418 continue;
419 }
420
421 /* we skip all frames not matching the reported final rate */
327 422
328 tx_status(hw, skb, 0, 0); 423 info = IEEE80211_SKB_CB(skb);
424 first_idx = info->status.rates[0].idx;
425 ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
426 retries = &zd_retry_rates[first_idx];
427 if (retry < 0 || retry > retries->count) {
428 continue;
429 }
430
431 ZD_ASSERT(0<=retry && retry<=retries->count);
432 final_idx = retries->rate[retry-1];
433 final_rate = zd_rates[final_idx].hw_value;
434
435 if (final_rate != tx_status->rate) {
436 continue;
437 }
438
439 found = 1;
440 break;
441 }
442
443 if (found) {
444 for (i=1; i<=position; i++) {
445 skb = __skb_dequeue(q);
446 zd_mac_tx_status(hw, skb,
447 mac->ack_pending ? mac->ack_signal : 0,
448 i == position ? tx_status : NULL);
449 mac->ack_pending = 0;
450 }
451 }
452
453 spin_unlock_irqrestore(&q->lock, flags);
329} 454}
330 455
331/** 456/**
@@ -342,18 +467,27 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
342{ 467{
343 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
344 struct ieee80211_hw *hw = info->rate_driver_data[0]; 469 struct ieee80211_hw *hw = info->rate_driver_data[0];
470 struct zd_mac *mac = zd_hw_mac(hw);
471
472 ieee80211_tx_info_clear_status(info);
345 473
346 skb_pull(skb, sizeof(struct zd_ctrlset)); 474 skb_pull(skb, sizeof(struct zd_ctrlset));
347 if (unlikely(error || 475 if (unlikely(error ||
348 (info->flags & IEEE80211_TX_CTL_NO_ACK))) { 476 (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
349 tx_status(hw, skb, 0, !error); 477 /*
478 * FIXME : do we need to fill in anything ?
479 */
480 ieee80211_tx_status_irqsafe(hw, skb);
350 } else { 481 } else {
351 struct sk_buff_head *q = 482 struct sk_buff_head *q = &mac->ack_wait_queue;
352 &zd_hw_mac(hw)->ack_wait_queue;
353 483
354 skb_queue_tail(q, skb); 484 skb_queue_tail(q, skb);
355 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) 485 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
356 zd_mac_tx_failed(hw); 486 zd_mac_tx_status(hw, skb_dequeue(q),
487 mac->ack_pending ? mac->ack_signal : 0,
488 NULL);
489 mac->ack_pending = 0;
490 }
357 } 491 }
358} 492}
359 493
@@ -606,27 +740,47 @@ fail:
606static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr, 740static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
607 struct ieee80211_rx_status *stats) 741 struct ieee80211_rx_status *stats)
608{ 742{
743 struct zd_mac *mac = zd_hw_mac(hw);
609 struct sk_buff *skb; 744 struct sk_buff *skb;
610 struct sk_buff_head *q; 745 struct sk_buff_head *q;
611 unsigned long flags; 746 unsigned long flags;
747 int found = 0;
748 int i, position = 0;
612 749
613 if (!ieee80211_is_ack(rx_hdr->frame_control)) 750 if (!ieee80211_is_ack(rx_hdr->frame_control))
614 return 0; 751 return 0;
615 752
616 q = &zd_hw_mac(hw)->ack_wait_queue; 753 q = &mac->ack_wait_queue;
617 spin_lock_irqsave(&q->lock, flags); 754 spin_lock_irqsave(&q->lock, flags);
618 skb_queue_walk(q, skb) { 755 skb_queue_walk(q, skb) {
619 struct ieee80211_hdr *tx_hdr; 756 struct ieee80211_hdr *tx_hdr;
620 757
758 position ++;
759
760 if (mac->ack_pending && skb_queue_is_first(q, skb))
761 continue;
762
621 tx_hdr = (struct ieee80211_hdr *)skb->data; 763 tx_hdr = (struct ieee80211_hdr *)skb->data;
622 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN))) 764 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
623 { 765 {
624 __skb_unlink(skb, q); 766 found = 1;
625 tx_status(hw, skb, stats->signal, 1); 767 break;
626 goto out;
627 } 768 }
628 } 769 }
629out: 770
771 if (found) {
772 for (i=1; i<position; i++) {
773 skb = __skb_dequeue(q);
774 zd_mac_tx_status(hw, skb,
775 mac->ack_pending ? mac->ack_signal : 0,
776 NULL);
777 mac->ack_pending = 0;
778 }
779
780 mac->ack_pending = 1;
781 mac->ack_signal = stats->signal;
782 }
783
630 spin_unlock_irqrestore(&q->lock, flags); 784 spin_unlock_irqrestore(&q->lock, flags);
631 return 1; 785 return 1;
632} 786}
@@ -709,6 +863,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
709 skb_reserve(skb, 2); 863 skb_reserve(skb, 2);
710 } 864 }
711 865
866 /* FIXME : could we avoid this big memcpy ? */
712 memcpy(skb_put(skb, length), buffer, length); 867 memcpy(skb_put(skb, length), buffer, length);
713 868
714 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); 869 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
@@ -999,7 +1154,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
999 hw->queues = 1; 1154 hw->queues = 1;
1000 hw->extra_tx_headroom = sizeof(struct zd_ctrlset); 1155 hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
1001 1156
1157 /*
1158 * Tell mac80211 that we support multi rate retries
1159 */
1160 hw->max_rates = IEEE80211_TX_MAX_RATES;
1161 hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */
1162
1002 skb_queue_head_init(&mac->ack_wait_queue); 1163 skb_queue_head_init(&mac->ack_wait_queue);
1164 mac->ack_pending = 0;
1003 1165
1004 zd_chip_init(&mac->chip, hw, intf); 1166 zd_chip_init(&mac->chip, hw, intf);
1005 housekeeping_init(mac); 1167 housekeeping_init(mac);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 7c2759118d13..630c298a730e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -140,6 +140,21 @@ struct rx_status {
140#define ZD_RX_CRC16_ERROR 0x40 140#define ZD_RX_CRC16_ERROR 0x40
141#define ZD_RX_ERROR 0x80 141#define ZD_RX_ERROR 0x80
142 142
143struct tx_retry_rate {
144 int count; /* number of valid element in rate[] array */
145 int rate[10]; /* retry rates, described by an index in zd_rates[] */
146};
147
148struct tx_status {
149 u8 type; /* must always be 0x01 : USB_INT_TYPE */
150 u8 id; /* must always be 0xa0 : USB_INT_ID_RETRY_FAILED */
151 u8 rate;
152 u8 pad;
153 u8 mac[ETH_ALEN];
154 u8 retry;
155 u8 failure;
156} __attribute__((packed));
157
143enum mac_flags { 158enum mac_flags {
144 MAC_FIXED_CHANNEL = 0x01, 159 MAC_FIXED_CHANNEL = 0x01,
145}; 160};
@@ -150,7 +165,7 @@ struct housekeeping {
150 165
151#define ZD_MAC_STATS_BUFFER_SIZE 16 166#define ZD_MAC_STATS_BUFFER_SIZE 16
152 167
153#define ZD_MAC_MAX_ACK_WAITERS 10 168#define ZD_MAC_MAX_ACK_WAITERS 50
154 169
155struct zd_mac { 170struct zd_mac {
156 struct zd_chip chip; 171 struct zd_chip chip;
@@ -184,6 +199,12 @@ struct zd_mac {
184 199
185 /* whether to pass control frames to stack */ 200 /* whether to pass control frames to stack */
186 unsigned int pass_ctrl:1; 201 unsigned int pass_ctrl:1;
202
203 /* whether we have received a 802.11 ACK that is pending */
204 unsigned int ack_pending:1;
205
206 /* signal strength of the last 802.11 ACK received */
207 int ack_signal;
187}; 208};
188 209
189#define ZD_REGDOMAIN_FCC 0x10 210#define ZD_REGDOMAIN_FCC 0x10
@@ -279,7 +300,7 @@ int zd_mac_preinit_hw(struct ieee80211_hw *hw);
279int zd_mac_init_hw(struct ieee80211_hw *hw); 300int zd_mac_init_hw(struct ieee80211_hw *hw);
280 301
281int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length); 302int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
282void zd_mac_tx_failed(struct ieee80211_hw *hw); 303void zd_mac_tx_failed(struct urb *urb);
283void zd_mac_tx_to_dev(struct sk_buff *skb, int error); 304void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
284 305
285#ifdef DEBUG 306#ifdef DEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 23a6a6d4863b..ac19ecd19cfe 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -318,6 +318,13 @@ error:
318 return r; 318 return r;
319} 319}
320 320
321MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ur");
322MODULE_FIRMWARE(FW_ZD1211_PREFIX "ur");
323MODULE_FIRMWARE(FW_ZD1211B_PREFIX "ub");
324MODULE_FIRMWARE(FW_ZD1211_PREFIX "ub");
325MODULE_FIRMWARE(FW_ZD1211B_PREFIX "uphr");
326MODULE_FIRMWARE(FW_ZD1211_PREFIX "uphr");
327
321/* Read data from device address space using "firmware interface" which does 328/* Read data from device address space using "firmware interface" which does
322 * not require firmware to be loaded. */ 329 * not require firmware to be loaded. */
323int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len) 330int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len)
@@ -419,7 +426,7 @@ static void int_urb_complete(struct urb *urb)
419 handle_regs_int(urb); 426 handle_regs_int(urb);
420 break; 427 break;
421 case USB_INT_ID_RETRY_FAILED: 428 case USB_INT_ID_RETRY_FAILED:
422 zd_mac_tx_failed(zd_usb_to_hw(urb->context)); 429 zd_mac_tx_failed(urb);
423 break; 430 break;
424 default: 431 default:
425 dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb, 432 dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
@@ -553,6 +560,8 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
553 560
554 if (length < sizeof(struct rx_length_info)) { 561 if (length < sizeof(struct rx_length_info)) {
555 /* It's not a complete packet anyhow. */ 562 /* It's not a complete packet anyhow. */
563 printk("%s: invalid, small RX packet : %d\n",
564 __func__, length);
556 return; 565 return;
557 } 566 }
558 length_info = (struct rx_length_info *) 567 length_info = (struct rx_length_info *)
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 83a044dbd1d7..8c777ba4e2b3 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -660,7 +660,7 @@ static int xemaclite_open(struct net_device *dev)
660 xemaclite_set_mac_address(lp, dev->dev_addr); 660 xemaclite_set_mac_address(lp, dev->dev_addr);
661 661
662 /* Grab the IRQ */ 662 /* Grab the IRQ */
663 retval = request_irq(dev->irq, &xemaclite_interrupt, 0, dev->name, dev); 663 retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
664 if (retval) { 664 if (retval) {
665 dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n", 665 dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
666 dev->irq); 666 dev->irq);
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
index 0c44135c0b1f..389ba9df7120 100644
--- a/drivers/net/xtsonic.c
+++ b/drivers/net/xtsonic.c
@@ -92,7 +92,7 @@ static unsigned short known_revisions[] =
92 92
93static int xtsonic_open(struct net_device *dev) 93static int xtsonic_open(struct net_device *dev)
94{ 94{
95 if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) { 95 if (request_irq(dev->irq,sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
96 printk(KERN_ERR "%s: unable to get IRQ %d.\n", 96 printk(KERN_ERR "%s: unable to get IRQ %d.\n",
97 dev->name, dev->irq); 97 dev->name, dev->irq);
98 return -EAGAIN; 98 return -EAGAIN;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 40ad0dee0406..d831dfca0976 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -579,7 +579,7 @@ static int yellowfin_open(struct net_device *dev)
579 /* Reset the chip. */ 579 /* Reset the chip. */
580 iowrite32(0x80000000, ioaddr + DMACtrl); 580 iowrite32(0x80000000, ioaddr + DMACtrl);
581 581
582 ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev); 582 ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
583 if (ret) 583 if (ret)
584 return ret; 584 return ret;
585 585
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index b42347333750..a97d894d26fb 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -103,8 +103,7 @@
103#include <asm/io.h> 103#include <asm/io.h>
104#include <asm/dma.h> 104#include <asm/dma.h>
105 105
106/* This include could be elsewhere, since it is not wireless specific */ 106#include <linux/i82593.h>
107#include "wireless/i82593.h"
108 107
109static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n"; 108static char version[] __initdata = "znet.c:v1.02 9/23/94 becker@scyld.com\n";
110 109
@@ -170,7 +169,7 @@ static int znet_request_resources (struct net_device *dev)
170{ 169{
171 struct znet_private *znet = netdev_priv(dev); 170 struct znet_private *znet = netdev_priv(dev);
172 171
173 if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev)) 172 if (request_irq (dev->irq, znet_interrupt, 0, "ZNet", dev))
174 goto failed; 173 goto failed;
175 if (request_dma (znet->rx_dma, "ZNet rx")) 174 if (request_dma (znet->rx_dma, "ZNet rx"))
176 goto free_irq; 175 goto free_irq;
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 9581d3619450..79caf1ca4a29 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -352,11 +352,9 @@ static __inline__ int led_get_net_activity(void)
352 352
353 rx_total = tx_total = 0; 353 rx_total = tx_total = 0;
354 354
355 /* we are running as a workqueue task, so locking dev_base 355 /* we are running as a workqueue task, so we can use an RCU lookup */
356 * for reading should be OK */
357 read_lock(&dev_base_lock);
358 rcu_read_lock(); 356 rcu_read_lock();
359 for_each_netdev(&init_net, dev) { 357 for_each_netdev_rcu(&init_net, dev) {
360 const struct net_device_stats *stats; 358 const struct net_device_stats *stats;
361 struct in_device *in_dev = __in_dev_get_rcu(dev); 359 struct in_device *in_dev = __in_dev_get_rcu(dev);
362 if (!in_dev || !in_dev->ifa_list) 360 if (!in_dev || !in_dev->ifa_list)
@@ -368,7 +366,6 @@ static __inline__ int led_get_net_activity(void)
368 tx_total += stats->tx_packets; 366 tx_total += stats->tx_packets;
369 } 367 }
370 rcu_read_unlock(); 368 rcu_read_unlock();
371 read_unlock(&dev_base_lock);
372 369
373 retval = 0; 370 retval = 0;
374 371
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 96eddb3b1d08..6cab5a62f99e 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -3,11 +3,11 @@
3# 3#
4 4
5ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o 5ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
6obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o 6obj-$(CONFIG_CTCM) += ctcm.o fsm.o
7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o 7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o 8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
9obj-$(CONFIG_LCS) += lcs.o cu3088.o 9obj-$(CONFIG_LCS) += lcs.o
10obj-$(CONFIG_CLAW) += claw.o cu3088.o 10obj-$(CONFIG_CLAW) += claw.o
11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o 11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
12obj-$(CONFIG_QETH) += qeth.o 12obj-$(CONFIG_QETH) += qeth.o
13qeth_l2-y += qeth_l2_main.o 13qeth_l2-y += qeth_l2_main.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c63babefb698..3c77bfe0764c 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -90,7 +90,6 @@
90#include <linux/timer.h> 90#include <linux/timer.h>
91#include <linux/types.h> 91#include <linux/types.h>
92 92
93#include "cu3088.h"
94#include "claw.h" 93#include "claw.h"
95 94
96/* 95/*
@@ -258,6 +257,9 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev)
258 return -EPERM; 257 return -EPERM;
259} 258}
260 259
260/* the root device for claw group devices */
261static struct device *claw_root_dev;
262
261/* ccwgroup table */ 263/* ccwgroup table */
262 264
263static struct ccwgroup_driver claw_group_driver = { 265static struct ccwgroup_driver claw_group_driver = {
@@ -272,6 +274,47 @@ static struct ccwgroup_driver claw_group_driver = {
272 .prepare = claw_pm_prepare, 274 .prepare = claw_pm_prepare,
273}; 275};
274 276
277static struct ccw_device_id claw_ids[] = {
278 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
279 {},
280};
281MODULE_DEVICE_TABLE(ccw, claw_ids);
282
283static struct ccw_driver claw_ccw_driver = {
284 .owner = THIS_MODULE,
285 .name = "claw",
286 .ids = claw_ids,
287 .probe = ccwgroup_probe_ccwdev,
288 .remove = ccwgroup_remove_ccwdev,
289};
290
291static ssize_t
292claw_driver_group_store(struct device_driver *ddrv, const char *buf,
293 size_t count)
294{
295 int err;
296 err = ccwgroup_create_from_string(claw_root_dev,
297 claw_group_driver.driver_id,
298 &claw_ccw_driver, 3, buf);
299 return err ? err : count;
300}
301
302static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
303
304static struct attribute *claw_group_attrs[] = {
305 &driver_attr_group.attr,
306 NULL,
307};
308
309static struct attribute_group claw_group_attr_group = {
310 .attrs = claw_group_attrs,
311};
312
313static const struct attribute_group *claw_group_attr_groups[] = {
314 &claw_group_attr_group,
315 NULL,
316};
317
275/* 318/*
276* Key functions 319* Key functions
277*/ 320*/
@@ -3326,7 +3369,11 @@ claw_remove_files(struct device *dev)
3326static void __exit 3369static void __exit
3327claw_cleanup(void) 3370claw_cleanup(void)
3328{ 3371{
3329 unregister_cu3088_discipline(&claw_group_driver); 3372 driver_remove_file(&claw_group_driver.driver,
3373 &driver_attr_group);
3374 ccwgroup_driver_unregister(&claw_group_driver);
3375 ccw_driver_unregister(&claw_ccw_driver);
3376 root_device_unregister(claw_root_dev);
3330 claw_unregister_debug_facility(); 3377 claw_unregister_debug_facility();
3331 pr_info("Driver unloaded\n"); 3378 pr_info("Driver unloaded\n");
3332 3379
@@ -3348,16 +3395,31 @@ claw_init(void)
3348 if (ret) { 3395 if (ret) {
3349 pr_err("Registering with the S/390 debug feature" 3396 pr_err("Registering with the S/390 debug feature"
3350 " failed with error code %d\n", ret); 3397 " failed with error code %d\n", ret);
3351 return ret; 3398 goto out_err;
3352 } 3399 }
3353 CLAW_DBF_TEXT(2, setup, "init_mod"); 3400 CLAW_DBF_TEXT(2, setup, "init_mod");
3354 ret = register_cu3088_discipline(&claw_group_driver); 3401 claw_root_dev = root_device_register("qeth");
3355 if (ret) { 3402 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3356 CLAW_DBF_TEXT(2, setup, "init_bad"); 3403 if (ret)
3357 claw_unregister_debug_facility(); 3404 goto register_err;
3358 pr_err("Registering with the cu3088 device driver failed " 3405 ret = ccw_driver_register(&claw_ccw_driver);
3359 "with error code %d\n", ret); 3406 if (ret)
3360 } 3407 goto ccw_err;
3408 claw_group_driver.driver.groups = claw_group_attr_groups;
3409 ret = ccwgroup_driver_register(&claw_group_driver);
3410 if (ret)
3411 goto ccwgroup_err;
3412 return 0;
3413
3414ccwgroup_err:
3415 ccw_driver_unregister(&claw_ccw_driver);
3416ccw_err:
3417 root_device_unregister(claw_root_dev);
3418register_err:
3419 CLAW_DBF_TEXT(2, setup, "init_bad");
3420 claw_unregister_debug_facility();
3421out_err:
3422 pr_err("Initializing the claw device driver failed\n");
3361 return ret; 3423 return ret;
3362} 3424}
3363 3425
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 005072c420d3..46d59a13db12 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -129,6 +129,18 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
129 } \ 129 } \
130 } while (0) 130 } while (0)
131 131
132/**
133 * Enum for classifying detected devices.
134 */
135enum claw_channel_types {
136 /* Device is not a channel */
137 claw_channel_type_none,
138
139 /* Device is a CLAW channel device */
140 claw_channel_type_claw
141};
142
143
132/******************************************************* 144/*******************************************************
133* Define Control Blocks * 145* Define Control Blocks *
134* * 146* *
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4ded9ac2c5ef..70eb7f138414 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -44,7 +44,6 @@
44#include <asm/idals.h> 44#include <asm/idals.h>
45 45
46#include "fsm.h" 46#include "fsm.h"
47#include "cu3088.h"
48 47
49#include "ctcm_dbug.h" 48#include "ctcm_dbug.h"
50#include "ctcm_main.h" 49#include "ctcm_main.h"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2326aba9807a..046d077fabbb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -39,7 +39,6 @@
39#include <asm/idals.h> 39#include <asm/idals.h>
40 40
41#include "fsm.h" 41#include "fsm.h"
42#include "cu3088.h"
43#include "ctcm_main.h" 42#include "ctcm_main.h"
44 43
45/* 44/*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c5b83874500c..e35713dd0504 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -51,12 +51,16 @@
51 51
52#include <asm/idals.h> 52#include <asm/idals.h>
53 53
54#include "cu3088.h"
55#include "ctcm_fsms.h" 54#include "ctcm_fsms.h"
56#include "ctcm_main.h" 55#include "ctcm_main.h"
57 56
58/* Some common global variables */ 57/* Some common global variables */
59 58
59/**
60 * The root device for ctcm group devices
61 */
62static struct device *ctcm_root_dev;
63
60/* 64/*
61 * Linked list of all detected channels. 65 * Linked list of all detected channels.
62 */ 66 */
@@ -246,7 +250,7 @@ static void channel_remove(struct channel *ch)
246 * 250 *
247 * returns Pointer to a channel or NULL if no matching channel available. 251 * returns Pointer to a channel or NULL if no matching channel available.
248 */ 252 */
249static struct channel *channel_get(enum channel_types type, 253static struct channel *channel_get(enum ctcm_channel_types type,
250 char *id, int direction) 254 char *id, int direction)
251{ 255{
252 struct channel *ch = channels; 256 struct channel *ch = channels;
@@ -1342,7 +1346,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1342 * 1346 *
1343 * returns 0 on success, !0 on error. 1347 * returns 0 on success, !0 on error.
1344 */ 1348 */
1345static int add_channel(struct ccw_device *cdev, enum channel_types type, 1349static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1346 struct ctcm_priv *priv) 1350 struct ctcm_priv *priv)
1347{ 1351{
1348 struct channel **c = &channels; 1352 struct channel **c = &channels;
@@ -1501,13 +1505,13 @@ free_return: /* note that all channel pointers are 0 or valid */
1501/* 1505/*
1502 * Return type of a detected device. 1506 * Return type of a detected device.
1503 */ 1507 */
1504static enum channel_types get_channel_type(struct ccw_device_id *id) 1508static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
1505{ 1509{
1506 enum channel_types type; 1510 enum ctcm_channel_types type;
1507 type = (enum channel_types)id->driver_info; 1511 type = (enum ctcm_channel_types)id->driver_info;
1508 1512
1509 if (type == channel_type_ficon) 1513 if (type == ctcm_channel_type_ficon)
1510 type = channel_type_escon; 1514 type = ctcm_channel_type_escon;
1511 1515
1512 return type; 1516 return type;
1513} 1517}
@@ -1525,16 +1529,21 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1525 char read_id[CTCM_ID_SIZE]; 1529 char read_id[CTCM_ID_SIZE];
1526 char write_id[CTCM_ID_SIZE]; 1530 char write_id[CTCM_ID_SIZE];
1527 int direction; 1531 int direction;
1528 enum channel_types type; 1532 enum ctcm_channel_types type;
1529 struct ctcm_priv *priv; 1533 struct ctcm_priv *priv;
1530 struct net_device *dev; 1534 struct net_device *dev;
1531 struct ccw_device *cdev0; 1535 struct ccw_device *cdev0;
1532 struct ccw_device *cdev1; 1536 struct ccw_device *cdev1;
1537 struct channel *readc;
1538 struct channel *writec;
1533 int ret; 1539 int ret;
1540 int result;
1534 1541
1535 priv = dev_get_drvdata(&cgdev->dev); 1542 priv = dev_get_drvdata(&cgdev->dev);
1536 if (!priv) 1543 if (!priv) {
1537 return -ENODEV; 1544 result = -ENODEV;
1545 goto out_err_result;
1546 }
1538 1547
1539 cdev0 = cgdev->cdev[0]; 1548 cdev0 = cgdev->cdev[0];
1540 cdev1 = cgdev->cdev[1]; 1549 cdev1 = cgdev->cdev[1];
@@ -1545,31 +1554,40 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1545 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev)); 1554 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
1546 1555
1547 ret = add_channel(cdev0, type, priv); 1556 ret = add_channel(cdev0, type, priv);
1548 if (ret) 1557 if (ret) {
1549 return ret; 1558 result = ret;
1559 goto out_err_result;
1560 }
1550 ret = add_channel(cdev1, type, priv); 1561 ret = add_channel(cdev1, type, priv);
1551 if (ret) 1562 if (ret) {
1552 return ret; 1563 result = ret;
1564 goto out_remove_channel1;
1565 }
1553 1566
1554 ret = ccw_device_set_online(cdev0); 1567 ret = ccw_device_set_online(cdev0);
1555 if (ret != 0) { 1568 if (ret != 0) {
1556 /* may be ok to fail now - can be done later */
1557 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, 1569 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1558 "%s(%s) set_online rc=%d", 1570 "%s(%s) set_online rc=%d",
1559 CTCM_FUNTAIL, read_id, ret); 1571 CTCM_FUNTAIL, read_id, ret);
1572 result = -EIO;
1573 goto out_remove_channel2;
1560 } 1574 }
1561 1575
1562 ret = ccw_device_set_online(cdev1); 1576 ret = ccw_device_set_online(cdev1);
1563 if (ret != 0) { 1577 if (ret != 0) {
1564 /* may be ok to fail now - can be done later */
1565 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, 1578 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1566 "%s(%s) set_online rc=%d", 1579 "%s(%s) set_online rc=%d",
1567 CTCM_FUNTAIL, write_id, ret); 1580 CTCM_FUNTAIL, write_id, ret);
1581
1582 result = -EIO;
1583 goto out_ccw1;
1568 } 1584 }
1569 1585
1570 dev = ctcm_init_netdevice(priv); 1586 dev = ctcm_init_netdevice(priv);
1571 if (dev == NULL) 1587 if (dev == NULL) {
1572 goto out; 1588 result = -ENODEV;
1589 goto out_ccw2;
1590 }
1573 1591
1574 for (direction = READ; direction <= WRITE; direction++) { 1592 for (direction = READ; direction <= WRITE; direction++) {
1575 priv->channel[direction] = 1593 priv->channel[direction] =
@@ -1587,12 +1605,14 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1587 /* sysfs magic */ 1605 /* sysfs magic */
1588 SET_NETDEV_DEV(dev, &cgdev->dev); 1606 SET_NETDEV_DEV(dev, &cgdev->dev);
1589 1607
1590 if (register_netdev(dev)) 1608 if (register_netdev(dev)) {
1591 goto out_dev; 1609 result = -ENODEV;
1610 goto out_dev;
1611 }
1592 1612
1593 if (ctcm_add_attributes(&cgdev->dev)) { 1613 if (ctcm_add_attributes(&cgdev->dev)) {
1594 unregister_netdev(dev); 1614 result = -ENODEV;
1595 goto out_dev; 1615 goto out_unregister;
1596 } 1616 }
1597 1617
1598 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); 1618 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
@@ -1608,13 +1628,22 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1608 priv->channel[WRITE]->id, priv->protocol); 1628 priv->channel[WRITE]->id, priv->protocol);
1609 1629
1610 return 0; 1630 return 0;
1631out_unregister:
1632 unregister_netdev(dev);
1611out_dev: 1633out_dev:
1612 ctcm_free_netdevice(dev); 1634 ctcm_free_netdevice(dev);
1613out: 1635out_ccw2:
1614 ccw_device_set_offline(cgdev->cdev[1]); 1636 ccw_device_set_offline(cgdev->cdev[1]);
1637out_ccw1:
1615 ccw_device_set_offline(cgdev->cdev[0]); 1638 ccw_device_set_offline(cgdev->cdev[0]);
1616 1639out_remove_channel2:
1617 return -ENODEV; 1640 readc = channel_get(type, read_id, READ);
1641 channel_remove(readc);
1642out_remove_channel1:
1643 writec = channel_get(type, write_id, WRITE);
1644 channel_remove(writec);
1645out_err_result:
1646 return result;
1618} 1647}
1619 1648
1620/** 1649/**
@@ -1695,6 +1724,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
1695 return 0; 1724 return 0;
1696 netif_device_detach(priv->channel[READ]->netdev); 1725 netif_device_detach(priv->channel[READ]->netdev);
1697 ctcm_close(priv->channel[READ]->netdev); 1726 ctcm_close(priv->channel[READ]->netdev);
1727 if (!wait_event_timeout(priv->fsm->wait_q,
1728 fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
1729 netif_device_attach(priv->channel[READ]->netdev);
1730 return -EBUSY;
1731 }
1698 ccw_device_set_offline(gdev->cdev[1]); 1732 ccw_device_set_offline(gdev->cdev[1]);
1699 ccw_device_set_offline(gdev->cdev[0]); 1733 ccw_device_set_offline(gdev->cdev[0]);
1700 return 0; 1734 return 0;
@@ -1719,6 +1753,22 @@ err_out:
1719 return rc; 1753 return rc;
1720} 1754}
1721 1755
1756static struct ccw_device_id ctcm_ids[] = {
1757 {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
1758 {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
1759 {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
1760 {},
1761};
1762MODULE_DEVICE_TABLE(ccw, ctcm_ids);
1763
1764static struct ccw_driver ctcm_ccw_driver = {
1765 .owner = THIS_MODULE,
1766 .name = "ctcm",
1767 .ids = ctcm_ids,
1768 .probe = ccwgroup_probe_ccwdev,
1769 .remove = ccwgroup_remove_ccwdev,
1770};
1771
1722static struct ccwgroup_driver ctcm_group_driver = { 1772static struct ccwgroup_driver ctcm_group_driver = {
1723 .owner = THIS_MODULE, 1773 .owner = THIS_MODULE,
1724 .name = CTC_DRIVER_NAME, 1774 .name = CTC_DRIVER_NAME,
@@ -1733,6 +1783,33 @@ static struct ccwgroup_driver ctcm_group_driver = {
1733 .restore = ctcm_pm_resume, 1783 .restore = ctcm_pm_resume,
1734}; 1784};
1735 1785
1786static ssize_t
1787ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
1788 size_t count)
1789{
1790 int err;
1791
1792 err = ccwgroup_create_from_string(ctcm_root_dev,
1793 ctcm_group_driver.driver_id,
1794 &ctcm_ccw_driver, 2, buf);
1795 return err ? err : count;
1796}
1797
1798static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1799
1800static struct attribute *ctcm_group_attrs[] = {
1801 &driver_attr_group.attr,
1802 NULL,
1803};
1804
1805static struct attribute_group ctcm_group_attr_group = {
1806 .attrs = ctcm_group_attrs,
1807};
1808
1809static const struct attribute_group *ctcm_group_attr_groups[] = {
1810 &ctcm_group_attr_group,
1811 NULL,
1812};
1736 1813
1737/* 1814/*
1738 * Module related routines 1815 * Module related routines
@@ -1746,7 +1823,10 @@ static struct ccwgroup_driver ctcm_group_driver = {
1746 */ 1823 */
1747static void __exit ctcm_exit(void) 1824static void __exit ctcm_exit(void)
1748{ 1825{
1749 unregister_cu3088_discipline(&ctcm_group_driver); 1826 driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
1827 ccwgroup_driver_unregister(&ctcm_group_driver);
1828 ccw_driver_unregister(&ctcm_ccw_driver);
1829 root_device_unregister(ctcm_root_dev);
1750 ctcm_unregister_dbf_views(); 1830 ctcm_unregister_dbf_views();
1751 pr_info("CTCM driver unloaded\n"); 1831 pr_info("CTCM driver unloaded\n");
1752} 1832}
@@ -1772,17 +1852,31 @@ static int __init ctcm_init(void)
1772 channels = NULL; 1852 channels = NULL;
1773 1853
1774 ret = ctcm_register_dbf_views(); 1854 ret = ctcm_register_dbf_views();
1775 if (ret) { 1855 if (ret)
1776 return ret; 1856 goto out_err;
1777 } 1857 ctcm_root_dev = root_device_register("ctcm");
1778 ret = register_cu3088_discipline(&ctcm_group_driver); 1858 ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
1779 if (ret) { 1859 if (ret)
1780 ctcm_unregister_dbf_views(); 1860 goto register_err;
1781 pr_err("%s / register_cu3088_discipline failed, ret = %d\n", 1861 ret = ccw_driver_register(&ctcm_ccw_driver);
1782 __func__, ret); 1862 if (ret)
1783 return ret; 1863 goto ccw_err;
1784 } 1864 ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
1865 ret = ccwgroup_driver_register(&ctcm_group_driver);
1866 if (ret)
1867 goto ccwgroup_err;
1785 print_banner(); 1868 print_banner();
1869 return 0;
1870
1871ccwgroup_err:
1872 ccw_driver_unregister(&ctcm_ccw_driver);
1873ccw_err:
1874 root_device_unregister(ctcm_root_dev);
1875register_err:
1876 ctcm_unregister_dbf_views();
1877out_err:
1878 pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
1879 __func__, ret);
1786 return ret; 1880 return ret;
1787} 1881}
1788 1882
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d925e732b7d8..d34fa14f44e7 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -16,7 +16,6 @@
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17 17
18#include "fsm.h" 18#include "fsm.h"
19#include "cu3088.h"
20#include "ctcm_dbug.h" 19#include "ctcm_dbug.h"
21#include "ctcm_mpc.h" 20#include "ctcm_mpc.h"
22 21
@@ -66,6 +65,23 @@
66 ctcmpc_dumpit(buf, len); \ 65 ctcmpc_dumpit(buf, len); \
67 } while (0) 66 } while (0)
68 67
68/**
69 * Enum for classifying detected devices
70 */
71enum ctcm_channel_types {
72 /* Device is not a channel */
73 ctcm_channel_type_none,
74
75 /* Device is a CTC/A */
76 ctcm_channel_type_parallel,
77
78 /* Device is a FICON channel */
79 ctcm_channel_type_ficon,
80
81 /* Device is a ESCON channel */
82 ctcm_channel_type_escon
83};
84
69/* 85/*
70 * CCW commands, used in this driver. 86 * CCW commands, used in this driver.
71 */ 87 */
@@ -121,7 +137,7 @@ struct channel {
121 * Type of this channel. 137 * Type of this channel.
122 * CTC/A or Escon for valid channels. 138 * CTC/A or Escon for valid channels.
123 */ 139 */
124 enum channel_types type; 140 enum ctcm_channel_types type;
125 /* 141 /*
126 * Misc. flags. See CHANNEL_FLAGS_... below 142 * Misc. flags. See CHANNEL_FLAGS_... below
127 */ 143 */
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 781e18be7e8f..5978b390153f 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,7 +53,6 @@
53#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
54#include <asm/idals.h> 54#include <asm/idals.h>
55 55
56#include "cu3088.h"
57#include "ctcm_mpc.h" 56#include "ctcm_mpc.h"
58#include "ctcm_main.h" 57#include "ctcm_main.h"
59#include "ctcm_fsms.h" 58#include "ctcm_fsms.h"
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8452bb052d68..738ad26c74a7 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -158,6 +158,15 @@ static ssize_t ctcm_proto_store(struct device *dev,
158 return count; 158 return count;
159} 159}
160 160
161const char *ctcm_type[] = {
162 "not a channel",
163 "CTC/A",
164 "FICON channel",
165 "ESCON channel",
166 "unknown channel type",
167 "unsupported channel type",
168};
169
161static ssize_t ctcm_type_show(struct device *dev, 170static ssize_t ctcm_type_show(struct device *dev,
162 struct device_attribute *attr, char *buf) 171 struct device_attribute *attr, char *buf)
163{ 172{
@@ -168,7 +177,7 @@ static ssize_t ctcm_type_show(struct device *dev,
168 return -ENODEV; 177 return -ENODEV;
169 178
170 return sprintf(buf, "%s\n", 179 return sprintf(buf, "%s\n",
171 cu3088_type[cgdev->cdev[0]->id.driver_info]); 180 ctcm_type[cgdev->cdev[0]->id.driver_info]);
172} 181}
173 182
174static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); 183static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
deleted file mode 100644
index 48383459e99b..000000000000
--- a/drivers/s390/net/cu3088.c
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * CTC / LCS ccw_device driver
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Arnd Bergmann <arndb@de.ibm.com>
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/err.h>
27
28#include <asm/ccwdev.h>
29#include <asm/ccwgroup.h>
30
31#include "cu3088.h"
32
33const char *cu3088_type[] = {
34 "not a channel",
35 "CTC/A",
36 "ESCON channel",
37 "FICON channel",
38 "OSA LCS card",
39 "CLAW channel device",
40 "unknown channel type",
41 "unsupported channel type",
42};
43
44/* static definitions */
45
46static struct ccw_device_id cu3088_ids[] = {
47 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
48 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
49 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
50 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
51 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
52 { /* end of list */ }
53};
54
55static struct ccw_driver cu3088_driver;
56
57static struct device *cu3088_root_dev;
58
59static ssize_t
60group_write(struct device_driver *drv, const char *buf, size_t count)
61{
62 int ret;
63 struct ccwgroup_driver *cdrv;
64
65 cdrv = to_ccwgroupdrv(drv);
66 if (!cdrv)
67 return -EINVAL;
68 ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
69 &cu3088_driver, 2, buf);
70
71 return (ret == 0) ? count : ret;
72}
73
74static DRIVER_ATTR(group, 0200, NULL, group_write);
75
76/* Register-unregister for ctc&lcs */
77int
78register_cu3088_discipline(struct ccwgroup_driver *dcp)
79{
80 int rc;
81
82 if (!dcp)
83 return -EINVAL;
84
85 /* Register discipline.*/
86 rc = ccwgroup_driver_register(dcp);
87 if (rc)
88 return rc;
89
90 rc = driver_create_file(&dcp->driver, &driver_attr_group);
91 if (rc)
92 ccwgroup_driver_unregister(dcp);
93
94 return rc;
95
96}
97
98void
99unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
100{
101 if (!dcp)
102 return;
103
104 driver_remove_file(&dcp->driver, &driver_attr_group);
105 ccwgroup_driver_unregister(dcp);
106}
107
108static struct ccw_driver cu3088_driver = {
109 .owner = THIS_MODULE,
110 .ids = cu3088_ids,
111 .name = "cu3088",
112 .probe = ccwgroup_probe_ccwdev,
113 .remove = ccwgroup_remove_ccwdev,
114};
115
116/* module setup */
117static int __init
118cu3088_init (void)
119{
120 int rc;
121
122 cu3088_root_dev = root_device_register("cu3088");
123 if (IS_ERR(cu3088_root_dev))
124 return PTR_ERR(cu3088_root_dev);
125 rc = ccw_driver_register(&cu3088_driver);
126 if (rc)
127 root_device_unregister(cu3088_root_dev);
128
129 return rc;
130}
131
132static void __exit
133cu3088_exit (void)
134{
135 ccw_driver_unregister(&cu3088_driver);
136 root_device_unregister(cu3088_root_dev);
137}
138
139MODULE_DEVICE_TABLE(ccw,cu3088_ids);
140MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
141MODULE_LICENSE("GPL");
142
143module_init(cu3088_init);
144module_exit(cu3088_exit);
145
146EXPORT_SYMBOL_GPL(cu3088_type);
147EXPORT_SYMBOL_GPL(register_cu3088_discipline);
148EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
deleted file mode 100644
index d8558a7105a5..000000000000
--- a/drivers/s390/net/cu3088.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _CU3088_H
2#define _CU3088_H
3
4/**
5 * Enum for classifying detected devices.
6 */
7enum channel_types {
8 /* Device is not a channel */
9 channel_type_none,
10
11 /* Device is a CTC/A */
12 channel_type_parallel,
13
14 /* Device is a ESCON channel */
15 channel_type_escon,
16
17 /* Device is a FICON channel */
18 channel_type_ficon,
19
20 /* Device is a OSA2 card */
21 channel_type_osa2,
22
23 /* Device is a CLAW channel device */
24 channel_type_claw,
25
26 /* Device is a channel, but we don't know
27 * anything about it */
28 channel_type_unknown,
29
30 /* Device is an unsupported model */
31 channel_type_unsupported,
32
33 /* number of type entries */
34 num_channel_types
35};
36
37extern const char *cu3088_type[num_channel_types];
38extern int register_cu3088_discipline(struct ccwgroup_driver *);
39extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
40
41#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 2c1db8036b7c..cae48cbc5e96 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -27,6 +27,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
27 return NULL; 27 return NULL;
28 } 28 }
29 strlcpy(this->name, name, sizeof(this->name)); 29 strlcpy(this->name, name, sizeof(this->name));
30 init_waitqueue_head(&this->wait_q);
30 31
31 f = kzalloc(sizeof(fsm), order); 32 f = kzalloc(sizeof(fsm), order);
32 if (f == NULL) { 33 if (f == NULL) {
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index af679c10f1bd..1e8b235d95b5 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -66,6 +66,7 @@ typedef struct fsm_instance_t {
66 char name[16]; 66 char name[16];
67 void *userdata; 67 void *userdata;
68 int userint; 68 int userint;
69 wait_queue_head_t wait_q;
69#if FSM_DEBUG_HISTORY 70#if FSM_DEBUG_HISTORY
70 int history_index; 71 int history_index;
71 int history_size; 72 int history_size;
@@ -197,6 +198,7 @@ fsm_newstate(fsm_instance *fi, int newstate)
197 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name, 198 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
198 fi->f->state_names[newstate]); 199 fi->f->state_names[newstate]);
199#endif 200#endif
201 wake_up(&fi->wait_q);
200} 202}
201 203
202/** 204/**
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a70de9b4bf29..f6cc46dc0501 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -47,7 +47,6 @@
47#include <asm/ccwgroup.h> 47#include <asm/ccwgroup.h>
48 48
49#include "lcs.h" 49#include "lcs.h"
50#include "cu3088.h"
51 50
52 51
53#if !defined(CONFIG_NET_ETHERNET) && \ 52#if !defined(CONFIG_NET_ETHERNET) && \
@@ -60,7 +59,11 @@
60 */ 59 */
61 60
62static char version[] __initdata = "LCS driver"; 61static char version[] __initdata = "LCS driver";
63static char debug_buffer[255]; 62
63/**
64 * the root device for lcs group devices
65 */
66static struct device *lcs_root_dev;
64 67
65/** 68/**
66 * Some prototypes. 69 * Some prototypes.
@@ -76,6 +79,7 @@ static int lcs_recovery(void *ptr);
76/** 79/**
77 * Debug Facility Stuff 80 * Debug Facility Stuff
78 */ 81 */
82static char debug_buffer[255];
79static debug_info_t *lcs_dbf_setup; 83static debug_info_t *lcs_dbf_setup;
80static debug_info_t *lcs_dbf_trace; 84static debug_info_t *lcs_dbf_trace;
81 85
@@ -889,7 +893,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
889 rc = lcs_ready_buffer(&card->write, buffer); 893 rc = lcs_ready_buffer(&card->write, buffer);
890 if (rc) 894 if (rc)
891 return rc; 895 return rc;
892 init_timer(&timer); 896 init_timer_on_stack(&timer);
893 timer.function = lcs_lancmd_timeout; 897 timer.function = lcs_lancmd_timeout;
894 timer.data = (unsigned long) reply; 898 timer.data = (unsigned long) reply;
895 timer.expires = jiffies + HZ*card->lancmd_timeout; 899 timer.expires = jiffies + HZ*card->lancmd_timeout;
@@ -1968,6 +1972,15 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1968 1972
1969static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); 1973static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1970 1974
1975const char *lcs_type[] = {
1976 "not a channel",
1977 "2216 parallel",
1978 "2216 channel",
1979 "OSA LCS card",
1980 "unknown channel type",
1981 "unsupported channel type",
1982};
1983
1971static ssize_t 1984static ssize_t
1972lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) 1985lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1973{ 1986{
@@ -1977,7 +1990,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1977 if (!cgdev) 1990 if (!cgdev)
1978 return -ENODEV; 1991 return -ENODEV;
1979 1992
1980 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]); 1993 return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1981} 1994}
1982 1995
1983static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); 1996static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
@@ -2130,8 +2143,12 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2130 card->write.ccwdev = ccwgdev->cdev[1]; 2143 card->write.ccwdev = ccwgdev->cdev[1];
2131 2144
2132 recover_state = card->state; 2145 recover_state = card->state;
2133 ccw_device_set_online(card->read.ccwdev); 2146 rc = ccw_device_set_online(card->read.ccwdev);
2134 ccw_device_set_online(card->write.ccwdev); 2147 if (rc)
2148 goto out_err;
2149 rc = ccw_device_set_online(card->write.ccwdev);
2150 if (rc)
2151 goto out_werr;
2135 2152
2136 LCS_DBF_TEXT(3, setup, "lcsnewdv"); 2153 LCS_DBF_TEXT(3, setup, "lcsnewdv");
2137 2154
@@ -2210,8 +2227,10 @@ netdev_out:
2210 return 0; 2227 return 0;
2211out: 2228out:
2212 2229
2213 ccw_device_set_offline(card->read.ccwdev);
2214 ccw_device_set_offline(card->write.ccwdev); 2230 ccw_device_set_offline(card->write.ccwdev);
2231out_werr:
2232 ccw_device_set_offline(card->read.ccwdev);
2233out_err:
2215 return -ENODEV; 2234 return -ENODEV;
2216} 2235}
2217 2236
@@ -2364,6 +2383,22 @@ static int lcs_restore(struct ccwgroup_device *gdev)
2364 return lcs_pm_resume(card); 2383 return lcs_pm_resume(card);
2365} 2384}
2366 2385
2386static struct ccw_device_id lcs_ids[] = {
2387 {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
2388 {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
2389 {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
2390 {},
2391};
2392MODULE_DEVICE_TABLE(ccw, lcs_ids);
2393
2394static struct ccw_driver lcs_ccw_driver = {
2395 .owner = THIS_MODULE,
2396 .name = "lcs",
2397 .ids = lcs_ids,
2398 .probe = ccwgroup_probe_ccwdev,
2399 .remove = ccwgroup_remove_ccwdev,
2400};
2401
2367/** 2402/**
2368 * LCS ccwgroup driver registration 2403 * LCS ccwgroup driver registration
2369 */ 2404 */
@@ -2383,6 +2418,33 @@ static struct ccwgroup_driver lcs_group_driver = {
2383 .restore = lcs_restore, 2418 .restore = lcs_restore,
2384}; 2419};
2385 2420
2421static ssize_t
2422lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2423 size_t count)
2424{
2425 int err;
2426 err = ccwgroup_create_from_string(lcs_root_dev,
2427 lcs_group_driver.driver_id,
2428 &lcs_ccw_driver, 2, buf);
2429 return err ? err : count;
2430}
2431
2432static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
2433
2434static struct attribute *lcs_group_attrs[] = {
2435 &driver_attr_group.attr,
2436 NULL,
2437};
2438
2439static struct attribute_group lcs_group_attr_group = {
2440 .attrs = lcs_group_attrs,
2441};
2442
2443static const struct attribute_group *lcs_group_attr_groups[] = {
2444 &lcs_group_attr_group,
2445 NULL,
2446};
2447
2386/** 2448/**
2387 * LCS Module/Kernel initialization function 2449 * LCS Module/Kernel initialization function
2388 */ 2450 */
@@ -2394,17 +2456,30 @@ __init lcs_init_module(void)
2394 pr_info("Loading %s\n", version); 2456 pr_info("Loading %s\n", version);
2395 rc = lcs_register_debug_facility(); 2457 rc = lcs_register_debug_facility();
2396 LCS_DBF_TEXT(0, setup, "lcsinit"); 2458 LCS_DBF_TEXT(0, setup, "lcsinit");
2397 if (rc) { 2459 if (rc)
2398 pr_err("Initialization failed\n"); 2460 goto out_err;
2399 return rc; 2461 lcs_root_dev = root_device_register("lcs");
2400 } 2462 rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2401 2463 if (rc)
2402 rc = register_cu3088_discipline(&lcs_group_driver); 2464 goto register_err;
2403 if (rc) { 2465 rc = ccw_driver_register(&lcs_ccw_driver);
2404 pr_err("Initialization failed\n"); 2466 if (rc)
2405 return rc; 2467 goto ccw_err;
2406 } 2468 lcs_group_driver.driver.groups = lcs_group_attr_groups;
2469 rc = ccwgroup_driver_register(&lcs_group_driver);
2470 if (rc)
2471 goto ccwgroup_err;
2407 return 0; 2472 return 0;
2473
2474ccwgroup_err:
2475 ccw_driver_unregister(&lcs_ccw_driver);
2476ccw_err:
2477 root_device_unregister(lcs_root_dev);
2478register_err:
2479 lcs_unregister_debug_facility();
2480out_err:
2481 pr_err("Initializing the lcs device driver failed\n");
2482 return rc;
2408} 2483}
2409 2484
2410 2485
@@ -2416,7 +2491,11 @@ __exit lcs_cleanup_module(void)
2416{ 2491{
2417 pr_info("Terminating lcs module.\n"); 2492 pr_info("Terminating lcs module.\n");
2418 LCS_DBF_TEXT(0, trace, "cleanup"); 2493 LCS_DBF_TEXT(0, trace, "cleanup");
2419 unregister_cu3088_discipline(&lcs_group_driver); 2494 driver_remove_file(&lcs_group_driver.driver,
2495 &driver_attr_group);
2496 ccwgroup_driver_unregister(&lcs_group_driver);
2497 ccw_driver_unregister(&lcs_ccw_driver);
2498 root_device_unregister(lcs_root_dev);
2420 lcs_unregister_debug_facility(); 2499 lcs_unregister_debug_facility();
2421} 2500}
2422 2501
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 6d668642af27..8c03392ac833 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -36,6 +36,24 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
36#define CARD_FROM_DEV(cdev) \ 36#define CARD_FROM_DEV(cdev) \
37 (struct lcs_card *) dev_get_drvdata( \ 37 (struct lcs_card *) dev_get_drvdata( \
38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); 38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
39
40/**
41 * Enum for classifying detected devices.
42 */
43enum lcs_channel_types {
44 /* Device is not a channel */
45 lcs_channel_type_none,
46
47 /* Device is a 2216 channel */
48 lcs_channel_type_parallel,
49
50 /* Device is a 2216 channel */
51 lcs_channel_type_2216,
52
53 /* Device is a OSA2 card */
54 lcs_channel_type_osa2
55};
56
39/** 57/**
40 * CCW commands used in this driver 58 * CCW commands used in this driver
41 */ 59 */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602a..395c04c2b00f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
741 if (single_flag) { 741 if (single_flag) {
742 if ((skb = skb_dequeue(&conn->commit_queue))) { 742 if ((skb = skb_dequeue(&conn->commit_queue))) {
743 atomic_dec(&skb->users); 743 atomic_dec(&skb->users);
744 dev_kfree_skb_any(skb);
745 if (privptr) { 744 if (privptr) {
746 privptr->stats.tx_packets++; 745 privptr->stats.tx_packets++;
747 privptr->stats.tx_bytes += 746 privptr->stats.tx_bytes +=
748 (skb->len - NETIUCV_HDRLEN 747 (skb->len - NETIUCV_HDRLEN
749 - NETIUCV_HDRLEN); 748 - NETIUCV_HDRLEN);
750 } 749 }
750 dev_kfree_skb_any(skb);
751 } 751 }
752 } 752 }
753 conn->tx_buff->data = conn->tx_buff->head; 753 conn->tx_buff->data = conn->tx_buff->head;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 31a2b4e502ce..b232693378cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -122,7 +122,6 @@ struct qeth_perf_stats {
122 __u64 outbound_do_qdio_start_time; 122 __u64 outbound_do_qdio_start_time;
123 unsigned int outbound_do_qdio_cnt; 123 unsigned int outbound_do_qdio_cnt;
124 unsigned int outbound_do_qdio_time; 124 unsigned int outbound_do_qdio_time;
125 /* eddp data */
126 unsigned int large_send_bytes; 125 unsigned int large_send_bytes;
127 unsigned int large_send_cnt; 126 unsigned int large_send_cnt;
128 unsigned int sg_skbs_sent; 127 unsigned int sg_skbs_sent;
@@ -135,6 +134,7 @@ struct qeth_perf_stats {
135 unsigned int sg_frags_rx; 134 unsigned int sg_frags_rx;
136 unsigned int sg_alloc_page_rx; 135 unsigned int sg_alloc_page_rx;
137 unsigned int tx_csum; 136 unsigned int tx_csum;
137 unsigned int tx_lin;
138}; 138};
139 139
140/* Routing stuff */ 140/* Routing stuff */
@@ -648,6 +648,7 @@ struct qeth_card_options {
648 enum qeth_large_send_types large_send; 648 enum qeth_large_send_types large_send;
649 int performance_stats; 649 int performance_stats;
650 int rx_sg_cb; 650 int rx_sg_cb;
651 enum qeth_ipa_isolation_modes isolation;
651}; 652};
652 653
653/* 654/*
@@ -776,7 +777,6 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
776 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); 777 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
777} 778}
778 779
779struct qeth_eddp_context;
780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
782const char *qeth_get_cardname_short(struct qeth_card *); 782const char *qeth_get_cardname_short(struct qeth_card *);
@@ -836,7 +836,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); 836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
837int qeth_mdio_read(struct net_device *, int, int); 837int qeth_mdio_read(struct net_device *, int, int);
838int qeth_snmp_command(struct qeth_card *, char __user *); 838int qeth_snmp_command(struct qeth_card *, char __user *);
839int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
840struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); 839struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
841int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, 840int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
842 unsigned long); 841 unsigned long);
@@ -849,13 +848,14 @@ int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
849 struct sk_buff *, struct qeth_hdr *, int, int, int); 848 struct sk_buff *, struct qeth_hdr *, int, int, int);
850int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, 849int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
851 struct sk_buff *, struct qeth_hdr *, int); 850 struct sk_buff *, struct qeth_hdr *, int);
852int qeth_core_get_stats_count(struct net_device *); 851int qeth_core_get_sset_count(struct net_device *, int);
853void qeth_core_get_ethtool_stats(struct net_device *, 852void qeth_core_get_ethtool_stats(struct net_device *,
854 struct ethtool_stats *, u64 *); 853 struct ethtool_stats *, u64 *);
855void qeth_core_get_strings(struct net_device *, u32, u8 *); 854void qeth_core_get_strings(struct net_device *, u32, u8 *);
856void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 855void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
857void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); 856void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
858int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 857int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
858int qeth_set_access_ctrl_online(struct qeth_card *card);
859 859
860/* exports for OSN */ 860/* exports for OSN */
861int qeth_osn_assist(struct net_device *, void *, int); 861int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c4a42d970158..d34804d5ece1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -270,41 +270,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
270 return qeth_alloc_buffer_pool(card); 270 return qeth_alloc_buffer_pool(card);
271} 271}
272 272
273int qeth_set_large_send(struct qeth_card *card,
274 enum qeth_large_send_types type)
275{
276 int rc = 0;
277
278 if (card->dev == NULL) {
279 card->options.large_send = type;
280 return 0;
281 }
282 if (card->state == CARD_STATE_UP)
283 netif_tx_disable(card->dev);
284 card->options.large_send = type;
285 switch (card->options.large_send) {
286 case QETH_LARGE_SEND_TSO:
287 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
288 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
289 NETIF_F_HW_CSUM;
290 } else {
291 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
292 NETIF_F_HW_CSUM);
293 card->options.large_send = QETH_LARGE_SEND_NO;
294 rc = -EOPNOTSUPP;
295 }
296 break;
297 default: /* includes QETH_LARGE_SEND_NO */
298 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
299 NETIF_F_HW_CSUM);
300 break;
301 }
302 if (card->state == CARD_STATE_UP)
303 netif_wake_queue(card->dev);
304 return rc;
305}
306EXPORT_SYMBOL_GPL(qeth_set_large_send);
307
308static int qeth_issue_next_read(struct qeth_card *card) 273static int qeth_issue_next_read(struct qeth_card *card)
309{ 274{
310 int rc; 275 int rc;
@@ -1079,6 +1044,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
1079 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1044 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1080 card->options.performance_stats = 0; 1045 card->options.performance_stats = 0;
1081 card->options.rx_sg_cb = QETH_RX_SG_CB; 1046 card->options.rx_sg_cb = QETH_RX_SG_CB;
1047 card->options.isolation = ISOLATION_MODE_NONE;
1082} 1048}
1083 1049
1084static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1050static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -3389,6 +3355,156 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3389} 3355}
3390EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 3356EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3391 3357
3358static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3359 struct qeth_reply *reply, unsigned long data)
3360{
3361 struct qeth_ipa_cmd *cmd;
3362 struct qeth_set_access_ctrl *access_ctrl_req;
3363 int rc;
3364
3365 QETH_DBF_TEXT(TRACE, 4, "setaccb");
3366
3367 cmd = (struct qeth_ipa_cmd *) data;
3368 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3369 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3370 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3371 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3372 cmd->data.setadapterparms.hdr.return_code);
3373 switch (cmd->data.setadapterparms.hdr.return_code) {
3374 case SET_ACCESS_CTRL_RC_SUCCESS:
3375 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3376 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3377 {
3378 card->options.isolation = access_ctrl_req->subcmd_code;
3379 if (card->options.isolation == ISOLATION_MODE_NONE) {
3380 dev_info(&card->gdev->dev,
3381 "QDIO data connection isolation is deactivated\n");
3382 } else {
3383 dev_info(&card->gdev->dev,
3384 "QDIO data connection isolation is activated\n");
3385 }
3386 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3387 card->gdev->dev.kobj.name,
3388 access_ctrl_req->subcmd_code,
3389 cmd->data.setadapterparms.hdr.return_code);
3390 rc = 0;
3391 break;
3392 }
3393 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3394 {
3395 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3396 card->gdev->dev.kobj.name,
3397 access_ctrl_req->subcmd_code,
3398 cmd->data.setadapterparms.hdr.return_code);
3399 dev_err(&card->gdev->dev, "Adapter does not "
3400 "support QDIO data connection isolation\n");
3401
3402 /* ensure isolation mode is "none" */
3403 card->options.isolation = ISOLATION_MODE_NONE;
3404 rc = -EOPNOTSUPP;
3405 break;
3406 }
3407 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3408 {
3409 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3410 card->gdev->dev.kobj.name,
3411 access_ctrl_req->subcmd_code,
3412 cmd->data.setadapterparms.hdr.return_code);
3413 dev_err(&card->gdev->dev,
3414 "Adapter is dedicated. "
3415 "QDIO data connection isolation not supported\n");
3416
3417 /* ensure isolation mode is "none" */
3418 card->options.isolation = ISOLATION_MODE_NONE;
3419 rc = -EOPNOTSUPP;
3420 break;
3421 }
3422 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3423 {
3424 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3425 card->gdev->dev.kobj.name,
3426 access_ctrl_req->subcmd_code,
3427 cmd->data.setadapterparms.hdr.return_code);
3428 dev_err(&card->gdev->dev,
3429 "TSO does not permit QDIO data connection isolation\n");
3430
3431 /* ensure isolation mode is "none" */
3432 card->options.isolation = ISOLATION_MODE_NONE;
3433 rc = -EPERM;
3434 break;
3435 }
3436 default:
3437 {
3438 /* this should never happen */
3439 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3440 "==UNKNOWN\n",
3441 card->gdev->dev.kobj.name,
3442 access_ctrl_req->subcmd_code,
3443 cmd->data.setadapterparms.hdr.return_code);
3444
3445 /* ensure isolation mode is "none" */
3446 card->options.isolation = ISOLATION_MODE_NONE;
3447 rc = 0;
3448 break;
3449 }
3450 }
3451 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3452 return rc;
3453}
3454
3455static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3456 enum qeth_ipa_isolation_modes isolation)
3457{
3458 int rc;
3459 struct qeth_cmd_buffer *iob;
3460 struct qeth_ipa_cmd *cmd;
3461 struct qeth_set_access_ctrl *access_ctrl_req;
3462
3463 QETH_DBF_TEXT(TRACE, 4, "setacctl");
3464
3465 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3466 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3467
3468 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3469 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3470 sizeof(struct qeth_set_access_ctrl));
3471 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3472 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3473 access_ctrl_req->subcmd_code = isolation;
3474
3475 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3476 NULL);
3477 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3478 return rc;
3479}
3480
3481int qeth_set_access_ctrl_online(struct qeth_card *card)
3482{
3483 int rc = 0;
3484
3485 QETH_DBF_TEXT(TRACE, 4, "setactlo");
3486
3487 if (card->info.type == QETH_CARD_TYPE_OSAE &&
3488 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3489 rc = qeth_setadpparms_set_access_ctrl(card,
3490 card->options.isolation);
3491 if (rc) {
3492 QETH_DBF_MESSAGE(3,
3493 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
3494 card->gdev->dev.kobj.name,
3495 rc);
3496 }
3497 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3498 card->options.isolation = ISOLATION_MODE_NONE;
3499
3500 dev_err(&card->gdev->dev, "Adapter does not "
3501 "support QDIO data connection isolation\n");
3502 rc = -EOPNOTSUPP;
3503 }
3504 return rc;
3505}
3506EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3507
3392void qeth_tx_timeout(struct net_device *dev) 3508void qeth_tx_timeout(struct net_device *dev)
3393{ 3509{
3394 struct qeth_card *card; 3510 struct qeth_card *card;
@@ -3732,30 +3848,36 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3732int qeth_core_hardsetup_card(struct qeth_card *card) 3848int qeth_core_hardsetup_card(struct qeth_card *card)
3733{ 3849{
3734 struct qdio_ssqd_desc *ssqd; 3850 struct qdio_ssqd_desc *ssqd;
3735 int retries = 3; 3851 int retries = 0;
3736 int mpno = 0; 3852 int mpno = 0;
3737 int rc; 3853 int rc;
3738 3854
3739 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3855 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3740 atomic_set(&card->force_alloc_skb, 0); 3856 atomic_set(&card->force_alloc_skb, 0);
3741retry: 3857retry:
3742 if (retries < 3) { 3858 if (retries)
3743 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 3859 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
3744 dev_name(&card->gdev->dev)); 3860 dev_name(&card->gdev->dev));
3745 ccw_device_set_offline(CARD_DDEV(card)); 3861 ccw_device_set_offline(CARD_DDEV(card));
3746 ccw_device_set_offline(CARD_WDEV(card)); 3862 ccw_device_set_offline(CARD_WDEV(card));
3747 ccw_device_set_offline(CARD_RDEV(card)); 3863 ccw_device_set_offline(CARD_RDEV(card));
3748 ccw_device_set_online(CARD_RDEV(card)); 3864 rc = ccw_device_set_online(CARD_RDEV(card));
3749 ccw_device_set_online(CARD_WDEV(card)); 3865 if (rc)
3750 ccw_device_set_online(CARD_DDEV(card)); 3866 goto retriable;
3751 } 3867 rc = ccw_device_set_online(CARD_WDEV(card));
3868 if (rc)
3869 goto retriable;
3870 rc = ccw_device_set_online(CARD_DDEV(card));
3871 if (rc)
3872 goto retriable;
3752 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 3873 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3874retriable:
3753 if (rc == -ERESTARTSYS) { 3875 if (rc == -ERESTARTSYS) {
3754 QETH_DBF_TEXT(SETUP, 2, "break1"); 3876 QETH_DBF_TEXT(SETUP, 2, "break1");
3755 return rc; 3877 return rc;
3756 } else if (rc) { 3878 } else if (rc) {
3757 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3879 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3758 if (--retries < 0) 3880 if (++retries > 3)
3759 goto out; 3881 goto out;
3760 else 3882 else
3761 goto retry; 3883 goto retry;
@@ -4303,13 +4425,19 @@ static struct {
4303 {"tx do_QDIO time"}, 4425 {"tx do_QDIO time"},
4304 {"tx do_QDIO count"}, 4426 {"tx do_QDIO count"},
4305 {"tx csum"}, 4427 {"tx csum"},
4428 {"tx lin"},
4306}; 4429};
4307 4430
4308int qeth_core_get_stats_count(struct net_device *dev) 4431int qeth_core_get_sset_count(struct net_device *dev, int stringset)
4309{ 4432{
4310 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); 4433 switch (stringset) {
4434 case ETH_SS_STATS:
4435 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4436 default:
4437 return -EINVAL;
4438 }
4311} 4439}
4312EXPORT_SYMBOL_GPL(qeth_core_get_stats_count); 4440EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
4313 4441
4314void qeth_core_get_ethtool_stats(struct net_device *dev, 4442void qeth_core_get_ethtool_stats(struct net_device *dev,
4315 struct ethtool_stats *stats, u64 *data) 4443 struct ethtool_stats *stats, u64 *data)
@@ -4355,6 +4483,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
4355 data[31] = card->perf_stats.outbound_do_qdio_time; 4483 data[31] = card->perf_stats.outbound_do_qdio_time;
4356 data[32] = card->perf_stats.outbound_do_qdio_cnt; 4484 data[32] = card->perf_stats.outbound_do_qdio_cnt;
4357 data[33] = card->perf_stats.tx_csum; 4485 data[33] = card->perf_stats.tx_csum;
4486 data[34] = card->perf_stats.tx_lin;
4358} 4487}
4359EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); 4488EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4360 4489
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index eecb2ee62e85..52c03438dbec 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -234,18 +234,19 @@ enum qeth_ipa_setdelip_flags {
234 234
235/* SETADAPTER IPA Command: ****************************************************/ 235/* SETADAPTER IPA Command: ****************************************************/
236enum qeth_ipa_setadp_cmd { 236enum qeth_ipa_setadp_cmd {
237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001, 237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L,
238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002, 238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L,
239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004, 239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L,
240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008, 240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L,
241 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010, 241 IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L,
242 IPA_SETADP_SET_CONFIG_PARMS = 0x0020, 242 IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L,
243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040, 243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L,
244 IPA_SETADP_SET_BROADCAST_MODE = 0x0080, 244 IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L,
245 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, 245 IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L,
246 IPA_SETADP_SET_SNMP_CONTROL = 0x0200, 246 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
247 IPA_SETADP_QUERY_CARD_INFO = 0x0400, 247 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
248 IPA_SETADP_SET_PROMISC_MODE = 0x0800, 248 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
249 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
249}; 250};
250enum qeth_ipa_mac_ops { 251enum qeth_ipa_mac_ops {
251 CHANGE_ADDR_READ_MAC = 0, 252 CHANGE_ADDR_READ_MAC = 0,
@@ -264,6 +265,20 @@ enum qeth_ipa_promisc_modes {
264 SET_PROMISC_MODE_OFF = 0, 265 SET_PROMISC_MODE_OFF = 0,
265 SET_PROMISC_MODE_ON = 1, 266 SET_PROMISC_MODE_ON = 1,
266}; 267};
268enum qeth_ipa_isolation_modes {
269 ISOLATION_MODE_NONE = 0x00000000L,
270 ISOLATION_MODE_FWD = 0x00000001L,
271 ISOLATION_MODE_DROP = 0x00000002L,
272};
273enum qeth_ipa_set_access_mode_rc {
274 SET_ACCESS_CTRL_RC_SUCCESS = 0x0000,
275 SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004,
276 SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008,
277 SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010,
278 SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014,
279 SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018,
280};
281
267 282
268/* (SET)DELIP(M) IPA stuff ***************************************************/ 283/* (SET)DELIP(M) IPA stuff ***************************************************/
269struct qeth_ipacmd_setdelip4 { 284struct qeth_ipacmd_setdelip4 {
@@ -376,6 +391,11 @@ struct qeth_snmp_ureq {
376 struct qeth_snmp_cmd cmd; 391 struct qeth_snmp_cmd cmd;
377} __attribute__((packed)); 392} __attribute__((packed));
378 393
394/* SET_ACCESS_CONTROL: same format for request and reply */
395struct qeth_set_access_ctrl {
396 __u32 subcmd_code;
397} __attribute__((packed));
398
379struct qeth_ipacmd_setadpparms_hdr { 399struct qeth_ipacmd_setadpparms_hdr {
380 __u32 supp_hw_cmds; 400 __u32 supp_hw_cmds;
381 __u32 reserved1; 401 __u32 reserved1;
@@ -394,6 +414,7 @@ struct qeth_ipacmd_setadpparms {
394 struct qeth_query_cmds_supp query_cmds_supp; 414 struct qeth_query_cmds_supp query_cmds_supp;
395 struct qeth_change_addr change_addr; 415 struct qeth_change_addr change_addr;
396 struct qeth_snmp_cmd snmp; 416 struct qeth_snmp_cmd snmp;
417 struct qeth_set_access_ctrl set_access_ctrl;
397 __u32 mode; 418 __u32 mode;
398 } data; 419 } data;
399} __attribute__ ((packed)); 420} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 33505c2a0e3a..9ff2b36fdc43 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -416,7 +416,11 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, 416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
417 qeth_dev_layer2_store); 417 qeth_dev_layer2_store);
418 418
419static ssize_t qeth_dev_large_send_show(struct device *dev, 419#define ATTR_QETH_ISOLATION_NONE ("none")
420#define ATTR_QETH_ISOLATION_FWD ("forward")
421#define ATTR_QETH_ISOLATION_DROP ("drop")
422
423static ssize_t qeth_dev_isolation_show(struct device *dev,
420 struct device_attribute *attr, char *buf) 424 struct device_attribute *attr, char *buf)
421{ 425{
422 struct qeth_card *card = dev_get_drvdata(dev); 426 struct qeth_card *card = dev_get_drvdata(dev);
@@ -424,44 +428,69 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
424 if (!card) 428 if (!card)
425 return -EINVAL; 429 return -EINVAL;
426 430
427 switch (card->options.large_send) { 431 switch (card->options.isolation) {
428 case QETH_LARGE_SEND_NO: 432 case ISOLATION_MODE_NONE:
429 return sprintf(buf, "%s\n", "no"); 433 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
430 case QETH_LARGE_SEND_TSO: 434 case ISOLATION_MODE_FWD:
431 return sprintf(buf, "%s\n", "TSO"); 435 return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
436 case ISOLATION_MODE_DROP:
437 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
432 default: 438 default:
433 return sprintf(buf, "%s\n", "N/A"); 439 return snprintf(buf, 5, "%s\n", "N/A");
434 } 440 }
435} 441}
436 442
437static ssize_t qeth_dev_large_send_store(struct device *dev, 443static ssize_t qeth_dev_isolation_store(struct device *dev,
438 struct device_attribute *attr, const char *buf, size_t count) 444 struct device_attribute *attr, const char *buf, size_t count)
439{ 445{
440 struct qeth_card *card = dev_get_drvdata(dev); 446 struct qeth_card *card = dev_get_drvdata(dev);
441 enum qeth_large_send_types type; 447 enum qeth_ipa_isolation_modes isolation;
442 int rc = 0; 448 int rc = 0;
443 char *tmp; 449 char *tmp, *curtoken;
450 curtoken = (char *) buf;
444 451
445 if (!card) 452 if (!card) {
446 return -EINVAL; 453 rc = -EINVAL;
447 tmp = strsep((char **) &buf, "\n"); 454 goto out;
448 if (!strcmp(tmp, "no")) { 455 }
449 type = QETH_LARGE_SEND_NO; 456
450 } else if (!strcmp(tmp, "TSO")) { 457 /* check for unknown, too, in case we do not yet know who we are */
451 type = QETH_LARGE_SEND_TSO; 458 if (card->info.type != QETH_CARD_TYPE_OSAE &&
459 card->info.type != QETH_CARD_TYPE_UNKNOWN) {
460 rc = -EOPNOTSUPP;
461 dev_err(&card->gdev->dev, "Adapter does not "
462 "support QDIO data connection isolation\n");
463 goto out;
464 }
465
466 /* parse input into isolation mode */
467 tmp = strsep(&curtoken, "\n");
468 if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
469 isolation = ISOLATION_MODE_NONE;
470 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
471 isolation = ISOLATION_MODE_FWD;
472 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
473 isolation = ISOLATION_MODE_DROP;
452 } else { 474 } else {
453 return -EINVAL; 475 rc = -EINVAL;
476 goto out;
454 } 477 }
455 if (card->options.large_send == type) 478 rc = count;
456 return count; 479
457 rc = qeth_set_large_send(card, type); 480 /* defer IP assist if device is offline (until discipline->set_online)*/
458 if (rc) 481 card->options.isolation = isolation;
459 return rc; 482 if (card->state == CARD_STATE_SOFTSETUP ||
460 return count; 483 card->state == CARD_STATE_UP) {
484 int ipa_rc = qeth_set_access_ctrl_online(card);
485 if (ipa_rc != 0)
486 rc = ipa_rc;
487 }
488out:
489 return rc;
461} 490}
462 491
463static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show, 492static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
464 qeth_dev_large_send_store); 493 qeth_dev_isolation_store);
465 494
466static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) 495static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
467{ 496{
@@ -582,7 +611,7 @@ static struct attribute *qeth_device_attrs[] = {
582 &dev_attr_recover.attr, 611 &dev_attr_recover.attr,
583 &dev_attr_performance_stats.attr, 612 &dev_attr_performance_stats.attr,
584 &dev_attr_layer2.attr, 613 &dev_attr_layer2.attr,
585 &dev_attr_large_send.attr, 614 &dev_attr_isolation.attr,
586 NULL, 615 NULL,
587}; 616};
588 617
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f4f3ca1393b2..0b763396d5d1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -866,7 +866,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
866 .get_link = ethtool_op_get_link, 866 .get_link = ethtool_op_get_link,
867 .get_strings = qeth_core_get_strings, 867 .get_strings = qeth_core_get_strings,
868 .get_ethtool_stats = qeth_core_get_ethtool_stats, 868 .get_ethtool_stats = qeth_core_get_ethtool_stats,
869 .get_stats_count = qeth_core_get_stats_count, 869 .get_sset_count = qeth_core_get_sset_count,
870 .get_drvinfo = qeth_core_get_drvinfo, 870 .get_drvinfo = qeth_core_get_drvinfo,
871 .get_settings = qeth_core_ethtool_get_settings, 871 .get_settings = qeth_core_ethtool_get_settings,
872}; 872};
@@ -874,7 +874,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
874static const struct ethtool_ops qeth_l2_osn_ops = { 874static const struct ethtool_ops qeth_l2_osn_ops = {
875 .get_strings = qeth_core_get_strings, 875 .get_strings = qeth_core_get_strings,
876 .get_ethtool_stats = qeth_core_get_ethtool_stats, 876 .get_ethtool_stats = qeth_core_get_ethtool_stats,
877 .get_stats_count = qeth_core_get_stats_count, 877 .get_sset_count = qeth_core_get_sset_count,
878 .get_drvinfo = qeth_core_get_drvinfo, 878 .get_drvinfo = qeth_core_get_drvinfo,
879}; 879};
880 880
@@ -940,30 +940,17 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
940 940
941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
942 recover_flag = card->state; 942 recover_flag = card->state;
943 rc = ccw_device_set_online(CARD_RDEV(card));
944 if (rc) {
945 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
946 return -EIO;
947 }
948 rc = ccw_device_set_online(CARD_WDEV(card));
949 if (rc) {
950 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
951 return -EIO;
952 }
953 rc = ccw_device_set_online(CARD_DDEV(card));
954 if (rc) {
955 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
956 return -EIO;
957 }
958
959 rc = qeth_core_hardsetup_card(card); 943 rc = qeth_core_hardsetup_card(card);
960 if (rc) { 944 if (rc) {
961 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 945 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
946 rc = -ENODEV;
962 goto out_remove; 947 goto out_remove;
963 } 948 }
964 949
965 if (!card->dev && qeth_l2_setup_netdev(card)) 950 if (!card->dev && qeth_l2_setup_netdev(card)) {
951 rc = -ENODEV;
966 goto out_remove; 952 goto out_remove;
953 }
967 954
968 if (card->info.type != QETH_CARD_TYPE_OSN) 955 if (card->info.type != QETH_CARD_TYPE_OSN)
969 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 956 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
@@ -983,12 +970,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
983 card->lan_online = 0; 970 card->lan_online = 0;
984 return 0; 971 return 0;
985 } 972 }
973 rc = -ENODEV;
986 goto out_remove; 974 goto out_remove;
987 } else 975 } else
988 card->lan_online = 1; 976 card->lan_online = 1;
989 977
990 if (card->info.type != QETH_CARD_TYPE_OSN) { 978 if (card->info.type != QETH_CARD_TYPE_OSN) {
991 qeth_set_large_send(card, card->options.large_send); 979 /* configure isolation level */
980 qeth_set_access_ctrl_online(card);
992 qeth_l2_process_vlans(card, 0); 981 qeth_l2_process_vlans(card, 0);
993 } 982 }
994 983
@@ -997,6 +986,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
997 rc = qeth_init_qdio_queues(card); 986 rc = qeth_init_qdio_queues(card);
998 if (rc) { 987 if (rc) {
999 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 988 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
989 rc = -ENODEV;
1000 goto out_remove; 990 goto out_remove;
1001 } 991 }
1002 card->state = CARD_STATE_SOFTSETUP; 992 card->state = CARD_STATE_SOFTSETUP;
@@ -1018,6 +1008,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1018 /* let user_space know that device is online */ 1008 /* let user_space know that device is online */
1019 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1009 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1020 return 0; 1010 return 0;
1011
1021out_remove: 1012out_remove:
1022 card->use_hard_stop = 1; 1013 card->use_hard_stop = 1;
1023 qeth_l2_stop_card(card, 0); 1014 qeth_l2_stop_card(card, 0);
@@ -1028,7 +1019,7 @@ out_remove:
1028 card->state = CARD_STATE_RECOVER; 1019 card->state = CARD_STATE_RECOVER;
1029 else 1020 else
1030 card->state = CARD_STATE_DOWN; 1021 card->state = CARD_STATE_DOWN;
1031 return -ENODEV; 1022 return rc;
1032} 1023}
1033 1024
1034static int qeth_l2_set_online(struct ccwgroup_device *gdev) 1025static int qeth_l2_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9f143c83bba3..321988fa9f7d 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -60,5 +60,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
62 const u8 *); 62 const u8 *);
63int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
64int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
63 65
64#endif /* __QETH_L3_H__ */ 66#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 073b6d354915..fd1b6ed3721f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -41,6 +41,32 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41static int __qeth_l3_set_online(struct ccwgroup_device *, int); 41static int __qeth_l3_set_online(struct ccwgroup_device *, int);
42static int __qeth_l3_set_offline(struct ccwgroup_device *, int); 42static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
43 43
44int qeth_l3_set_large_send(struct qeth_card *card,
45 enum qeth_large_send_types type)
46{
47 int rc = 0;
48
49 card->options.large_send = type;
50 if (card->dev == NULL)
51 return 0;
52
53 if (card->options.large_send == QETH_LARGE_SEND_TSO) {
54 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
55 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
56 NETIF_F_HW_CSUM;
57 } else {
58 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
59 NETIF_F_HW_CSUM);
60 card->options.large_send = QETH_LARGE_SEND_NO;
61 rc = -EOPNOTSUPP;
62 }
63 } else {
64 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
65 NETIF_F_HW_CSUM);
66 card->options.large_send = QETH_LARGE_SEND_NO;
67 }
68 return rc;
69}
44 70
45static int qeth_l3_isxdigit(char *buf) 71static int qeth_l3_isxdigit(char *buf)
46{ 72{
@@ -1439,6 +1465,35 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
1439 return 0; 1465 return 0;
1440} 1466}
1441 1467
1468int qeth_l3_set_rx_csum(struct qeth_card *card,
1469 enum qeth_checksum_types csum_type)
1470{
1471 int rc = 0;
1472
1473 if (card->options.checksum_type == HW_CHECKSUMMING) {
1474 if ((csum_type != HW_CHECKSUMMING) &&
1475 (card->state != CARD_STATE_DOWN)) {
1476 rc = qeth_l3_send_simple_setassparms(card,
1477 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
1478 if (rc)
1479 return -EIO;
1480 }
1481 } else {
1482 if (csum_type == HW_CHECKSUMMING) {
1483 if (card->state != CARD_STATE_DOWN) {
1484 if (!qeth_is_supported(card,
1485 IPA_INBOUND_CHECKSUM))
1486 return -EPERM;
1487 rc = qeth_l3_send_checksum_command(card);
1488 if (rc)
1489 return -EIO;
1490 }
1491 }
1492 }
1493 card->options.checksum_type = csum_type;
1494 return rc;
1495}
1496
1442static int qeth_l3_start_ipa_checksum(struct qeth_card *card) 1497static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1443{ 1498{
1444 int rc = 0; 1499 int rc = 0;
@@ -1506,6 +1561,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1506static int qeth_l3_start_ipassists(struct qeth_card *card) 1561static int qeth_l3_start_ipassists(struct qeth_card *card)
1507{ 1562{
1508 QETH_DBF_TEXT(TRACE, 3, "strtipas"); 1563 QETH_DBF_TEXT(TRACE, 3, "strtipas");
1564
1565 qeth_set_access_ctrl_online(card); /* go on*/
1509 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 1566 qeth_l3_start_ipa_arp_processing(card); /* go on*/
1510 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ 1567 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
1511 qeth_l3_start_ipa_source_mac(card); /* go on*/ 1568 qeth_l3_start_ipa_source_mac(card); /* go on*/
@@ -2684,6 +2741,24 @@ static void qeth_tx_csum(struct sk_buff *skb)
2684 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2741 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2685} 2742}
2686 2743
2744static inline int qeth_l3_tso_elements(struct sk_buff *skb)
2745{
2746 unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
2747 tcp_hdr(skb)->doff * 4;
2748 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
2749 int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
2750 elements += skb_shinfo(skb)->nr_frags;
2751 return elements;
2752}
2753
2754static inline int qeth_l3_tso_check(struct sk_buff *skb)
2755{
2756 int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
2757 (unsigned long)skb->data;
2758 return (((unsigned long)skb->data & PAGE_MASK) !=
2759 (((unsigned long)skb->data + len) & PAGE_MASK));
2760}
2761
2687static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 2762static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2688{ 2763{
2689 int rc; 2764 int rc;
@@ -2777,16 +2852,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2777 /* fix hardware limitation: as long as we do not have sbal 2852 /* fix hardware limitation: as long as we do not have sbal
2778 * chaining we can not send long frag lists 2853 * chaining we can not send long frag lists
2779 */ 2854 */
2780 if ((large_send == QETH_LARGE_SEND_TSO) && 2855 if (large_send == QETH_LARGE_SEND_TSO) {
2781 ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) { 2856 if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
2782 if (skb_linearize(new_skb)) 2857 if (skb_linearize(new_skb))
2783 goto tx_drop; 2858 goto tx_drop;
2859 if (card->options.performance_stats)
2860 card->perf_stats.tx_lin++;
2861 }
2784 } 2862 }
2785 2863
2786 if ((large_send == QETH_LARGE_SEND_TSO) && 2864 if ((large_send == QETH_LARGE_SEND_TSO) &&
2787 (cast_type == RTN_UNSPEC)) { 2865 (cast_type == RTN_UNSPEC)) {
2788 hdr = (struct qeth_hdr *)skb_push(new_skb, 2866 hdr = (struct qeth_hdr *)skb_push(new_skb,
2789 sizeof(struct qeth_hdr_tso)); 2867 sizeof(struct qeth_hdr_tso));
2868 if (qeth_l3_tso_check(new_skb))
2869 QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
2790 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2870 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2791 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2871 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2792 qeth_tso_fill_header(card, hdr, new_skb); 2872 qeth_tso_fill_header(card, hdr, new_skb);
@@ -2903,46 +2983,28 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2903static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 2983static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2904{ 2984{
2905 struct qeth_card *card = dev->ml_priv; 2985 struct qeth_card *card = dev->ml_priv;
2906 enum qeth_card_states old_state;
2907 enum qeth_checksum_types csum_type; 2986 enum qeth_checksum_types csum_type;
2908 2987
2909 if ((card->state != CARD_STATE_UP) &&
2910 (card->state != CARD_STATE_DOWN))
2911 return -EPERM;
2912
2913 if (data) 2988 if (data)
2914 csum_type = HW_CHECKSUMMING; 2989 csum_type = HW_CHECKSUMMING;
2915 else 2990 else
2916 csum_type = SW_CHECKSUMMING; 2991 csum_type = SW_CHECKSUMMING;
2917 2992
2918 if (card->options.checksum_type != csum_type) { 2993 return qeth_l3_set_rx_csum(card, csum_type);
2919 old_state = card->state;
2920 if (card->state == CARD_STATE_UP)
2921 __qeth_l3_set_offline(card->gdev, 1);
2922 card->options.checksum_type = csum_type;
2923 if (old_state == CARD_STATE_UP)
2924 __qeth_l3_set_online(card->gdev, 1);
2925 }
2926 return 0;
2927} 2994}
2928 2995
2929static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 2996static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2930{ 2997{
2931 struct qeth_card *card = dev->ml_priv; 2998 struct qeth_card *card = dev->ml_priv;
2999 int rc = 0;
2932 3000
2933 if (data) { 3001 if (data) {
2934 if (card->options.large_send == QETH_LARGE_SEND_NO) { 3002 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
2935 if (card->info.type == QETH_CARD_TYPE_IQD)
2936 return -EPERM;
2937 else
2938 card->options.large_send = QETH_LARGE_SEND_TSO;
2939 dev->features |= NETIF_F_TSO;
2940 }
2941 } else { 3003 } else {
2942 dev->features &= ~NETIF_F_TSO; 3004 dev->features &= ~NETIF_F_TSO;
2943 card->options.large_send = QETH_LARGE_SEND_NO; 3005 card->options.large_send = QETH_LARGE_SEND_NO;
2944 } 3006 }
2945 return 0; 3007 return rc;
2946} 3008}
2947 3009
2948static const struct ethtool_ops qeth_l3_ethtool_ops = { 3010static const struct ethtool_ops qeth_l3_ethtool_ops = {
@@ -2957,7 +3019,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = {
2957 .set_tso = qeth_l3_ethtool_set_tso, 3019 .set_tso = qeth_l3_ethtool_set_tso,
2958 .get_strings = qeth_core_get_strings, 3020 .get_strings = qeth_core_get_strings,
2959 .get_ethtool_stats = qeth_core_get_ethtool_stats, 3021 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2960 .get_stats_count = qeth_core_get_stats_count, 3022 .get_sset_count = qeth_core_get_sset_count,
2961 .get_drvinfo = qeth_core_get_drvinfo, 3023 .get_drvinfo = qeth_core_get_drvinfo,
2962 .get_settings = qeth_core_ethtool_get_settings, 3024 .get_settings = qeth_core_ethtool_get_settings,
2963}; 3025};
@@ -3058,6 +3120,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3058 NETIF_F_HW_VLAN_RX | 3120 NETIF_F_HW_VLAN_RX |
3059 NETIF_F_HW_VLAN_FILTER; 3121 NETIF_F_HW_VLAN_FILTER;
3060 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 3122 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
3123 card->dev->gso_max_size = 15 * PAGE_SIZE;
3061 3124
3062 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3125 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3063 return register_netdev(card->dev); 3126 return register_netdev(card->dev);
@@ -3154,32 +3217,19 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3154 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 3217 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3155 3218
3156 recover_flag = card->state; 3219 recover_flag = card->state;
3157 rc = ccw_device_set_online(CARD_RDEV(card));
3158 if (rc) {
3159 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3160 return -EIO;
3161 }
3162 rc = ccw_device_set_online(CARD_WDEV(card));
3163 if (rc) {
3164 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3165 return -EIO;
3166 }
3167 rc = ccw_device_set_online(CARD_DDEV(card));
3168 if (rc) {
3169 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3170 return -EIO;
3171 }
3172
3173 rc = qeth_core_hardsetup_card(card); 3220 rc = qeth_core_hardsetup_card(card);
3174 if (rc) { 3221 if (rc) {
3175 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3222 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3223 rc = -ENODEV;
3176 goto out_remove; 3224 goto out_remove;
3177 } 3225 }
3178 3226
3179 qeth_l3_query_ipassists(card, QETH_PROT_IPV4); 3227 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3180 3228
3181 if (!card->dev && qeth_l3_setup_netdev(card)) 3229 if (!card->dev && qeth_l3_setup_netdev(card)) {
3230 rc = -ENODEV;
3182 goto out_remove; 3231 goto out_remove;
3232 }
3183 3233
3184 card->state = CARD_STATE_HARDSETUP; 3234 card->state = CARD_STATE_HARDSETUP;
3185 qeth_print_status_message(card); 3235 qeth_print_status_message(card);
@@ -3196,10 +3246,11 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3196 card->lan_online = 0; 3246 card->lan_online = 0;
3197 return 0; 3247 return 0;
3198 } 3248 }
3249 rc = -ENODEV;
3199 goto out_remove; 3250 goto out_remove;
3200 } else 3251 } else
3201 card->lan_online = 1; 3252 card->lan_online = 1;
3202 qeth_set_large_send(card, card->options.large_send); 3253 qeth_l3_set_large_send(card, card->options.large_send);
3203 3254
3204 rc = qeth_l3_setadapter_parms(card); 3255 rc = qeth_l3_setadapter_parms(card);
3205 if (rc) 3256 if (rc)
@@ -3218,6 +3269,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3218 rc = qeth_init_qdio_queues(card); 3269 rc = qeth_init_qdio_queues(card);
3219 if (rc) { 3270 if (rc) {
3220 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 3271 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3272 rc = -ENODEV;
3221 goto out_remove; 3273 goto out_remove;
3222 } 3274 }
3223 card->state = CARD_STATE_SOFTSETUP; 3275 card->state = CARD_STATE_SOFTSETUP;
@@ -3248,7 +3300,7 @@ out_remove:
3248 card->state = CARD_STATE_RECOVER; 3300 card->state = CARD_STATE_RECOVER;
3249 else 3301 else
3250 card->state = CARD_STATE_DOWN; 3302 card->state = CARD_STATE_DOWN;
3251 return -ENODEV; 3303 return rc;
3252} 3304}
3253 3305
3254static int qeth_l3_set_online(struct ccwgroup_device *gdev) 3306static int qeth_l3_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index c144b9924d52..3360b0941aa1 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -293,31 +293,79 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
293 struct device_attribute *attr, const char *buf, size_t count) 293 struct device_attribute *attr, const char *buf, size_t count)
294{ 294{
295 struct qeth_card *card = dev_get_drvdata(dev); 295 struct qeth_card *card = dev_get_drvdata(dev);
296 enum qeth_checksum_types csum_type;
296 char *tmp; 297 char *tmp;
298 int rc;
297 299
298 if (!card) 300 if (!card)
299 return -EINVAL; 301 return -EINVAL;
300 302
301 if ((card->state != CARD_STATE_DOWN) &&
302 (card->state != CARD_STATE_RECOVER))
303 return -EPERM;
304
305 tmp = strsep((char **) &buf, "\n"); 303 tmp = strsep((char **) &buf, "\n");
306 if (!strcmp(tmp, "sw_checksumming")) 304 if (!strcmp(tmp, "sw_checksumming"))
307 card->options.checksum_type = SW_CHECKSUMMING; 305 csum_type = SW_CHECKSUMMING;
308 else if (!strcmp(tmp, "hw_checksumming")) 306 else if (!strcmp(tmp, "hw_checksumming"))
309 card->options.checksum_type = HW_CHECKSUMMING; 307 csum_type = HW_CHECKSUMMING;
310 else if (!strcmp(tmp, "no_checksumming")) 308 else if (!strcmp(tmp, "no_checksumming"))
311 card->options.checksum_type = NO_CHECKSUMMING; 309 csum_type = NO_CHECKSUMMING;
312 else { 310 else
313 return -EINVAL; 311 return -EINVAL;
314 } 312
313 rc = qeth_l3_set_rx_csum(card, csum_type);
314 if (rc)
315 return rc;
315 return count; 316 return count;
316} 317}
317 318
318static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, 319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
319 qeth_l3_dev_checksum_store); 320 qeth_l3_dev_checksum_store);
320 321
322static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct qeth_card *card = dev_get_drvdata(dev);
326
327 if (!card)
328 return -EINVAL;
329
330 switch (card->options.large_send) {
331 case QETH_LARGE_SEND_NO:
332 return sprintf(buf, "%s\n", "no");
333 case QETH_LARGE_SEND_TSO:
334 return sprintf(buf, "%s\n", "TSO");
335 default:
336 return sprintf(buf, "%s\n", "N/A");
337 }
338}
339
340static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
341 struct device_attribute *attr, const char *buf, size_t count)
342{
343 struct qeth_card *card = dev_get_drvdata(dev);
344 enum qeth_large_send_types type;
345 int rc = 0;
346 char *tmp;
347
348 if (!card)
349 return -EINVAL;
350 tmp = strsep((char **) &buf, "\n");
351 if (!strcmp(tmp, "no"))
352 type = QETH_LARGE_SEND_NO;
353 else if (!strcmp(tmp, "TSO"))
354 type = QETH_LARGE_SEND_TSO;
355 else
356 return -EINVAL;
357
358 if (card->options.large_send == type)
359 return count;
360 rc = qeth_l3_set_large_send(card, type);
361 if (rc)
362 return rc;
363 return count;
364}
365
366static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
367 qeth_l3_dev_large_send_store);
368
321static struct attribute *qeth_l3_device_attrs[] = { 369static struct attribute *qeth_l3_device_attrs[] = {
322 &dev_attr_route4.attr, 370 &dev_attr_route4.attr,
323 &dev_attr_route6.attr, 371 &dev_attr_route6.attr,
@@ -325,6 +373,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
325 &dev_attr_broadcast_mode.attr, 373 &dev_attr_broadcast_mode.attr,
326 &dev_attr_canonical_macaddr.attr, 374 &dev_attr_canonical_macaddr.attr,
327 &dev_attr_checksumming.attr, 375 &dev_attr_checksumming.attr,
376 &dev_attr_large_send.attr,
328 NULL, 377 NULL,
329}; 378};
330 379
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 538c570df337..f1dcd7969a5c 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -551,13 +551,13 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
551 might_sleep_if(pdev->id.coreid != SSB_DEV_PCI); 551 might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);
552 552
553 /* Enable interrupts for this device. */ 553 /* Enable interrupts for this device. */
554 if (bus->host_pci && 554 if ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE)) {
555 ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
556 u32 coremask; 555 u32 coremask;
557 556
558 /* Calculate the "coremask" for the device. */ 557 /* Calculate the "coremask" for the device. */
559 coremask = (1 << dev->core_index); 558 coremask = (1 << dev->core_index);
560 559
560 SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
561 err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp); 561 err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
562 if (err) 562 if (err)
563 goto out; 563 goto out;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 579b114be412..5681ebed9c65 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -140,6 +140,19 @@ static void ssb_device_put(struct ssb_device *dev)
140 put_device(dev->dev); 140 put_device(dev->dev);
141} 141}
142 142
143static inline struct ssb_driver *ssb_driver_get(struct ssb_driver *drv)
144{
145 if (drv)
146 get_driver(&drv->drv);
147 return drv;
148}
149
150static inline void ssb_driver_put(struct ssb_driver *drv)
151{
152 if (drv)
153 put_driver(&drv->drv);
154}
155
143static int ssb_device_resume(struct device *dev) 156static int ssb_device_resume(struct device *dev)
144{ 157{
145 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev); 158 struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
@@ -210,90 +223,81 @@ int ssb_bus_suspend(struct ssb_bus *bus)
210EXPORT_SYMBOL(ssb_bus_suspend); 223EXPORT_SYMBOL(ssb_bus_suspend);
211 224
212#ifdef CONFIG_SSB_SPROM 225#ifdef CONFIG_SSB_SPROM
213int ssb_devices_freeze(struct ssb_bus *bus) 226/** ssb_devices_freeze - Freeze all devices on the bus.
227 *
228 * After freezing no device driver will be handling a device
229 * on this bus anymore. ssb_devices_thaw() must be called after
230 * a successful freeze to reactivate the devices.
231 *
232 * @bus: The bus.
233 * @ctx: Context structure. Pass this to ssb_devices_thaw().
234 */
235int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx)
214{ 236{
215 struct ssb_device *dev; 237 struct ssb_device *sdev;
216 struct ssb_driver *drv; 238 struct ssb_driver *sdrv;
217 int err = 0; 239 unsigned int i;
218 int i; 240
219 pm_message_t state = PMSG_FREEZE; 241 memset(ctx, 0, sizeof(*ctx));
242 ctx->bus = bus;
243 SSB_WARN_ON(bus->nr_devices > ARRAY_SIZE(ctx->device_frozen));
220 244
221 /* First check that we are capable to freeze all devices. */
222 for (i = 0; i < bus->nr_devices; i++) { 245 for (i = 0; i < bus->nr_devices; i++) {
223 dev = &(bus->devices[i]); 246 sdev = ssb_device_get(&bus->devices[i]);
224 if (!dev->dev || 247
225 !dev->dev->driver || 248 if (!sdev->dev || !sdev->dev->driver ||
226 !device_is_registered(dev->dev)) 249 !device_is_registered(sdev->dev)) {
227 continue; 250 ssb_device_put(sdev);
228 drv = drv_to_ssb_drv(dev->dev->driver);
229 if (!drv)
230 continue; 251 continue;
231 if (!drv->suspend) {
232 /* Nope, can't suspend this one. */
233 return -EOPNOTSUPP;
234 } 252 }
235 } 253 sdrv = ssb_driver_get(drv_to_ssb_drv(sdev->dev->driver));
236 /* Now suspend all devices */ 254 if (!sdrv || SSB_WARN_ON(!sdrv->remove)) {
237 for (i = 0; i < bus->nr_devices; i++) { 255 ssb_device_put(sdev);
238 dev = &(bus->devices[i]);
239 if (!dev->dev ||
240 !dev->dev->driver ||
241 !device_is_registered(dev->dev))
242 continue;
243 drv = drv_to_ssb_drv(dev->dev->driver);
244 if (!drv)
245 continue; 256 continue;
246 err = drv->suspend(dev, state);
247 if (err) {
248 ssb_printk(KERN_ERR PFX "Failed to freeze device %s\n",
249 dev_name(dev->dev));
250 goto err_unwind;
251 } 257 }
258 sdrv->remove(sdev);
259 ctx->device_frozen[i] = 1;
252 } 260 }
253 261
254 return 0; 262 return 0;
255err_unwind:
256 for (i--; i >= 0; i--) {
257 dev = &(bus->devices[i]);
258 if (!dev->dev ||
259 !dev->dev->driver ||
260 !device_is_registered(dev->dev))
261 continue;
262 drv = drv_to_ssb_drv(dev->dev->driver);
263 if (!drv)
264 continue;
265 if (drv->resume)
266 drv->resume(dev);
267 }
268 return err;
269} 263}
270 264
271int ssb_devices_thaw(struct ssb_bus *bus) 265/** ssb_devices_thaw - Unfreeze all devices on the bus.
266 *
267 * This will re-attach the device drivers and re-init the devices.
268 *
269 * @ctx: The context structure from ssb_devices_freeze()
270 */
271int ssb_devices_thaw(struct ssb_freeze_context *ctx)
272{ 272{
273 struct ssb_device *dev; 273 struct ssb_bus *bus = ctx->bus;
274 struct ssb_driver *drv; 274 struct ssb_device *sdev;
275 int err; 275 struct ssb_driver *sdrv;
276 int i; 276 unsigned int i;
277 int err, result = 0;
277 278
278 for (i = 0; i < bus->nr_devices; i++) { 279 for (i = 0; i < bus->nr_devices; i++) {
279 dev = &(bus->devices[i]); 280 if (!ctx->device_frozen[i])
280 if (!dev->dev ||
281 !dev->dev->driver ||
282 !device_is_registered(dev->dev))
283 continue; 281 continue;
284 drv = drv_to_ssb_drv(dev->dev->driver); 282 sdev = &bus->devices[i];
285 if (!drv) 283
284 if (SSB_WARN_ON(!sdev->dev || !sdev->dev->driver))
286 continue; 285 continue;
287 if (SSB_WARN_ON(!drv->resume)) 286 sdrv = drv_to_ssb_drv(sdev->dev->driver);
287 if (SSB_WARN_ON(!sdrv || !sdrv->probe))
288 continue; 288 continue;
289 err = drv->resume(dev); 289
290 err = sdrv->probe(sdev, &sdev->id);
290 if (err) { 291 if (err) {
291 ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n", 292 ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n",
292 dev_name(dev->dev)); 293 dev_name(sdev->dev));
294 result = err;
293 } 295 }
296 ssb_driver_put(sdrv);
297 ssb_device_put(sdev);
294 } 298 }
295 299
296 return 0; 300 return result;
297} 301}
298#endif /* CONFIG_SSB_SPROM */ 302#endif /* CONFIG_SSB_SPROM */
299 303
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index e8b89e8ac9bd..0d6c0280eb34 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -354,7 +354,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
354 dev->bus = bus; 354 dev->bus = bus;
355 dev->ops = bus->ops; 355 dev->ops = bus->ops;
356 356
357 ssb_dprintk(KERN_INFO PFX 357 printk(KERN_DEBUG PFX
358 "Core %d found: %s " 358 "Core %d found: %s "
359 "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n", 359 "(cc 0x%03X, rev 0x%02X, vendor 0x%04X)\n",
360 i, ssb_core_name(dev->id.coreid), 360 i, ssb_core_name(dev->id.coreid),
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 8943015a3eef..d0e6762fec50 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -13,6 +13,8 @@
13 13
14#include "ssb_private.h" 14#include "ssb_private.h"
15 15
16#include <linux/ctype.h>
17
16 18
17static const struct ssb_sprom *fallback_sprom; 19static const struct ssb_sprom *fallback_sprom;
18 20
@@ -33,17 +35,27 @@ static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
33static int hex2sprom(u16 *sprom, const char *dump, size_t len, 35static int hex2sprom(u16 *sprom, const char *dump, size_t len,
34 size_t sprom_size_words) 36 size_t sprom_size_words)
35{ 37{
36 char tmp[5] = { 0 }; 38 char c, tmp[5] = { 0 };
37 int cnt = 0; 39 int err, cnt = 0;
38 unsigned long parsed; 40 unsigned long parsed;
39 41
40 if (len < sprom_size_words * 2) 42 /* Strip whitespace at the end. */
43 while (len) {
44 c = dump[len - 1];
45 if (!isspace(c) && c != '\0')
46 break;
47 len--;
48 }
49 /* Length must match exactly. */
50 if (len != sprom_size_words * 4)
41 return -EINVAL; 51 return -EINVAL;
42 52
43 while (cnt < sprom_size_words) { 53 while (cnt < sprom_size_words) {
44 memcpy(tmp, dump, 4); 54 memcpy(tmp, dump, 4);
45 dump += 4; 55 dump += 4;
46 parsed = simple_strtoul(tmp, NULL, 16); 56 err = strict_strtoul(tmp, 16, &parsed);
57 if (err)
58 return err;
47 sprom[cnt++] = swab16((u16)parsed); 59 sprom[cnt++] = swab16((u16)parsed);
48 } 60 }
49 61
@@ -90,6 +102,7 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
90 u16 *sprom; 102 u16 *sprom;
91 int res = 0, err = -ENOMEM; 103 int res = 0, err = -ENOMEM;
92 size_t sprom_size_words = bus->sprom_size; 104 size_t sprom_size_words = bus->sprom_size;
105 struct ssb_freeze_context freeze;
93 106
94 sprom = kcalloc(bus->sprom_size, sizeof(u16), GFP_KERNEL); 107 sprom = kcalloc(bus->sprom_size, sizeof(u16), GFP_KERNEL);
95 if (!sprom) 108 if (!sprom)
@@ -111,18 +124,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
111 err = -ERESTARTSYS; 124 err = -ERESTARTSYS;
112 if (mutex_lock_interruptible(&bus->sprom_mutex)) 125 if (mutex_lock_interruptible(&bus->sprom_mutex))
113 goto out_kfree; 126 goto out_kfree;
114 err = ssb_devices_freeze(bus); 127 err = ssb_devices_freeze(bus, &freeze);
115 if (err == -EOPNOTSUPP) {
116 ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze devices. "
117 "No suspend support. Is CONFIG_PM enabled?\n");
118 goto out_unlock;
119 }
120 if (err) { 128 if (err) {
121 ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n"); 129 ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n");
122 goto out_unlock; 130 goto out_unlock;
123 } 131 }
124 res = sprom_write(bus, sprom); 132 res = sprom_write(bus, sprom);
125 err = ssb_devices_thaw(bus); 133 err = ssb_devices_thaw(&freeze);
126 if (err) 134 if (err)
127 ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n"); 135 ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n");
128out_unlock: 136out_unlock:
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 25433565dfda..56054be4d113 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -176,13 +176,21 @@ extern const struct ssb_sprom *ssb_get_fallback_sprom(void);
176 176
177/* core.c */ 177/* core.c */
178extern u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m); 178extern u32 ssb_calc_clock_rate(u32 plltype, u32 n, u32 m);
179extern int ssb_devices_freeze(struct ssb_bus *bus);
180extern int ssb_devices_thaw(struct ssb_bus *bus);
181extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev); 179extern struct ssb_bus *ssb_pci_dev_to_bus(struct pci_dev *pdev);
182int ssb_for_each_bus_call(unsigned long data, 180int ssb_for_each_bus_call(unsigned long data,
183 int (*func)(struct ssb_bus *bus, unsigned long data)); 181 int (*func)(struct ssb_bus *bus, unsigned long data));
184extern struct ssb_bus *ssb_pcmcia_dev_to_bus(struct pcmcia_device *pdev); 182extern struct ssb_bus *ssb_pcmcia_dev_to_bus(struct pcmcia_device *pdev);
185 183
184struct ssb_freeze_context {
185 /* Pointer to the bus */
186 struct ssb_bus *bus;
187 /* Boolean list to indicate whether a device is frozen on this bus. */
188 bool device_frozen[SSB_MAX_NR_CORES];
189};
190extern int ssb_devices_freeze(struct ssb_bus *bus, struct ssb_freeze_context *ctx);
191extern int ssb_devices_thaw(struct ssb_freeze_context *ctx);
192
193
186 194
187/* b43_pci_bridge.c */ 195/* b43_pci_bridge.c */
188#ifdef CONFIG_SSB_B43_PCI_BRIDGE 196#ifdef CONFIG_SSB_B43_PCI_BRIDGE
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d21b3469f6d7..dfcd75cf4907 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -125,5 +125,13 @@ source "drivers/staging/sep/Kconfig"
125 125
126source "drivers/staging/iio/Kconfig" 126source "drivers/staging/iio/Kconfig"
127 127
128source "drivers/staging/strip/Kconfig"
129
130source "drivers/staging/arlan/Kconfig"
131
132source "drivers/staging/wavelan/Kconfig"
133
134source "drivers/staging/netwave/Kconfig"
135
128endif # !STAGING_EXCLUDE_BUILD 136endif # !STAGING_EXCLUDE_BUILD
129endif # STAGING 137endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 8cbf1aebea2e..7719d04a4a86 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,3 +44,8 @@ obj-$(CONFIG_VME_BUS) += vme/
44obj-$(CONFIG_RAR_REGISTER) += rar/ 44obj-$(CONFIG_RAR_REGISTER) += rar/
45obj-$(CONFIG_DX_SEP) += sep/ 45obj-$(CONFIG_DX_SEP) += sep/
46obj-$(CONFIG_IIO) += iio/ 46obj-$(CONFIG_IIO) += iio/
47obj-$(CONFIG_STRIP) += strip/
48obj-$(CONFIG_ARLAN) += arlan/
49obj-$(CONFIG_WAVELAN) += wavelan/
50obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/
51obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/
diff --git a/drivers/staging/arlan/Kconfig b/drivers/staging/arlan/Kconfig
new file mode 100644
index 000000000000..5e42b81f97b0
--- /dev/null
+++ b/drivers/staging/arlan/Kconfig
@@ -0,0 +1,15 @@
1config ARLAN
2 tristate "Aironet Arlan 655 & IC2200 DS support"
3 depends on ISA && !64BIT && WLAN
4 select WIRELESS_EXT
5 ---help---
6 Aironet makes Arlan, a class of wireless LAN adapters. These use the
7 www.Telxon.com chip, which is also used on several similar cards.
8 This driver is tested on the 655 and IC2200 series cards. Look at
9 <http://www.ylenurme.ee/~elmer/655/> for the latest information.
10
11 The driver is built as two modules, arlan and arlan-proc. The latter
12 is the /proc interface and is not needed most of time.
13
14 On some computers the card ends up in non-valid state after some
15 time. Use a ping-reset script to clear it.
diff --git a/drivers/staging/arlan/Makefile b/drivers/staging/arlan/Makefile
new file mode 100644
index 000000000000..9e58e5fae7b9
--- /dev/null
+++ b/drivers/staging/arlan/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_ARLAN) += arlan.o
2
3arlan-objs := arlan-main.o arlan-proc.o
diff --git a/drivers/staging/arlan/TODO b/drivers/staging/arlan/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/arlan/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 921a082487a1..921a082487a1 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/staging/arlan/arlan-proc.c
index a8b689635a3b..a8b689635a3b 100644
--- a/drivers/net/wireless/arlan-proc.c
+++ b/drivers/staging/arlan/arlan-proc.c
diff --git a/drivers/net/wireless/arlan.h b/drivers/staging/arlan/arlan.h
index fb3ad51a1caf..fb3ad51a1caf 100644
--- a/drivers/net/wireless/arlan.h
+++ b/drivers/staging/arlan/arlan.h
diff --git a/drivers/staging/netwave/Kconfig b/drivers/staging/netwave/Kconfig
new file mode 100644
index 000000000000..8033e8171f9e
--- /dev/null
+++ b/drivers/staging/netwave/Kconfig
@@ -0,0 +1,11 @@
1config PCMCIA_NETWAVE
2 tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
3 depends on PCMCIA && WLAN
4 select WIRELESS_EXT
5 select WEXT_PRIV
6 help
7 Say Y here if you intend to attach this type of PCMCIA (PC-card)
8 wireless Ethernet networking card to your computer.
9
10 To compile this driver as a module, choose M here: the module will be
11 called netwave_cs. If unsure, say N.
diff --git a/drivers/staging/netwave/Makefile b/drivers/staging/netwave/Makefile
new file mode 100644
index 000000000000..2ab89de59b9b
--- /dev/null
+++ b/drivers/staging/netwave/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
diff --git a/drivers/staging/netwave/TODO b/drivers/staging/netwave/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/netwave/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the driver will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index 9498b46c99a4..9498b46c99a4 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 203c79b8180f..3211dd3765a0 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -1,6 +1,7 @@
1config RTL8187SE 1config RTL8187SE
2 tristate "RealTek RTL8187SE Wireless LAN NIC driver" 2 tristate "RealTek RTL8187SE Wireless LAN NIC driver"
3 depends on PCI && WLAN 3 depends on PCI && WLAN
4 depends on WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV
5 default N 6 default N
6 ---help--- 7 ---help---
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 37e4fde45073..2ae3745f775f 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,6 +1,7 @@
1config RTL8192E 1config RTL8192E
2 tristate "RealTek RTL8192E Wireless LAN NIC driver" 2 tristate "RealTek RTL8192E Wireless LAN NIC driver"
3 depends on PCI && WLAN 3 depends on PCI && WLAN
4 depends on WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV
5 default N 6 default N
6 ---help--- 7 ---help---
diff --git a/drivers/staging/strip/Kconfig b/drivers/staging/strip/Kconfig
new file mode 100644
index 000000000000..36257b5cd6e1
--- /dev/null
+++ b/drivers/staging/strip/Kconfig
@@ -0,0 +1,22 @@
1config STRIP
2 tristate "STRIP (Metricom starmode radio IP)"
3 depends on INET
4 select WIRELESS_EXT
5 ---help---
6 Say Y if you have a Metricom radio and intend to use Starmode Radio
7 IP. STRIP is a radio protocol developed for the MosquitoNet project
8 to send Internet traffic using Metricom radios. Metricom radios are
9 small, battery powered, 100kbit/sec packet radio transceivers, about
10 the size and weight of a cellular telephone. (You may also have heard
11 them called "Metricom modems" but we avoid the term "modem" because
12 it misleads many people into thinking that you can plug a Metricom
13 modem into a phone line and use it as a modem.)
14
15 You can use STRIP on any Linux machine with a serial port, although
16 it is obviously most useful for people with laptop computers. If you
17 think you might get a Metricom radio in the future, there is no harm
18 in saying Y to STRIP now, except that it makes the kernel a bit
19 bigger.
20
21 To compile this as a module, choose M here: the module will be
22 called strip.
diff --git a/drivers/staging/strip/Makefile b/drivers/staging/strip/Makefile
new file mode 100644
index 000000000000..6417bdcac2fb
--- /dev/null
+++ b/drivers/staging/strip/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_STRIP) += strip.o
diff --git a/drivers/staging/strip/TODO b/drivers/staging/strip/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/strip/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the driver will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/strip.c b/drivers/staging/strip/strip.c
index ea6a87c19319..698aade79d40 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/staging/strip/strip.c
@@ -106,6 +106,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE";
106#include <linux/serial.h> 106#include <linux/serial.h>
107#include <linux/serialP.h> 107#include <linux/serialP.h>
108#include <linux/rcupdate.h> 108#include <linux/rcupdate.h>
109#include <linux/compat.h>
109#include <net/arp.h> 110#include <net/arp.h>
110#include <net/net_namespace.h> 111#include <net/net_namespace.h>
111 112
@@ -2725,6 +2726,19 @@ static int strip_ioctl(struct tty_struct *tty, struct file *file,
2725 return 0; 2726 return 0;
2726} 2727}
2727 2728
2729#ifdef CONFIG_COMPAT
2730static long strip_compat_ioctl(struct tty_struct *tty, struct file *file,
2731 unsigned int cmd, unsigned long arg)
2732{
2733 switch (cmd) {
2734 case SIOCGIFNAME:
2735 case SIOCSIFHWADDR:
2736 return strip_ioctl(tty, file, cmd,
2737 (unsigned long)compat_ptr(arg));
2738 }
2739 return -ENOIOCTLCMD;
2740}
2741#endif
2728 2742
2729/************************************************************************/ 2743/************************************************************************/
2730/* Initialization */ 2744/* Initialization */
@@ -2736,6 +2750,9 @@ static struct tty_ldisc_ops strip_ldisc = {
2736 .open = strip_open, 2750 .open = strip_open,
2737 .close = strip_close, 2751 .close = strip_close,
2738 .ioctl = strip_ioctl, 2752 .ioctl = strip_ioctl,
2753#ifdef CONFIG_COMPAT
2754 .compat_ioctl = strip_compat_ioctl,
2755#endif
2739 .receive_buf = strip_receive_buf, 2756 .receive_buf = strip_receive_buf,
2740 .write_wakeup = strip_write_some_more, 2757 .write_wakeup = strip_write_some_more,
2741}; 2758};
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index 9bec95adcce2..825bbc4fc3fa 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,6 +1,8 @@
1config VT6655 1config VT6655
2 tristate "VIA Technologies VT6655 support" 2 tristate "VIA Technologies VT6655 support"
3 depends on WIRELESS_EXT && PCI 3 depends on PCI
4 select WIRELESS_EXT
5 select WEXT_PRIV
4 ---help--- 6 ---help---
5 This is a vendor-written driver for VIA VT6655. 7 This is a vendor-written driver for VIA VT6655.
6 8
diff --git a/drivers/staging/vt6656/Kconfig b/drivers/staging/vt6656/Kconfig
index 3165f2c42079..87bcd269310c 100644
--- a/drivers/staging/vt6656/Kconfig
+++ b/drivers/staging/vt6656/Kconfig
@@ -1,6 +1,8 @@
1config VT6656 1config VT6656
2 tristate "VIA Technologies VT6656 support" 2 tristate "VIA Technologies VT6656 support"
3 depends on WIRELESS_EXT && USB 3 depends on USB
4 select WIRELESS_EXT
5 select WEXT_PRIV
4 ---help--- 6 ---help---
5 This is a vendor-written driver for VIA VT6656. 7 This is a vendor-written driver for VIA VT6656.
6 8
diff --git a/drivers/staging/wavelan/Kconfig b/drivers/staging/wavelan/Kconfig
new file mode 100644
index 000000000000..af655668c2a7
--- /dev/null
+++ b/drivers/staging/wavelan/Kconfig
@@ -0,0 +1,38 @@
1config WAVELAN
2 tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
3 depends on ISA && WLAN
4 select WIRELESS_EXT
5 select WEXT_SPY
6 select WEXT_PRIV
7 ---help---
8 The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
9 a Radio LAN (wireless Ethernet-like Local Area Network) using the
10 radio frequencies 900 MHz and 2.4 GHz.
11
12 If you want to use an ISA WaveLAN card under Linux, say Y and read
13 the Ethernet-HOWTO, available from
14 <http://www.tldp.org/docs.html#howto>. Some more specific
15 information is contained in
16 <file:Documentation/networking/wavelan.txt> and in the source code
17 <file:drivers/net/wireless/wavelan.p.h>.
18
19 You will also need the wireless tools package available from
20 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
21 Please read the man pages contained therein.
22
23 To compile this driver as a module, choose M here: the module will be
24 called wavelan.
25
26config PCMCIA_WAVELAN
27 tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
28 depends on PCMCIA && WLAN
29 select WIRELESS_EXT
30 select WEXT_SPY
31 select WEXT_PRIV
32 help
33 Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
34 (PC-card) wireless Ethernet networking card to your computer. This
35 driver is for the non-IEEE-802.11 Wavelan cards.
36
37 To compile this driver as a module, choose M here: the module will be
38 called wavelan_cs. If unsure, say N.
diff --git a/drivers/staging/wavelan/Makefile b/drivers/staging/wavelan/Makefile
new file mode 100644
index 000000000000..1cde17c69a43
--- /dev/null
+++ b/drivers/staging/wavelan/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_WAVELAN) += wavelan.o
2obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
diff --git a/drivers/staging/wavelan/TODO b/drivers/staging/wavelan/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/wavelan/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the driver will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/i82586.h b/drivers/staging/wavelan/i82586.h
index 5f65b250646f..5f65b250646f 100644
--- a/drivers/net/wireless/i82586.h
+++ b/drivers/staging/wavelan/i82586.h
diff --git a/drivers/net/wireless/wavelan.c b/drivers/staging/wavelan/wavelan.c
index d634b2da3b84..d634b2da3b84 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
diff --git a/drivers/net/wireless/wavelan.h b/drivers/staging/wavelan/wavelan.h
index 9ab360558ffd..9ab360558ffd 100644
--- a/drivers/net/wireless/wavelan.h
+++ b/drivers/staging/wavelan/wavelan.h
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/staging/wavelan/wavelan.p.h
index dbe8de6e5f52..dbe8de6e5f52 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/staging/wavelan/wavelan.p.h
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 431a20ec6db6..431a20ec6db6 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
diff --git a/drivers/net/wireless/wavelan_cs.h b/drivers/staging/wavelan/wavelan_cs.h
index 2e4bfe4147c6..2e4bfe4147c6 100644
--- a/drivers/net/wireless/wavelan_cs.h
+++ b/drivers/staging/wavelan/wavelan_cs.h
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/staging/wavelan/wavelan_cs.p.h
index 81d91531c4f9..8fbfaa8a5a67 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/staging/wavelan/wavelan_cs.p.h
@@ -446,7 +446,7 @@
446#include <pcmcia/ds.h> 446#include <pcmcia/ds.h>
447 447
448/* Wavelan declarations */ 448/* Wavelan declarations */
449#include "i82593.h" /* Definitions for the Intel chip */ 449#include <linux/i82593.h> /* Definitions for the Intel chip */
450 450
451#include "wavelan_cs.h" /* Others bits of the hardware */ 451#include "wavelan_cs.h" /* Others bits of the hardware */
452 452